diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 600043a40..a0201b7ff 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -14,7 +14,6 @@ jobs:
           exclude: |
             */.git/*
             ./.cache/*
-            ./vendor/*
             ./packaging/scripts/*
 
   misspell:
@@ -31,7 +30,6 @@
           exclude: |
             */.git/*
             ./.cache/*
-            ./vendor/*
 
   alex:
     name: runner / alex
diff --git a/Makefile b/Makefile
index 67b041cc3..4b778bfa4 100644
--- a/Makefile
+++ b/Makefile
@@ -19,7 +19,7 @@ define ENVS_STATIC
 	CGO_ENABLED=$(CGO_ENABLED)
 endef
 
-BUILD_FLAGS=-mod=vendor -tags gssapi
+BUILD_FLAGS=-mod=mod -tags gssapi
 versionpath?=github.com/percona/percona-backup-mongodb/pbm/version
 LDFLAGS= -X $(versionpath).gitCommit=$(GITCOMMIT) -X $(versionpath).gitBranch=$(GITBRANCH) -X $(versionpath).buildTime=$(BUILDTIME) -X $(versionpath).version=$(VERSION)
 LDFLAGS_STATIC=$(LDFLAGS) -extldflags "-static"
diff --git a/go.mod b/go.mod
index e69516e3d..64b6819fd 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
 module github.com/percona/percona-backup-mongodb
 
-go 1.22
+go 1.23.0
+
+toolchain go1.23.4
 
 require (
 	cloud.google.com/go/storage v1.38.0
@@ -31,7 +33,7 @@
 	github.com/testcontainers/testcontainers-go/modules/mongodb v0.34.0
 	go.mongodb.org/mongo-driver v1.17.1
 	golang.org/x/mod v0.19.0
-	golang.org/x/sync v0.11.0
+	golang.org/x/sync v0.13.0
 	google.golang.org/api v0.171.0
 	gopkg.in/yaml.v2 v2.4.0
 )
@@ -120,13 +122,13 @@
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.9.0 // indirect
-	golang.org/x/crypto v0.33.0 // indirect
+	golang.org/x/crypto v0.37.0 // indirect
 	golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 // indirect
 	golang.org/x/net v0.35.0 // indirect
 	golang.org/x/oauth2 v0.18.0 // indirect
-	golang.org/x/sys v0.30.0 // indirect
-	golang.org/x/term v0.29.0 // indirect
-	golang.org/x/text v0.22.0 // indirect
+	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/term v0.31.0 // indirect
+	golang.org/x/text v0.24.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
diff --git a/go.sum b/go.sum
index 51b35147f..4fc0d2cb1 100644
--- a/go.sum
+++ b/go.sum
@@ -344,6 +344,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
 golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 h1:vpzMC/iZhYFAjJzHU0Cfuq+w1vLLsF2vLkDrPjzKYck=
 golang.org/x/exp v0.0.0-20240529005216-23cca8864a10/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
@@ -379,6 +381,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
 golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0/go.mod 
h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -397,16 +400,19 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/LICENSE b/vendor/cloud.google.com/go/compute/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/cloud.google.com/go/compute/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go deleted file mode 100644 index 40d1a8916..000000000 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -// Version is the current tagged release of the library. -const Version = "1.25.1" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md deleted file mode 100644 index 06b957349..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ /dev/null @@ -1,19 +0,0 @@ -# Changes - -## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) - - -### Bug Fixes - -* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) - -## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) - - -### Bug Fixes - -* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) - -## [0.1.0] (2022-10-26) - -Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md deleted file mode 100644 index f940fb2c8..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Compute API - -[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) - -This is a utility library for communicating with Google Cloud metadata service -on Google Cloud. - -## Install - -```bash -go get cloud.google.com/go/compute/metadata -``` - -## Go Version Support - -See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) -section in the root directory's README. - -## Contributing - -Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) -document for details. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. See -[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index c17faa142..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://cloud.google.com/compute/docs/metadata/overview. -package metadata // import "cloud.google.com/go/compute/metadata" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. 
- metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var defaultClient = &Client{hc: newDefaultHTTPClient()} - -func newDefaultHTTPClient() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - IdleConnTimeout: 60 * time.Second, - }, - Timeout: 5 * time.Second, - } -} - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -func (c *cachedValue) get(cl *Client) (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = cl.getTrimmed(c.k) - } else { - v, err = cl.Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := newDefaultHTTPClient().Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). 
- return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe calls Client.Subscribe on the default client. -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return defaultClient.Subscribe(suffix, fn) -} - -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } - -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func Hostname() (string, error) { return defaultClient.Hostname() } - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } - -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } - -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } - -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. -func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) -} - -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. -func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) -} - -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -// A Client provides metadata. 
-type Client struct { - hc *http.Client -} - -// NewClient returns a Client that can be used to fetch metadata. -// Returns the client that uses the specified http.Client for HTTP requests. -// If nil is specified, returns the default client. -func NewClient(c *http.Client) *Client { - if c == nil { - return defaultClient - } - - return &Client{hc: c} -} - -// getETag returns a value from the metadata service as well as the associated ETag. -// This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - ctx := context.TODO() - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - suffix = strings.TrimLeft(suffix, "/") - u := "http://" + host + "/computeMetadata/v1/" + suffix - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return "", "", err - } - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - var res *http.Response - var reqErr error - retryer := newRetryer() - for { - res, reqErr = c.hc.Do(req) - var code int - if res != nil { - code = res.StatusCode - } - if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { - if err := sleep(ctx, delay); err != nil { - return "", "", err - } - continue - } - break - } - if reqErr != nil { - return "", "", reqErr - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} - } - return string(all), res.Header.Get("Etag"), nil -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) - return val, err -} - -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } - -// NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } - -// InstanceID returns the current VM's numeric instance ID. 
-func (c *Client) InstanceID() (string, error) { return instID.get(c) } - -// InternalIP returns the instance's primary internal IP address. -func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") -} - -// Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Email(serviceAccount string) (string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// ".c..internal". -func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func (c *Client) InstanceTags() ([]string, error) { - var s []string - j, err := c.Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceName returns the current VM's instance ID string. -func (c *Client) InstanceName() (string, error) { - return c.getTrimmed("instance/name") -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") - // zone is of the form "projects//zones/". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. 
-func (c *Client) Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := c.getETag(suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code. - Code int - // Message is the server response message. - Message string -} - -func (e *Error) Error() string { - return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) -} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go deleted file mode 100644 index 0f18f3cda..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/retry.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "context" - "io" - "math/rand" - "net/http" - "time" -) - -const ( - maxRetryAttempts = 5 -) - -var ( - syscallRetryable = func(err error) bool { return false } -) - -// defaultBackoff is basically equivalent to gax.Backoff without the need for -// the dependency. -type defaultBackoff struct { - max time.Duration - mul float64 - cur time.Duration -} - -func (b *defaultBackoff) Pause() time.Duration { - d := time.Duration(1 + rand.Int63n(int64(b.cur))) - b.cur = time.Duration(float64(b.cur) * b.mul) - if b.cur > b.max { - b.cur = b.max - } - return d -} - -// sleep is the equivalent of gax.Sleep without the need for the dependency. 
-func sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -func newRetryer() *metadataRetryer { - return &metadataRetryer{bo: &defaultBackoff{ - cur: 100 * time.Millisecond, - max: 30 * time.Second, - mul: 2, - }} -} - -type backoff interface { - Pause() time.Duration -} - -type metadataRetryer struct { - bo backoff - attempts int -} - -func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) { - if status == http.StatusOK { - return 0, false - } - retryOk := shouldRetry(status, err) - if !retryOk { - return 0, false - } - if r.attempts == maxRetryAttempts { - return 0, false - } - r.attempts++ - return r.bo.Pause(), true -} - -func shouldRetry(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - // Transient network errors should be retried. - if syscallRetryable(err) { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true - } - } - if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) - } - return false -} diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go deleted file mode 100644 index bb412f891..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package metadata - -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go deleted file mode 100644 index 4cef48500..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the {{.RootMod}} import, won't actually become part of -// the resultant binary. -//go:build modhack -// +build modhack - -package metadata - -// Necessary for safely adding multi-module repo. 
See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md deleted file mode 100644 index 43a179384..000000000 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ /dev/null @@ -1,161 +0,0 @@ -# Changes - - -## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30) - - -### Bug Fixes - -* **iam:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698)) - -## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01) - - -### Bug Fixes - -* **iam:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) - -## [1.1.4](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.3...iam/v1.1.4) (2023-10-26) - - -### Bug Fixes - -* **iam:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) - -## [1.1.3](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.2...iam/v1.1.3) (2023-10-12) - - -### Bug Fixes - -* **iam:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) - -## [1.1.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.1...iam/v1.1.2) (2023-08-08) - - -### Documentation - -* **iam:** Minor formatting ([b4349cc](https://github.com/googleapis/google-cloud-go/commit/b4349cc507870ff8629bbc07de578b63bb889626)) - -## [1.1.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.0...iam/v1.1.1) (2023-06-20) - - -### Bug Fixes - -* **iam:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) - -## [1.1.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.1...iam/v1.1.0) (2023-05-30) - - -### Features - -* **iam:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) - -## [1.0.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.0...iam/v1.0.1) (2023-05-08) - - -### Bug Fixes - -* **iam:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) - -## [1.0.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.13.0...iam/v1.0.0) (2023-04-04) - - -### Features - -* **iam:** Promote to GA ([#7627](https://github.com/googleapis/google-cloud-go/issues/7627)) ([b351906](https://github.com/googleapis/google-cloud-go/commit/b351906a10e17a02d7f7e2551bc1585fd9dc3742)) - -## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15) - - -### Features - -* **iam:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) - -## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17) - - -### Features - -* **iam:** Migrate to new stubs ([a61ddcd](https://github.com/googleapis/google-cloud-go/commit/a61ddcd3041c7af4a15109dc4431f9b327c497fb)) - -## 
[0.11.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.10.0...iam/v0.11.0) (2023-02-16) - - -### Features - -* **iam:** Start generating proto stubs ([970d763](https://github.com/googleapis/google-cloud-go/commit/970d763531b54b2bc75d7ff26a20b6e05150cab8)) - -## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.9.0...iam/v0.10.0) (2023-01-04) - - -### Features - -* **iam:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) - -## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.8.0...iam/v0.9.0) (2022-12-15) - - -### Features - -* **iam:** Rewrite iam sigs and update proto import ([#7137](https://github.com/googleapis/google-cloud-go/issues/7137)) ([ad67fa3](https://github.com/googleapis/google-cloud-go/commit/ad67fa36c263c161226f7fecbab5221592374dca)) - -## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05) - - -### Features - -* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38)) - -## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03) - - -### Features - -* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) - -## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25) - - -### Features - -* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc)) - -## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28) - - -### Features - -* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7)) - -## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06) - - -### Features - -* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50)) - -## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23) - - -### Features - -* **iam:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) - -## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14) - - -### Features - -* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e)) - -### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14) - - -### Bug Fixes - -* **iam:** run formatter ([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9)) - -## v0.1.0 - -This is the first tag to carve out iam as its own module. See -[Add a module to a multi-module repository](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository). 
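Reviewer note on the vendor removals above: the cloud.google.com/go packages deleted here (compute/metadata and iam) only leave the vendor/ tree; the dependencies themselves remain declared in go.mod, so the same source is pulled from the module cache at build time. As an illustration only, below is a minimal, hypothetical sketch that exercises the GCE metadata client API shown in the deleted source above (NewClient, Zone, InstanceAttributeValue, NotDefinedError). It is not code from this repository, and the "backup-window" attribute name is invented for the example.

```go
// Illustrative only: uses the metadata client API visible in the deleted
// vendored source above. Not part of percona-backup-mongodb; the
// "backup-window" attribute is a hypothetical example, and the program is
// only meaningful when run on a GCE VM.
package main

import (
	"fmt"
	"log"
	"net/http"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// NewClient(nil) would return the package's default client; an explicit
	// http.Client is passed here only to show the hook.
	c := metadata.NewClient(&http.Client{})

	// Zone() trims "projects/<projNum>/zones/<zoneName>" down to the short
	// zone name, e.g. "us-central1-b".
	zone, err := c.Zone()
	if err != nil {
		log.Fatalf("zone lookup: %v", err)
	}

	// Undefined attributes come back as a NotDefinedError rather than a
	// generic failure, per the doc comments in the removed file.
	window, err := c.InstanceAttributeValue("backup-window")
	if _, notDefined := err.(metadata.NotDefinedError); notDefined {
		window = "(unset)"
	} else if err != nil {
		log.Fatalf("attribute lookup: %v", err)
	}

	fmt.Printf("zone=%s backup-window=%s\n", zone, window)
}
```

The deletions in this diff do not change any of that behaviour; they only change where the package source lives during the build.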
diff --git a/vendor/cloud.google.com/go/iam/LICENSE b/vendor/cloud.google.com/go/iam/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/cloud.google.com/go/iam/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/iam/README.md b/vendor/cloud.google.com/go/iam/README.md deleted file mode 100644 index 0072cc9e2..000000000 --- a/vendor/cloud.google.com/go/iam/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# IAM API - -[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/iam.svg)](https://pkg.go.dev/cloud.google.com/go/iam) - -Go Client Library for IAM API. - -## Install - -```bash -go get cloud.google.com/go/iam -``` - -## Stability - -The stability of this module is indicated by SemVer. - -However, a `v1+` module may have breaking changes in two scenarios: - -* Packages with `alpha` or `beta` in the import path -* The GoDoc has an explicit stability disclaimer (for example, for an experimental feature). - -## Go Version Support - -See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) -section in the root directory's README. - -## Authorization - -See the [Authorization](https://github.com/googleapis/google-cloud-go#authorization) -section in the root directory's README. - -## Contributing - -Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) -document for details. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. See -[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go deleted file mode 100644 index b5243e612..000000000 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ /dev/null @@ -1,672 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.32.0 -// protoc v4.23.2 -// source: google/iam/v1/iam_policy.proto - -package iampb - -import ( - context "context" - reflect "reflect" - sync "sync" - - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Request message for `SetIamPolicy` method. -type SetIamPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // REQUIRED: The resource for which the policy is being specified. - // See the operation documentation for the appropriate value for this field. - Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // REQUIRED: The complete policy to be applied to the `resource`. The size of - // the policy is limited to a few 10s of KB. An empty policy is a - // valid policy but certain Cloud Platform services (such as Projects) - // might reject them. - Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` - // OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only - // the fields in the mask will be modified. If no mask is provided, the - // following default mask is used: - // - // `paths: "bindings, etag"` - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *SetIamPolicyRequest) Reset() { - *x = SetIamPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetIamPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetIamPolicyRequest) ProtoMessage() {} - -func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead. -func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0} -} - -func (x *SetIamPolicyRequest) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *SetIamPolicyRequest) GetPolicy() *Policy { - if x != nil { - return x.Policy - } - return nil -} - -func (x *SetIamPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Request message for `GetIamPolicy` method. -type GetIamPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // REQUIRED: The resource for which the policy is being requested. - // See the operation documentation for the appropriate value for this field. - Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // OPTIONAL: A `GetPolicyOptions` object for specifying options to - // `GetIamPolicy`. 
- Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` -} - -func (x *GetIamPolicyRequest) Reset() { - *x = GetIamPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetIamPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetIamPolicyRequest) ProtoMessage() {} - -func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead. -func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1} -} - -func (x *GetIamPolicyRequest) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { - if x != nil { - return x.Options - } - return nil -} - -// Request message for `TestIamPermissions` method. -type TestIamPermissionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // REQUIRED: The resource for which the policy detail is being requested. - // See the operation documentation for the appropriate value for this field. - Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // The set of permissions to check for the `resource`. Permissions with - // wildcards (such as '*' or 'storage.*') are not allowed. For more - // information see - // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). - Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` -} - -func (x *TestIamPermissionsRequest) Reset() { - *x = TestIamPermissionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TestIamPermissionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TestIamPermissionsRequest) ProtoMessage() {} - -func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead. -func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { - return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2} -} - -func (x *TestIamPermissionsRequest) GetResource() string { - if x != nil { - return x.Resource - } - return "" -} - -func (x *TestIamPermissionsRequest) GetPermissions() []string { - if x != nil { - return x.Permissions - } - return nil -} - -// Response message for `TestIamPermissions` method. 
-type TestIamPermissionsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A subset of `TestPermissionsRequest.permissions` that the caller is - // allowed. - Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` -} - -func (x *TestIamPermissionsResponse) Reset() { - *x = TestIamPermissionsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TestIamPermissionsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TestIamPermissionsResponse) ProtoMessage() {} - -func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead. -func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { - return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3} -} - -func (x *TestIamPermissionsResponse) GetPermissions() []string { - if x != nil { - return x.Permissions - } - return nil -} - -var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor - -var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, - 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, - 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, - 0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, - 0x0a, 0x13, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, - 0x01, 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, - 0x70, 
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x77, 0x0a, - 0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, - 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, - 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, - 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x9a, 0x01, 0x0a, 0x12, - 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, - 0x01, 0x2a, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, - 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, - 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once - file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc -) - -func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { - file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() { - file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData) - }) - return file_google_iam_v1_iam_policy_proto_rawDescData -} - -var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ - (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest - (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest - (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest - (*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse - (*Policy)(nil), // 4: google.iam.v1.Policy - (*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask - (*GetPolicyOptions)(nil), // 6: google.iam.v1.GetPolicyOptions 
-} -var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{ - 4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy - 5, // 1: google.iam.v1.SetIamPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask - 6, // 2: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions - 0, // 3: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 1, // 4: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 2, // 5: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 4, // 6: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy - 4, // 7: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy - 3, // 8: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 6, // [6:9] is the sub-list for method output_type - 3, // [3:6] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_google_iam_v1_iam_policy_proto_init() } -func file_google_iam_v1_iam_policy_proto_init() { - if File_google_iam_v1_iam_policy_proto != nil { - return - } - file_google_iam_v1_options_proto_init() - file_google_iam_v1_policy_proto_init() - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetIamPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetIamPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TestIamPermissionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TestIamPermissionsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_iam_v1_iam_policy_proto_goTypes, - DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs, - MessageInfos: file_google_iam_v1_iam_policy_proto_msgTypes, - }.Build() - File_google_iam_v1_iam_policy_proto = out.File - file_google_iam_v1_iam_policy_proto_rawDesc = nil - file_google_iam_v1_iam_policy_proto_goTypes = nil - file_google_iam_v1_iam_policy_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion6 - -// IAMPolicyClient is the client API for IAMPolicy service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type IAMPolicyClient interface { - // Sets the access control policy on the specified resource. Replaces any - // existing policy. - // - // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. - SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) - // Gets the access control policy for a resource. - // Returns an empty policy if the resource exists and does not have a policy - // set. - GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) - // Returns permissions that a caller has on the specified resource. - // If the resource does not exist, this will return an empty set of - // permissions, not a `NOT_FOUND` error. - // - // Note: This operation is designed to be used for building permission-aware - // UIs and command-line tools, not for authorization checking. This operation - // may "fail open" without warning. - TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) -} - -type iAMPolicyClient struct { - cc grpc.ClientConnInterface -} - -func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { - return &iAMPolicyClient{cc} -} - -func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { - out := new(Policy) - err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) { - out := new(Policy) - err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) { - out := new(TestIamPermissionsResponse) - err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// IAMPolicyServer is the server API for IAMPolicy service. -type IAMPolicyServer interface { - // Sets the access control policy on the specified resource. Replaces any - // existing policy. - // - // Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors. - SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) - // Gets the access control policy for a resource. - // Returns an empty policy if the resource exists and does not have a policy - // set. - GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) - // Returns permissions that a caller has on the specified resource. - // If the resource does not exist, this will return an empty set of - // permissions, not a `NOT_FOUND` error. - // - // Note: This operation is designed to be used for building permission-aware - // UIs and command-line tools, not for authorization checking. This operation - // may "fail open" without warning. 
- TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) -} - -// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations. -type UnimplementedIAMPolicyServer struct { -} - -func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") -} -func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") -} -func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") -} - -func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { - s.RegisterService(&_IAMPolicy_serviceDesc, srv) -} - -func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).SetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).GetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TestIamPermissionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IAMPolicyServer).TestIamPermissions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _IAMPolicy_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.iam.v1.IAMPolicy", - HandlerType: (*IAMPolicyServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SetIamPolicy", - Handler: _IAMPolicy_SetIamPolicy_Handler, - }, - { - MethodName: "GetIamPolicy", - Handler: _IAMPolicy_GetIamPolicy_Handler, - }, - { - MethodName: "TestIamPermissions", - Handler: _IAMPolicy_TestIamPermissions_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "google/iam/v1/iam_policy.proto", -} diff --git 
a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go deleted file mode 100644 index 3f854fe49..000000000 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.32.0 -// protoc v4.23.2 -// source: google/iam/v1/options.proto - -package iampb - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Encapsulates settings provided to GetIamPolicy. -type GetPolicyOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The maximum policy version that will be used to format the - // policy. - // - // Valid values are 0, 1, and 3. Requests specifying an invalid value will be - // rejected. - // - // Requests for policies with any conditional role bindings must specify - // version 3. Policies with no conditional role bindings may specify any valid - // value or leave the field unset. - // - // The policy in the response might use the policy version that you specified, - // or it might use a lower policy version. For example, if you specify version - // 3, but the policy has no conditional role bindings, the response uses - // version 1. - // - // To learn which resources support conditions in their IAM policies, see the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). - RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` -} - -func (x *GetPolicyOptions) Reset() { - *x = GetPolicyOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_options_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetPolicyOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPolicyOptions) ProtoMessage() {} - -func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_options_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead. 
-func (*GetPolicyOptions) Descriptor() ([]byte, []int) { - return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0} -} - -func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 { - if x != nil { - return x.RequestedPolicyVersion - } - return 0 -} - -var File_google_iam_v1_options_proto protoreflect.FileDescriptor - -var file_google_iam_v1_options_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x4c, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0x0a, 0x11, 0x63, 0x6f, - 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, - 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, - 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_google_iam_v1_options_proto_rawDescOnce sync.Once - file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc -) - -func file_google_iam_v1_options_proto_rawDescGZIP() []byte { - file_google_iam_v1_options_proto_rawDescOnce.Do(func() { - file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData) - }) - return file_google_iam_v1_options_proto_rawDescData -} - -var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_iam_v1_options_proto_goTypes = []interface{}{ - (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions -} -var file_google_iam_v1_options_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_iam_v1_options_proto_init() } -func file_google_iam_v1_options_proto_init() { - if File_google_iam_v1_options_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPolicyOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_iam_v1_options_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_iam_v1_options_proto_goTypes, - DependencyIndexes: file_google_iam_v1_options_proto_depIdxs, - MessageInfos: file_google_iam_v1_options_proto_msgTypes, - }.Build() - File_google_iam_v1_options_proto = out.File - file_google_iam_v1_options_proto_rawDesc = nil - file_google_iam_v1_options_proto_goTypes = nil - file_google_iam_v1_options_proto_depIdxs = nil -} diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go deleted file mode 100644 index dfc60661a..000000000 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ /dev/null @@ -1,1180 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.32.0 -// protoc v4.23.2 -// source: google/iam/v1/policy.proto - -package iampb - -import ( - reflect "reflect" - sync "sync" - - expr "google.golang.org/genproto/googleapis/type/expr" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The list of valid permission types for which logging can be configured. -// Admin writes are always logged, and are not configurable. -type AuditLogConfig_LogType int32 - -const ( - // Default case. Should never be this. - AuditLogConfig_LOG_TYPE_UNSPECIFIED AuditLogConfig_LogType = 0 - // Admin reads. Example: CloudIAM getIamPolicy - AuditLogConfig_ADMIN_READ AuditLogConfig_LogType = 1 - // Data writes. Example: CloudSQL Users create - AuditLogConfig_DATA_WRITE AuditLogConfig_LogType = 2 - // Data reads. Example: CloudSQL Users list - AuditLogConfig_DATA_READ AuditLogConfig_LogType = 3 -) - -// Enum value maps for AuditLogConfig_LogType. 
-var ( - AuditLogConfig_LogType_name = map[int32]string{ - 0: "LOG_TYPE_UNSPECIFIED", - 1: "ADMIN_READ", - 2: "DATA_WRITE", - 3: "DATA_READ", - } - AuditLogConfig_LogType_value = map[string]int32{ - "LOG_TYPE_UNSPECIFIED": 0, - "ADMIN_READ": 1, - "DATA_WRITE": 2, - "DATA_READ": 3, - } -) - -func (x AuditLogConfig_LogType) Enum() *AuditLogConfig_LogType { - p := new(AuditLogConfig_LogType) - *p = x - return p -} - -func (x AuditLogConfig_LogType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (AuditLogConfig_LogType) Descriptor() protoreflect.EnumDescriptor { - return file_google_iam_v1_policy_proto_enumTypes[0].Descriptor() -} - -func (AuditLogConfig_LogType) Type() protoreflect.EnumType { - return &file_google_iam_v1_policy_proto_enumTypes[0] -} - -func (x AuditLogConfig_LogType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use AuditLogConfig_LogType.Descriptor instead. -func (AuditLogConfig_LogType) EnumDescriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3, 0} -} - -// The type of action performed on a Binding in a policy. -type BindingDelta_Action int32 - -const ( - // Unspecified. - BindingDelta_ACTION_UNSPECIFIED BindingDelta_Action = 0 - // Addition of a Binding. - BindingDelta_ADD BindingDelta_Action = 1 - // Removal of a Binding. - BindingDelta_REMOVE BindingDelta_Action = 2 -) - -// Enum value maps for BindingDelta_Action. -var ( - BindingDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", - } - BindingDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, - } -) - -func (x BindingDelta_Action) Enum() *BindingDelta_Action { - p := new(BindingDelta_Action) - *p = x - return p -} - -func (x BindingDelta_Action) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (BindingDelta_Action) Descriptor() protoreflect.EnumDescriptor { - return file_google_iam_v1_policy_proto_enumTypes[1].Descriptor() -} - -func (BindingDelta_Action) Type() protoreflect.EnumType { - return &file_google_iam_v1_policy_proto_enumTypes[1] -} - -func (x BindingDelta_Action) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use BindingDelta_Action.Descriptor instead. -func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5, 0} -} - -// The type of action performed on an audit configuration in a policy. -type AuditConfigDelta_Action int32 - -const ( - // Unspecified. - AuditConfigDelta_ACTION_UNSPECIFIED AuditConfigDelta_Action = 0 - // Addition of an audit configuration. - AuditConfigDelta_ADD AuditConfigDelta_Action = 1 - // Removal of an audit configuration. - AuditConfigDelta_REMOVE AuditConfigDelta_Action = 2 -) - -// Enum value maps for AuditConfigDelta_Action. 
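// Usage sketch, assuming the upstream cloud.google.com/go/iam/apiv1/iampb
// module; names are illustrative. The generated *_name/*_value maps above let
// callers convert enum values to and from their proto names without
// hand-written switch statements.
package iamexample

import "cloud.google.com/go/iam/apiv1/iampb"

// logTypeFromName maps a proto enum name such as "DATA_READ" to its LogType;
// unknown names yield LOG_TYPE_UNSPECIFIED, the zero value.
func logTypeFromName(name string) iampb.AuditLogConfig_LogType {
	return iampb.AuditLogConfig_LogType(iampb.AuditLogConfig_LogType_value[name])
}

// actionName returns the proto name of a BindingDelta action, e.g. "ADD".
func actionName(a iampb.BindingDelta_Action) string {
	return iampb.BindingDelta_Action_name[int32(a)]
}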
-var ( - AuditConfigDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", - } - AuditConfigDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, - } -) - -func (x AuditConfigDelta_Action) Enum() *AuditConfigDelta_Action { - p := new(AuditConfigDelta_Action) - *p = x - return p -} - -func (x AuditConfigDelta_Action) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (AuditConfigDelta_Action) Descriptor() protoreflect.EnumDescriptor { - return file_google_iam_v1_policy_proto_enumTypes[2].Descriptor() -} - -func (AuditConfigDelta_Action) Type() protoreflect.EnumType { - return &file_google_iam_v1_policy_proto_enumTypes[2] -} - -func (x AuditConfigDelta_Action) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use AuditConfigDelta_Action.Descriptor instead. -func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6, 0} -} - -// An Identity and Access Management (IAM) policy, which specifies access -// controls for Google Cloud resources. -// -// A `Policy` is a collection of `bindings`. A `binding` binds one or more -// `members`, or principals, to a single `role`. Principals can be user -// accounts, service accounts, Google groups, and domains (such as G Suite). A -// `role` is a named list of permissions; each `role` can be an IAM predefined -// role or a user-created custom role. -// -// For some types of Google Cloud resources, a `binding` can also specify a -// `condition`, which is a logical expression that allows access to a resource -// only if the expression evaluates to `true`. A condition can add constraints -// based on attributes of the request, the resource, or both. To learn which -// resources support conditions in their IAM policies, see the -// [IAM -// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). -// -// **JSON example:** -// -// ``` -// -// { -// "bindings": [ -// { -// "role": "roles/resourcemanager.organizationAdmin", -// "members": [ -// "user:mike@example.com", -// "group:admins@example.com", -// "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" -// ] -// }, -// { -// "role": "roles/resourcemanager.organizationViewer", -// "members": [ -// "user:eve@example.com" -// ], -// "condition": { -// "title": "expirable access", -// "description": "Does not grant access after Sep 2020", -// "expression": "request.time < -// timestamp('2020-10-01T00:00:00.000Z')", -// } -// } -// ], -// "etag": "BwWWja0YfJA=", -// "version": 3 -// } -// -// ``` -// -// **YAML example:** -// -// ``` -// -// bindings: -// - members: -// - user:mike@example.com -// - group:admins@example.com -// - domain:google.com -// - serviceAccount:my-project-id@appspot.gserviceaccount.com -// role: roles/resourcemanager.organizationAdmin -// - members: -// - user:eve@example.com -// role: roles/resourcemanager.organizationViewer -// condition: -// title: expirable access -// description: Does not grant access after Sep 2020 -// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') -// etag: BwWWja0YfJA= -// version: 3 -// -// ``` -// -// For a description of IAM and its features, see the -// [IAM documentation](https://cloud.google.com/iam/docs/). 
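// Usage sketch, assuming the upstream cloud.google.com/go/iam/apiv1/iampb and
// google.golang.org/genproto/googleapis/type/expr modules; names are
// illustrative. This is the JSON/YAML policy from the comment above expressed
// as Go literals, with version 3 set because one binding carries a condition.
package iamexample

import (
	"cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/genproto/googleapis/type/expr"
)

func examplePolicy() *iampb.Policy {
	return &iampb.Policy{
		Version: 3,
		Bindings: []*iampb.Binding{
			{
				Role: "roles/resourcemanager.organizationAdmin",
				Members: []string{
					"user:mike@example.com",
					"group:admins@example.com",
					"domain:google.com",
					"serviceAccount:my-project-id@appspot.gserviceaccount.com",
				},
			},
			{
				Role:    "roles/resourcemanager.organizationViewer",
				Members: []string{"user:eve@example.com"},
				Condition: &expr.Expr{
					Title:       "expirable access",
					Description: "Does not grant access after Sep 2020",
					Expression:  "request.time < timestamp('2020-10-01T00:00:00.000Z')",
				},
			},
		},
	}
}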
-type Policy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies the format of the policy. - // - // Valid values are `0`, `1`, and `3`. Requests that specify an invalid value - // are rejected. - // - // Any operation that affects conditional role bindings must specify version - // `3`. This requirement applies to the following operations: - // - // - Getting a policy that includes a conditional role binding - // - Adding a conditional role binding to a policy - // - Changing a conditional role binding in a policy - // - Removing any role binding, with or without a condition, from a policy - // that includes conditions - // - // **Important:** If you use IAM Conditions, you must include the `etag` field - // whenever you call `setIamPolicy`. If you omit this field, then IAM allows - // you to overwrite a version `3` policy with a version `1` policy, and all of - // the conditions in the version `3` policy are lost. - // - // If a policy does not include any conditions, operations on that policy may - // specify any valid version or leave the field unset. - // - // To learn which resources support conditions in their IAM policies, see the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). - Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - // Associates a list of `members`, or principals, with a `role`. Optionally, - // may specify a `condition` that determines how and when the `bindings` are - // applied. Each of the `bindings` must contain at least one principal. - // - // The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 - // of these principals can be Google groups. Each occurrence of a principal - // counts towards these limits. For example, if the `bindings` grant 50 - // different roles to `user:alice@example.com`, and not to any other - // principal, then you can add another 1,450 principals to the `bindings` in - // the `Policy`. - Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"` - // Specifies cloud audit logging configuration for this policy. - AuditConfigs []*AuditConfig `protobuf:"bytes,6,rep,name=audit_configs,json=auditConfigs,proto3" json:"audit_configs,omitempty"` - // `etag` is used for optimistic concurrency control as a way to help - // prevent simultaneous updates of a policy from overwriting each other. - // It is strongly suggested that systems make use of the `etag` in the - // read-modify-write cycle to perform policy updates in order to avoid race - // conditions: An `etag` is returned in the response to `getIamPolicy`, and - // systems are expected to put that etag in the request to `setIamPolicy` to - // ensure that their change will be applied to the same version of the policy. - // - // **Important:** If you use IAM Conditions, you must include the `etag` field - // whenever you call `setIamPolicy`. If you omit this field, then IAM allows - // you to overwrite a version `3` policy with a version `1` policy, and all of - // the conditions in the version `3` policy are lost. 
- Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` -} - -func (x *Policy) Reset() { - *x = Policy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Policy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Policy) ProtoMessage() {} - -func (x *Policy) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Policy.ProtoReflect.Descriptor instead. -func (*Policy) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{0} -} - -func (x *Policy) GetVersion() int32 { - if x != nil { - return x.Version - } - return 0 -} - -func (x *Policy) GetBindings() []*Binding { - if x != nil { - return x.Bindings - } - return nil -} - -func (x *Policy) GetAuditConfigs() []*AuditConfig { - if x != nil { - return x.AuditConfigs - } - return nil -} - -func (x *Policy) GetEtag() []byte { - if x != nil { - return x.Etag - } - return nil -} - -// Associates `members`, or principals, with a `role`. -type Binding struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Role that is assigned to the list of `members`, or principals. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // Specifies the principals requesting access for a Google Cloud resource. - // `members` can have the following values: - // - // - `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. - // - // - `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. - // - // - `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . - // - // - `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. - // - // - `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. - // - // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. - // - // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. - // - // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. 
If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. - // - // - `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. - Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - // The condition that is associated with this binding. - // - // If the condition evaluates to `true`, then this binding applies to the - // current request. - // - // If the condition evaluates to `false`, then this binding does not apply to - // the current request. However, a different role binding might grant the same - // role to one or more of the principals in this binding. - // - // To learn which resources support conditions in their IAM policies, see the - // [IAM - // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). - Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` -} - -func (x *Binding) Reset() { - *x = Binding{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Binding) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Binding) ProtoMessage() {} - -func (x *Binding) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Binding.ProtoReflect.Descriptor instead. -func (*Binding) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{1} -} - -func (x *Binding) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *Binding) GetMembers() []string { - if x != nil { - return x.Members - } - return nil -} - -func (x *Binding) GetCondition() *expr.Expr { - if x != nil { - return x.Condition - } - return nil -} - -// Specifies the audit configuration for a service. -// The configuration determines which permission types are logged, and what -// identities, if any, are exempted from logging. -// An AuditConfig must have one or more AuditLogConfigs. -// -// If there are AuditConfigs for both `allServices` and a specific service, -// the union of the two AuditConfigs is used for that service: the log_types -// specified in each AuditConfig are enabled, and the exempted_members in each -// AuditLogConfig are exempted. -// -// Example Policy with multiple AuditConfigs: -// -// { -// "audit_configs": [ -// { -// "service": "allServices", -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE" -// }, -// { -// "log_type": "ADMIN_READ" -// } -// ] -// }, -// { -// "service": "sampleservice.googleapis.com", -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ" -// }, -// { -// "log_type": "DATA_WRITE", -// "exempted_members": [ -// "user:aliya@example.com" -// ] -// } -// ] -// } -// ] -// } -// -// For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -// logging. It also exempts `jose@example.com` from DATA_READ logging, and -// `aliya@example.com` from DATA_WRITE logging. 
-type AuditConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Specifies a service that will be enabled for audit logging. - // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. - // `allServices` is a special value that covers all services. - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` - // The configuration for logging of each type of permission. - AuditLogConfigs []*AuditLogConfig `protobuf:"bytes,3,rep,name=audit_log_configs,json=auditLogConfigs,proto3" json:"audit_log_configs,omitempty"` -} - -func (x *AuditConfig) Reset() { - *x = AuditConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuditConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuditConfig) ProtoMessage() {} - -func (x *AuditConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuditConfig.ProtoReflect.Descriptor instead. -func (*AuditConfig) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{2} -} - -func (x *AuditConfig) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *AuditConfig) GetAuditLogConfigs() []*AuditLogConfig { - if x != nil { - return x.AuditLogConfigs - } - return nil -} - -// Provides the configuration for logging a type of permissions. -// Example: -// -// { -// "audit_log_configs": [ -// { -// "log_type": "DATA_READ", -// "exempted_members": [ -// "user:jose@example.com" -// ] -// }, -// { -// "log_type": "DATA_WRITE" -// } -// ] -// } -// -// This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting -// jose@example.com from DATA_READ logging. -type AuditLogConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The log type that this config enables. - LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` - // Specifies the identities that do not cause logging for this type of - // permission. - // Follows the same format of - // [Binding.members][google.iam.v1.Binding.members]. - ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` -} - -func (x *AuditLogConfig) Reset() { - *x = AuditLogConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuditLogConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuditLogConfig) ProtoMessage() {} - -func (x *AuditLogConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuditLogConfig.ProtoReflect.Descriptor instead. 
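// Usage sketch, assuming the upstream cloud.google.com/go/iam/apiv1/iampb
// module; names are illustrative. The multi-service audit configuration from
// the comments above, expressed with AuditConfig and AuditLogConfig literals.
package iamexample

import "cloud.google.com/go/iam/apiv1/iampb"

func exampleAuditConfigs() []*iampb.AuditConfig {
	return []*iampb.AuditConfig{
		{
			Service: "allServices",
			AuditLogConfigs: []*iampb.AuditLogConfig{
				{LogType: iampb.AuditLogConfig_DATA_READ, ExemptedMembers: []string{"user:jose@example.com"}},
				{LogType: iampb.AuditLogConfig_DATA_WRITE},
				{LogType: iampb.AuditLogConfig_ADMIN_READ},
			},
		},
		{
			Service: "sampleservice.googleapis.com",
			AuditLogConfigs: []*iampb.AuditLogConfig{
				{LogType: iampb.AuditLogConfig_DATA_READ},
				{LogType: iampb.AuditLogConfig_DATA_WRITE, ExemptedMembers: []string{"user:aliya@example.com"}},
			},
		},
	}
}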
-func (*AuditLogConfig) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3} -} - -func (x *AuditLogConfig) GetLogType() AuditLogConfig_LogType { - if x != nil { - return x.LogType - } - return AuditLogConfig_LOG_TYPE_UNSPECIFIED -} - -func (x *AuditLogConfig) GetExemptedMembers() []string { - if x != nil { - return x.ExemptedMembers - } - return nil -} - -// The difference delta between two policies. -type PolicyDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The delta for Bindings between two policies. - BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` - // The delta for AuditConfigs between two policies. - AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` -} - -func (x *PolicyDelta) Reset() { - *x = PolicyDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PolicyDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PolicyDelta) ProtoMessage() {} - -func (x *PolicyDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PolicyDelta.ProtoReflect.Descriptor instead. -func (*PolicyDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4} -} - -func (x *PolicyDelta) GetBindingDeltas() []*BindingDelta { - if x != nil { - return x.BindingDeltas - } - return nil -} - -func (x *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { - if x != nil { - return x.AuditConfigDeltas - } - return nil -} - -// One delta entry for Binding. Each individual change (only one member in each -// entry) to a binding will be a separate entry. -type BindingDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The action that was performed on a Binding. - // Required - Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` - // Role that is assigned to `members`. - // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. - // Required - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - // A single identity requesting access for a Google Cloud resource. - // Follows the same format of Binding.members. - // Required - Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` - // The condition that is associated with this binding. 
- Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` -} - -func (x *BindingDelta) Reset() { - *x = BindingDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BindingDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BindingDelta) ProtoMessage() {} - -func (x *BindingDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BindingDelta.ProtoReflect.Descriptor instead. -func (*BindingDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{5} -} - -func (x *BindingDelta) GetAction() BindingDelta_Action { - if x != nil { - return x.Action - } - return BindingDelta_ACTION_UNSPECIFIED -} - -func (x *BindingDelta) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *BindingDelta) GetMember() string { - if x != nil { - return x.Member - } - return "" -} - -func (x *BindingDelta) GetCondition() *expr.Expr { - if x != nil { - return x.Condition - } - return nil -} - -// One delta entry for AuditConfig. Each individual change (only one -// exempted_member in each entry) to a AuditConfig will be a separate entry. -type AuditConfigDelta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The action that was performed on an audit configuration in a policy. - // Required - Action AuditConfigDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.AuditConfigDelta_Action" json:"action,omitempty"` - // Specifies a service that was configured for Cloud Audit Logging. - // For example, `storage.googleapis.com`, `cloudsql.googleapis.com`. - // `allServices` is a special value that covers all services. - // Required - Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` - // A single identity that is exempted from "data access" audit - // logging for the `service` specified above. - // Follows the same format of Binding.members. - ExemptedMember string `protobuf:"bytes,3,opt,name=exempted_member,json=exemptedMember,proto3" json:"exempted_member,omitempty"` - // Specifies the log_type that was be enabled. ADMIN_ACTIVITY is always - // enabled, and cannot be configured. - // Required - LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` -} - -func (x *AuditConfigDelta) Reset() { - *x = AuditConfigDelta{} - if protoimpl.UnsafeEnabled { - mi := &file_google_iam_v1_policy_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuditConfigDelta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuditConfigDelta) ProtoMessage() {} - -func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message { - mi := &file_google_iam_v1_policy_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuditConfigDelta.ProtoReflect.Descriptor instead. 
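// Sketch under the same iampb assumption; names are illustrative. It shows how
// the delta messages above might be populated when comparing two policies:
// each changed role/member pair becomes its own BindingDelta, matching the
// one-member-per-entry contract documented above. Conditions are ignored here.
package iamexample

import "cloud.google.com/go/iam/apiv1/iampb"

func roleMembers(p *iampb.Policy) map[string]map[string]bool {
	set := make(map[string]map[string]bool)
	for _, b := range p.GetBindings() {
		if set[b.GetRole()] == nil {
			set[b.GetRole()] = make(map[string]bool)
		}
		for _, m := range b.GetMembers() {
			set[b.GetRole()][m] = true
		}
	}
	return set
}

// diffBindings returns an ADD delta for every role/member pair present only in
// after and a REMOVE delta for every pair present only in before.
func diffBindings(before, after *iampb.Policy) *iampb.PolicyDelta {
	was, is := roleMembers(before), roleMembers(after)
	var deltas []*iampb.BindingDelta
	for role, members := range is {
		for m := range members {
			if !was[role][m] {
				deltas = append(deltas, &iampb.BindingDelta{Action: iampb.BindingDelta_ADD, Role: role, Member: m})
			}
		}
	}
	for role, members := range was {
		for m := range members {
			if !is[role][m] {
				deltas = append(deltas, &iampb.BindingDelta{Action: iampb.BindingDelta_REMOVE, Role: role, Member: m})
			}
		}
	}
	return &iampb.PolicyDelta{BindingDeltas: deltas}
}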
-func (*AuditConfigDelta) Descriptor() ([]byte, []int) { - return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{6} -} - -func (x *AuditConfigDelta) GetAction() AuditConfigDelta_Action { - if x != nil { - return x.Action - } - return AuditConfigDelta_ACTION_UNSPECIFIED -} - -func (x *AuditConfigDelta) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *AuditConfigDelta) GetExemptedMember() string { - if x != nil { - return x.ExemptedMember - } - return "" -} - -func (x *AuditConfigDelta) GetLogType() string { - if x != nil { - return x.LogType - } - return "" -} - -var File_google_iam_v1_policy_proto protoreflect.FileDescriptor - -var file_google_iam_v1_policy_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x01, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x52, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3f, 0x0a, 0x0d, - 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0c, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x65, 0x74, 0x61, - 0x67, 0x22, 0x68, 0x0a, 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, - 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, - 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x72, 0x0a, 0x0b, 0x41, - 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x5f, 0x6c, 0x6f, - 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, - 0x61, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x73, 0x22, - 0xd1, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x40, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6c, 0x6f, 0x67, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, - 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, - 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x22, - 0x52, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x4c, 0x4f, - 0x47, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x44, 0x4d, 0x49, 0x4e, 0x5f, 0x52, 0x45, - 0x41, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x57, 0x52, 0x49, - 0x54, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x52, 0x45, 0x41, - 0x44, 0x10, 0x03, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x44, 0x65, - 0x6c, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, - 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x75, 0x64, 0x69, 0x74, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, 0x0c, 0x42, 0x69, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x06, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, - 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 
0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, - 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x22, 0xe7, 0x01, 0x0a, 0x10, 0x41, 0x75, - 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3e, - 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, - 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, - 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x6d, - 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x22, 0x35, 0x0a, 0x06, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, - 0x45, 0x10, 0x02, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, - 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_iam_v1_policy_proto_rawDescOnce sync.Once - file_google_iam_v1_policy_proto_rawDescData = file_google_iam_v1_policy_proto_rawDesc -) - -func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { - file_google_iam_v1_policy_proto_rawDescOnce.Do(func() { - file_google_iam_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_policy_proto_rawDescData) - }) - return file_google_iam_v1_policy_proto_rawDescData -} - -var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_google_iam_v1_policy_proto_goTypes = []interface{}{ - (AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType - (BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action - (AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action - (*Policy)(nil), // 3: google.iam.v1.Policy - (*Binding)(nil), // 4: google.iam.v1.Binding - (*AuditConfig)(nil), // 5: google.iam.v1.AuditConfig - (*AuditLogConfig)(nil), // 6: google.iam.v1.AuditLogConfig - (*PolicyDelta)(nil), // 7: 
google.iam.v1.PolicyDelta - (*BindingDelta)(nil), // 8: google.iam.v1.BindingDelta - (*AuditConfigDelta)(nil), // 9: google.iam.v1.AuditConfigDelta - (*expr.Expr)(nil), // 10: google.type.Expr -} -var file_google_iam_v1_policy_proto_depIdxs = []int32{ - 4, // 0: google.iam.v1.Policy.bindings:type_name -> google.iam.v1.Binding - 5, // 1: google.iam.v1.Policy.audit_configs:type_name -> google.iam.v1.AuditConfig - 10, // 2: google.iam.v1.Binding.condition:type_name -> google.type.Expr - 6, // 3: google.iam.v1.AuditConfig.audit_log_configs:type_name -> google.iam.v1.AuditLogConfig - 0, // 4: google.iam.v1.AuditLogConfig.log_type:type_name -> google.iam.v1.AuditLogConfig.LogType - 8, // 5: google.iam.v1.PolicyDelta.binding_deltas:type_name -> google.iam.v1.BindingDelta - 9, // 6: google.iam.v1.PolicyDelta.audit_config_deltas:type_name -> google.iam.v1.AuditConfigDelta - 1, // 7: google.iam.v1.BindingDelta.action:type_name -> google.iam.v1.BindingDelta.Action - 10, // 8: google.iam.v1.BindingDelta.condition:type_name -> google.type.Expr - 2, // 9: google.iam.v1.AuditConfigDelta.action:type_name -> google.iam.v1.AuditConfigDelta.Action - 10, // [10:10] is the sub-list for method output_type - 10, // [10:10] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name -} - -func init() { file_google_iam_v1_policy_proto_init() } -func file_google_iam_v1_policy_proto_init() { - if File_google_iam_v1_policy_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Policy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Binding); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuditConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuditLogConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PolicyDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BindingDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuditConfigDelta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_iam_v1_policy_proto_rawDesc, - NumEnums: 3, - NumMessages: 7, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_iam_v1_policy_proto_goTypes, - DependencyIndexes: file_google_iam_v1_policy_proto_depIdxs, - EnumInfos: file_google_iam_v1_policy_proto_enumTypes, - MessageInfos: file_google_iam_v1_policy_proto_msgTypes, - }.Build() - File_google_iam_v1_policy_proto = out.File - file_google_iam_v1_policy_proto_rawDesc = nil - file_google_iam_v1_policy_proto_goTypes = nil - file_google_iam_v1_policy_proto_depIdxs = nil -} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go deleted file mode 100644 index f004a7afb..000000000 --- a/vendor/cloud.google.com/go/iam/iam.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package iam supports the resource-specific operations of Google Cloud -// IAM (Identity and Access Management) for the Google Cloud Libraries. -// See https://cloud.google.com/iam for more about IAM. -// -// Users of the Google Cloud Libraries will typically not use this package -// directly. Instead they will begin with some resource that supports IAM, like -// a pubsub topic, and call its IAM method to get a Handle for that resource. -package iam - -import ( - "context" - "fmt" - "time" - - pb "cloud.google.com/go/iam/apiv1/iampb" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -// client abstracts the IAMPolicy API to allow multiple implementations. -type client interface { - Get(ctx context.Context, resource string) (*pb.Policy, error) - Set(ctx context.Context, resource string, p *pb.Policy) error - Test(ctx context.Context, resource string, perms []string) ([]string, error) - GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) -} - -// grpcClient implements client for the standard gRPC-based IAMPolicy service. 
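// Usage sketch for the package documented above, as consumed from the upstream
// cloud.google.com/go/iam module. Callers normally reach a Handle through a
// resource client rather than constructing one directly; Pub/Sub is used here
// only as an example resource, and the project and topic names are
// placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()
	client, err := pubsub.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	handle := client.Topic("my-topic").IAM() // *iam.Handle for the topic
	policy, err := handle.Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, role := range policy.Roles() {
		fmt.Println(role, policy.Members(role))
	}
}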
-type grpcClient struct { - c pb.IAMPolicyClient -} - -var withRetry = gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 100 * time.Millisecond, - Max: 60 * time.Second, - Multiplier: 1.3, - }) -}) - -func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { - return g.GetWithVersion(ctx, resource, 1) -} - -func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) { - var proto *pb.Policy - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) - ctx = insertMetadata(ctx, md) - - err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { - var err error - proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{ - Resource: resource, - Options: &pb.GetPolicyOptions{ - RequestedPolicyVersion: requestedPolicyVersion, - }, - }) - return err - }, withRetry) - if err != nil { - return nil, err - } - return proto, nil -} - -func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) - ctx = insertMetadata(ctx, md) - - return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { - _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ - Resource: resource, - Policy: p, - }) - return err - }, withRetry) -} - -func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { - var res *pb.TestIamPermissionsResponse - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) - ctx = insertMetadata(ctx, md) - - err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { - var err error - res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ - Resource: resource, - Permissions: perms, - }) - return err - }, withRetry) - if err != nil { - return nil, err - } - return res.Permissions, nil -} - -// A Handle provides IAM operations for a resource. -type Handle struct { - c client - resource string -} - -// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions). -type Handle3 struct { - c client - resource string - version int32 -} - -// InternalNewHandle is for use by the Google Cloud Libraries only. -// -// InternalNewHandle returns a Handle for resource. -// The conn parameter refers to a server that must support the IAMPolicy service. -func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle { - return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource) -} - -// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only. -// -// InternalNewHandleClient returns a Handle for resource using the given -// grpc service that implements IAM as a mixin -func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle { - return InternalNewHandleClient(&grpcClient{c: c}, resource) -} - -// InternalNewHandleClient is for use by the Google Cloud Libraries only. -// -// InternalNewHandleClient returns a Handle for resource using the given -// client implementation. 
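// Sketch, assuming the github.com/googleapis/gax-go/v2 and google.golang.org/grpc
// modules used above; names are illustrative. It reuses the same
// retry-on-Unavailable/DeadlineExceeded policy the deleted grpcClient applies
// to its RPCs, for any idempotent call.
package iamexample

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

var retryTransient = gax.WithRetry(func() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.DeadlineExceeded,
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        60 * time.Second,
		Multiplier: 1.3,
	})
})

// callWithRetry retries f with exponential backoff until it succeeds, fails
// with a non-retryable gRPC code, or ctx is done.
func callWithRetry(ctx context.Context, f func(ctx context.Context) error) error {
	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		return f(ctx)
	}, retryTransient)
}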
-func InternalNewHandleClient(c client, resource string) *Handle { - return &Handle{ - c: c, - resource: resource, - } -} - -// V3 returns a Handle3, which is like Handle except it sets -// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3 -// when storing a policy. -func (h *Handle) V3() *Handle3 { - return &Handle3{ - c: h.c, - resource: h.resource, - version: 3, - } -} - -// Policy retrieves the IAM policy for the resource. -func (h *Handle) Policy(ctx context.Context) (*Policy, error) { - proto, err := h.c.Get(ctx, h.resource) - if err != nil { - return nil, err - } - return &Policy{InternalProto: proto}, nil -} - -// SetPolicy replaces the resource's current policy with the supplied Policy. -// -// If policy was created from a prior call to Get, then the modification will -// only succeed if the policy has not changed since the Get. -func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { - return h.c.Set(ctx, h.resource, policy.InternalProto) -} - -// TestPermissions returns the subset of permissions that the caller has on the resource. -func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { - return h.c.Test(ctx, h.resource, permissions) -} - -// A RoleName is a name representing a collection of permissions. -type RoleName string - -// Common role names. -const ( - Owner RoleName = "roles/owner" - Editor RoleName = "roles/editor" - Viewer RoleName = "roles/viewer" -) - -const ( - // AllUsers is a special member that denotes all users, even unauthenticated ones. - AllUsers = "allUsers" - - // AllAuthenticatedUsers is a special member that denotes all authenticated users. - AllAuthenticatedUsers = "allAuthenticatedUsers" -) - -// A Policy is a list of Bindings representing roles -// granted to members. -// -// The zero Policy is a valid policy with no bindings. -type Policy struct { - // TODO(jba): when type aliases are available, put Policy into an internal package - // and provide an exported alias here. - - // This field is exported for use by the Google Cloud Libraries only. - // It may become unexported in a future release. - InternalProto *pb.Policy -} - -// Members returns the list of members with the supplied role. -// The return value should not be modified. Use Add and Remove -// to modify the members of a role. -func (p *Policy) Members(r RoleName) []string { - b := p.binding(r) - if b == nil { - return nil - } - return b.Members -} - -// HasRole reports whether member has role r. -func (p *Policy) HasRole(member string, r RoleName) bool { - return memberIndex(member, p.binding(r)) >= 0 -} - -// Add adds member member to role r if it is not already present. -// A new binding is created if there is no binding for the role. -func (p *Policy) Add(member string, r RoleName) { - b := p.binding(r) - if b == nil { - if p.InternalProto == nil { - p.InternalProto = &pb.Policy{} - } - p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ - Role: string(r), - Members: []string{member}, - }) - return - } - if memberIndex(member, b) < 0 { - b.Members = append(b.Members, member) - return - } -} - -// Remove removes member from role r if it is present. 
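// Usage sketch for the Handle and Policy helpers above, consumed from the
// upstream cloud.google.com/go/iam module; names are illustrative. It performs
// the read-modify-write cycle the SetPolicy comment describes.
package iamexample

import (
	"context"

	"cloud.google.com/go/iam"
)

// grantEditor adds member to roles/editor on the resource behind h. The policy
// carries the etag from the Get, so the Set fails if the policy changed
// concurrently and the caller can simply retry.
func grantEditor(ctx context.Context, h *iam.Handle, member string) error {
	policy, err := h.Policy(ctx)
	if err != nil {
		return err
	}
	if policy.HasRole(member, iam.Editor) {
		return nil // already granted
	}
	policy.Add(member, iam.Editor)
	return h.SetPolicy(ctx, policy)
}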
-func (p *Policy) Remove(member string, r RoleName) { - bi := p.bindingIndex(r) - if bi < 0 { - return - } - bindings := p.InternalProto.Bindings - b := bindings[bi] - mi := memberIndex(member, b) - if mi < 0 { - return - } - // Order doesn't matter for bindings or members, so to remove, move the last item - // into the removed spot and shrink the slice. - if len(b.Members) == 1 { - // Remove binding. - last := len(bindings) - 1 - bindings[bi] = bindings[last] - bindings[last] = nil - p.InternalProto.Bindings = bindings[:last] - return - } - // Remove member. - // TODO(jba): worry about multiple copies of m? - last := len(b.Members) - 1 - b.Members[mi] = b.Members[last] - b.Members[last] = "" - b.Members = b.Members[:last] -} - -// Roles returns the names of all the roles that appear in the Policy. -func (p *Policy) Roles() []RoleName { - if p.InternalProto == nil { - return nil - } - var rns []RoleName - for _, b := range p.InternalProto.Bindings { - rns = append(rns, RoleName(b.Role)) - } - return rns -} - -// binding returns the Binding for the suppied role, or nil if there isn't one. -func (p *Policy) binding(r RoleName) *pb.Binding { - i := p.bindingIndex(r) - if i < 0 { - return nil - } - return p.InternalProto.Bindings[i] -} - -func (p *Policy) bindingIndex(r RoleName) int { - if p.InternalProto == nil { - return -1 - } - for i, b := range p.InternalProto.Bindings { - if b.Role == string(r) { - return i - } - } - return -1 -} - -// memberIndex returns the index of m in b's Members, or -1 if not found. -func memberIndex(m string, b *pb.Binding) int { - if b == nil { - return -1 - } - for i, mm := range b.Members { - if mm == m { - return i - } - } - return -1 -} - -// insertMetadata inserts metadata into the given context -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - -// A Policy3 is a list of Bindings representing roles granted to members. -// -// The zero Policy3 is a valid policy with no bindings. -// -// It is similar to a Policy, except a Policy3 provides direct access to the -// list of Bindings. -// -// The policy version is always set to 3. -type Policy3 struct { - etag []byte - Bindings []*pb.Binding -} - -// Policy retrieves the IAM policy for the resource. -// -// requestedPolicyVersion is always set to 3. -func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) { - proto, err := h.c.GetWithVersion(ctx, h.resource, h.version) - if err != nil { - return nil, err - } - return &Policy3{ - Bindings: proto.Bindings, - etag: proto.Etag, - }, nil -} - -// SetPolicy replaces the resource's current policy with the supplied Policy. -// -// If policy was created from a prior call to Get, then the modification will -// only succeed if the policy has not changed since the Get. -func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error { - return h.c.Set(ctx, h.resource, &pb.Policy{ - Bindings: policy.Bindings, - Etag: policy.etag, - Version: h.version, - }) -} - -// TestPermissions returns the subset of permissions that the caller has on the resource. 
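// Sketch for the version-3 surface above, assuming the upstream
// cloud.google.com/go/iam, cloud.google.com/go/iam/apiv1/iampb and
// google.golang.org/genproto/googleapis/type/expr modules; names and the
// expiry argument are illustrative. Conditions require the V3 handle, since
// Handle/Policy cannot express them.
package iamexample

import (
	"context"

	"cloud.google.com/go/iam"
	"cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/genproto/googleapis/type/expr"
)

// grantUntil grants role to member only while request.time is before expiry
// (an RFC 3339 timestamp such as "2020-10-01T00:00:00Z").
func grantUntil(ctx context.Context, h *iam.Handle, member, role, expiry string) error {
	h3 := h.V3()
	p, err := h3.Policy(ctx) // fetched at version 3, so existing conditions survive
	if err != nil {
		return err
	}
	p.Bindings = append(p.Bindings, &iampb.Binding{
		Role:    role,
		Members: []string{member},
		Condition: &expr.Expr{
			Title:      "temporary access",
			Expression: "request.time < timestamp('" + expiry + "')",
		},
	})
	return h3.SetPolicy(ctx, p) // stored as a version-3 policy using the etag from Policy
}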
-func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
-	return h.c.Test(ctx, h.resource, permissions)
-}
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
deleted file mode 100644
index c9a57ccdd..000000000
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ /dev/null
@@ -1,2712 +0,0 @@
-{
-  "cloud.google.com/go/accessapproval/apiv1": {
-    "api_shortname": "accessapproval",
-    "distribution_name": "cloud.google.com/go/accessapproval/apiv1",
-    "description": "Access Approval API",
-    "language": "go",
-    "client_library_type": "generated",
-    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accessapproval/latest/apiv1",
-    "release_level": "stable",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/accesscontextmanager/apiv1": {
-    "api_shortname": "accesscontextmanager",
-    "distribution_name": "cloud.google.com/go/accesscontextmanager/apiv1",
-    "description": "Access Context Manager API",
-    "language": "go",
-    "client_library_type": "generated",
-    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accesscontextmanager/latest/apiv1",
-    "release_level": "stable",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/advisorynotifications/apiv1": {
-    "api_shortname": "advisorynotifications",
-    "distribution_name": "cloud.google.com/go/advisorynotifications/apiv1",
-    "description": "Advisory Notifications API",
-    "language": "go",
-    "client_library_type": "generated",
-    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/advisorynotifications/latest/apiv1",
-    "release_level": "stable",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/ai/generativelanguage/apiv1": {
-    "api_shortname": "generativelanguage",
-    "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1",
-    "description": "Generative Language API",
-    "language": "go",
-    "client_library_type": "generated",
-    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1",
-    "release_level": "preview",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/ai/generativelanguage/apiv1beta": {
-    "api_shortname": "generativelanguage",
-    "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta",
-    "description": "Generative Language API",
-    "language": "go",
-    "client_library_type": "generated",
-    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1beta",
-    "release_level": "preview",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/ai/generativelanguage/apiv1beta2": {
-    "api_shortname": "generativelanguage",
-    "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta2",
-    "description": "Generative Language API",
-    "language": "go",
-    "client_library_type": "generated",
-    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1beta2",
-    "release_level": "preview",
-    "library_type": "GAPIC_AUTO"
-  },
-  "cloud.google.com/go/aiplatform/apiv1": {
-    "api_shortname": "aiplatform",
-    "distribution_name": "cloud.google.com/go/aiplatform/apiv1",
-    "description": "Vertex AI API",
-    "language": "go",
-    "client_library_type": "generated",
-    "client_documentation":
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/aiplatform/apiv1beta1": { - "api_shortname": "aiplatform", - "distribution_name": "cloud.google.com/go/aiplatform/apiv1beta1", - "description": "Vertex AI API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/alloydb/apiv1": { - "api_shortname": "alloydb", - "distribution_name": "cloud.google.com/go/alloydb/apiv1", - "description": "AlloyDB API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/alloydb/apiv1alpha": { - "api_shortname": "alloydb", - "distribution_name": "cloud.google.com/go/alloydb/apiv1alpha", - "description": "AlloyDB API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/alloydb/apiv1beta": { - "api_shortname": "alloydb", - "distribution_name": "cloud.google.com/go/alloydb/apiv1beta", - "description": "AlloyDB API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/alloydb/connectors/apiv1": { - "api_shortname": "connectors", - "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1", - "description": "AlloyDB connectors", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/alloydb/connectors/apiv1alpha": { - "api_shortname": "connectors", - "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1alpha", - "description": "AlloyDB connectors", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/alloydb/connectors/apiv1beta": { - "api_shortname": "connectors", - "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1beta", - "description": "AlloyDB connectors", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/analytics/admin/apiv1alpha": { - "api_shortname": "analyticsadmin", - "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", - "description": "Google Analytics Admin API", - "language": "go", - "client_library_type": "generated", - "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/analytics/latest/admin/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/apigateway/apiv1": { - "api_shortname": "apigateway", - "distribution_name": "cloud.google.com/go/apigateway/apiv1", - "description": "API Gateway API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigateway/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/apigeeconnect/apiv1": { - "api_shortname": "apigeeconnect", - "distribution_name": "cloud.google.com/go/apigeeconnect/apiv1", - "description": "Apigee Connect API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeconnect/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/apigeeregistry/apiv1": { - "api_shortname": "apigeeregistry", - "distribution_name": "cloud.google.com/go/apigeeregistry/apiv1", - "description": "Apigee Registry API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/apikeys/apiv2": { - "api_shortname": "apikeys", - "distribution_name": "cloud.google.com/go/apikeys/apiv2", - "description": "API Keys API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/appengine/apiv1": { - "api_shortname": "appengine", - "distribution_name": "cloud.google.com/go/appengine/apiv1", - "description": "App Engine Admin API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/appengine/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/apps/events/subscriptions/apiv1": { - "api_shortname": "workspaceevents", - "distribution_name": "cloud.google.com/go/apps/events/subscriptions/apiv1", - "description": "Google Workspace Events API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/events/subscriptions/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/apps/meet/apiv2": { - "api_shortname": "meet", - "distribution_name": "cloud.google.com/go/apps/meet/apiv2", - "description": "Google Meet API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/meet/apiv2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/apps/meet/apiv2beta": { - "api_shortname": "meet", - "distribution_name": "cloud.google.com/go/apps/meet/apiv2beta", - "description": "Google Meet API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/meet/apiv2beta", - "release_level": "preview", - "library_type": 
"GAPIC_AUTO" - }, - "cloud.google.com/go/area120/tables/apiv1alpha1": { - "api_shortname": "area120tables", - "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1", - "description": "Area120 Tables API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/area120/latest/tables/apiv1alpha1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/artifactregistry/apiv1": { - "api_shortname": "artifactregistry", - "distribution_name": "cloud.google.com/go/artifactregistry/apiv1", - "description": "Artifact Registry API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/artifactregistry/apiv1beta2": { - "api_shortname": "artifactregistry", - "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2", - "description": "Artifact Registry API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1beta2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/asset/apiv1": { - "api_shortname": "cloudasset", - "distribution_name": "cloud.google.com/go/asset/apiv1", - "description": "Cloud Asset API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/asset/apiv1p2beta1": { - "api_shortname": "cloudasset", - "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", - "description": "Cloud Asset API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p2beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/asset/apiv1p5beta1": { - "api_shortname": "cloudasset", - "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", - "description": "Cloud Asset API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p5beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/assuredworkloads/apiv1": { - "api_shortname": "assuredworkloads", - "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1", - "description": "Assured Workloads API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/assuredworkloads/apiv1beta1": { - "api_shortname": "assuredworkloads", - "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", - "description": "Assured Workloads API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - 
"cloud.google.com/go/automl/apiv1": { - "api_shortname": "automl", - "distribution_name": "cloud.google.com/go/automl/apiv1", - "description": "Cloud AutoML API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/automl/apiv1beta1": { - "api_shortname": "automl", - "distribution_name": "cloud.google.com/go/automl/apiv1beta1", - "description": "Cloud AutoML API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/baremetalsolution/apiv2": { - "api_shortname": "baremetalsolution", - "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2", - "description": "Bare Metal Solution API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/baremetalsolution/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/batch/apiv1": { - "api_shortname": "batch", - "distribution_name": "cloud.google.com/go/batch/apiv1", - "description": "Batch API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/batch/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/beyondcorp/appconnections/apiv1": { - "api_shortname": "beyondcorp", - "distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1", - "description": "BeyondCorp API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { - "api_shortname": "beyondcorp", - "distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1", - "description": "BeyondCorp API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/beyondcorp/appgateways/apiv1": { - "api_shortname": "beyondcorp", - "distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1", - "description": "BeyondCorp API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": { - "api_shortname": "beyondcorp", - "distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1", - "description": "BeyondCorp API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - 
"cloud.google.com/go/beyondcorp/clientgateways/apiv1": { - "api_shortname": "beyondcorp", - "distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1", - "description": "BeyondCorp API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery": { - "api_shortname": "bigquery", - "distribution_name": "cloud.google.com/go/bigquery", - "description": "BigQuery", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/bigquery/analyticshub/apiv1": { - "api_shortname": "analyticshub", - "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1", - "description": "Analytics Hub API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/biglake/apiv1": { - "api_shortname": "biglake", - "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1", - "description": "BigLake API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/biglake/apiv1alpha1": { - "api_shortname": "biglake", - "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1alpha1", - "description": "BigLake API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1alpha1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/connection/apiv1": { - "api_shortname": "bigqueryconnection", - "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", - "description": "BigQuery Connection API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/connection/apiv1beta1": { - "api_shortname": "bigqueryconnection", - "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", - "description": "BigQuery Connection API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/dataexchange/apiv1beta1": { - "api_shortname": "analyticshub", - "distribution_name": "cloud.google.com/go/bigquery/dataexchange/apiv1beta1", - "description": "Analytics Hub API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/dataexchange/apiv1beta1", - "release_level": "preview", - "library_type": 
"GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/datapolicies/apiv1": { - "api_shortname": "bigquerydatapolicy", - "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1", - "description": "BigQuery Data Policy API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { - "api_shortname": "bigquerydatapolicy", - "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1", - "description": "BigQuery Data Policy API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/datatransfer/apiv1": { - "api_shortname": "bigquerydatatransfer", - "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", - "description": "BigQuery Data Transfer API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datatransfer/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/migration/apiv2": { - "api_shortname": "bigquerymigration", - "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2", - "description": "BigQuery Migration API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/migration/apiv2alpha": { - "api_shortname": "bigquerymigration", - "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2alpha", - "description": "BigQuery Migration API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/reservation/apiv1": { - "api_shortname": "bigqueryreservation", - "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", - "description": "BigQuery Reservation API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/storage/apiv1": { - "api_shortname": "bigquerystorage", - "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", - "description": "BigQuery Storage API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/storage/apiv1beta1": { - "api_shortname": "bigquerystorage", - "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", - "description": "BigQuery Storage API", - "language": "go", - "client_library_type": "generated", - "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigquery/storage/apiv1beta2": { - "api_shortname": "bigquerystorage", - "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", - "description": "BigQuery Storage API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/bigtable": { - "api_shortname": "bigtable", - "distribution_name": "cloud.google.com/go/bigtable", - "description": "Cloud BigTable", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/billing/apiv1": { - "api_shortname": "cloudbilling", - "distribution_name": "cloud.google.com/go/billing/apiv1", - "description": "Cloud Billing API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/billing/budgets/apiv1": { - "api_shortname": "billingbudgets", - "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", - "description": "Cloud Billing Budget API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/billing/budgets/apiv1beta1": { - "api_shortname": "billingbudgets", - "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", - "description": "Cloud Billing Budget API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/binaryauthorization/apiv1": { - "api_shortname": "binaryauthorization", - "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1", - "description": "Binary Authorization API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/binaryauthorization/apiv1beta1": { - "api_shortname": "binaryauthorization", - "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1", - "description": "Binary Authorization API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/certificatemanager/apiv1": { - "api_shortname": "certificatemanager", - "distribution_name": "cloud.google.com/go/certificatemanager/apiv1", - "description": "Certificate Manager API", - "language": "go", - "client_library_type": "generated", - "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/channel/apiv1": { - "api_shortname": "cloudchannel", - "distribution_name": "cloud.google.com/go/channel/apiv1", - "description": "Cloud Channel API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/channel/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/cloudbuild/apiv1/v2": { - "api_shortname": "cloudbuild", - "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", - "description": "Cloud Build API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/cloudbuild/apiv2": { - "api_shortname": "cloudbuild", - "distribution_name": "cloud.google.com/go/cloudbuild/apiv2", - "description": "Cloud Build API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/clouddms/apiv1": { - "api_shortname": "datamigration", - "distribution_name": "cloud.google.com/go/clouddms/apiv1", - "description": "Database Migration API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/clouddms/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/cloudprofiler/apiv2": { - "api_shortname": "cloudprofiler", - "distribution_name": "cloud.google.com/go/cloudprofiler/apiv2", - "description": "Cloud Profiler API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudprofiler/latest/apiv2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/cloudquotas/apiv1": { - "api_shortname": "cloudquotas", - "distribution_name": "cloud.google.com/go/cloudquotas/apiv1", - "description": "Cloud Quotas API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/cloudtasks/apiv2": { - "api_shortname": "cloudtasks", - "distribution_name": "cloud.google.com/go/cloudtasks/apiv2", - "description": "Cloud Tasks API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/cloudtasks/apiv2beta2": { - "api_shortname": "cloudtasks", - "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2", - "description": "Cloud Tasks API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - 
"cloud.google.com/go/cloudtasks/apiv2beta3": { - "api_shortname": "cloudtasks", - "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3", - "description": "Cloud Tasks API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta3", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/commerce/consumer/procurement/apiv1": { - "api_shortname": "cloudcommerceconsumerprocurement", - "distribution_name": "cloud.google.com/go/commerce/consumer/procurement/apiv1", - "description": "Cloud Commerce Consumer Procurement API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/compute/apiv1": { - "api_shortname": "compute", - "distribution_name": "cloud.google.com/go/compute/apiv1", - "description": "Google Compute Engine API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/compute/metadata": { - "api_shortname": "compute-metadata", - "distribution_name": "cloud.google.com/go/compute/metadata", - "description": "Service Metadata API", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata", - "release_level": "stable", - "library_type": "CORE" - }, - "cloud.google.com/go/confidentialcomputing/apiv1": { - "api_shortname": "confidentialcomputing", - "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1", - "description": "Confidential Computing API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/confidentialcomputing/apiv1alpha1": { - "api_shortname": "confidentialcomputing", - "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1alpha1", - "description": "Confidential Computing API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1alpha1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/config/apiv1": { - "api_shortname": "config", - "distribution_name": "cloud.google.com/go/config/apiv1", - "description": "Infrastructure Manager API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/contactcenterinsights/apiv1": { - "api_shortname": "contactcenterinsights", - "distribution_name": "cloud.google.com/go/contactcenterinsights/apiv1", - "description": "Contact Center AI Insights API", - "language": "go", - "client_library_type": "generated", - "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/contactcenterinsights/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/container/apiv1": { - "api_shortname": "container", - "distribution_name": "cloud.google.com/go/container/apiv1", - "description": "Kubernetes Engine API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/container/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/containeranalysis/apiv1beta1": { - "api_shortname": "containeranalysis", - "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", - "description": "Container Analysis API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datacatalog/apiv1": { - "api_shortname": "datacatalog", - "distribution_name": "cloud.google.com/go/datacatalog/apiv1", - "description": "Google Cloud Data Catalog API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datacatalog/apiv1beta1": { - "api_shortname": "datacatalog", - "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", - "description": "Google Cloud Data Catalog API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datacatalog/lineage/apiv1": { - "api_shortname": "datalineage", - "distribution_name": "cloud.google.com/go/datacatalog/lineage/apiv1", - "description": "Data Lineage API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/lineage/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataflow/apiv1beta3": { - "api_shortname": "dataflow", - "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", - "description": "Dataflow API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataflow/latest/apiv1beta3", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataform/apiv1alpha2": { - "api_shortname": "dataform", - "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", - "description": "Dataform API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataform/apiv1beta1": { - "api_shortname": "dataform", - "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", - "description": "Dataform API", - "language": "go", - "client_library_type": "generated", - "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datafusion/apiv1": { - "api_shortname": "datafusion", - "distribution_name": "cloud.google.com/go/datafusion/apiv1", - "description": "Cloud Data Fusion API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datafusion/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datalabeling/apiv1beta1": { - "api_shortname": "datalabeling", - "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", - "description": "Data Labeling API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datalabeling/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataplex/apiv1": { - "api_shortname": "dataplex", - "distribution_name": "cloud.google.com/go/dataplex/apiv1", - "description": "Cloud Dataplex API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataproc/v2/apiv1": { - "api_shortname": "dataproc", - "distribution_name": "cloud.google.com/go/dataproc/v2/apiv1", - "description": "Cloud Dataproc API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataproc/v2/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dataqna/apiv1alpha": { - "api_shortname": "dataqna", - "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", - "description": "Data QnA API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataqna/latest/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datastore": { - "api_shortname": "datastore", - "distribution_name": "cloud.google.com/go/datastore", - "description": "Cloud Datastore", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/datastore/admin/apiv1": { - "api_shortname": "datastore", - "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", - "description": "Cloud Datastore API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/admin/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datastream/apiv1": { - "api_shortname": "datastream", - "distribution_name": "cloud.google.com/go/datastream/apiv1", - "description": "Datastream API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/datastream/apiv1alpha1": { 
- "api_shortname": "datastream", - "distribution_name": "cloud.google.com/go/datastream/apiv1alpha1", - "description": "Datastream API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1alpha1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/debugger/apiv2": { - "api_shortname": "clouddebugger", - "distribution_name": "cloud.google.com/go/debugger/apiv2", - "description": "Stackdriver Debugger API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/deploy/apiv1": { - "api_shortname": "clouddeploy", - "distribution_name": "cloud.google.com/go/deploy/apiv1", - "description": "Cloud Deploy API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dialogflow/apiv2": { - "api_shortname": "dialogflow", - "distribution_name": "cloud.google.com/go/dialogflow/apiv2", - "description": "Dialogflow API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dialogflow/apiv2beta1": { - "api_shortname": "dialogflow", - "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1", - "description": "Dialogflow API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dialogflow/cx/apiv3": { - "api_shortname": "dialogflow", - "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", - "description": "Dialogflow API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dialogflow/cx/apiv3beta1": { - "api_shortname": "dialogflow", - "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", - "description": "Dialogflow API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/discoveryengine/apiv1": { - "api_shortname": "discoveryengine", - "distribution_name": "cloud.google.com/go/discoveryengine/apiv1", - "description": "Discovery Engine API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/discoveryengine/apiv1alpha": { - "api_shortname": "discoveryengine", - "distribution_name": "cloud.google.com/go/discoveryengine/apiv1alpha", - "description": "Discovery 
Engine API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/discoveryengine/apiv1beta": { - "api_shortname": "discoveryengine", - "distribution_name": "cloud.google.com/go/discoveryengine/apiv1beta", - "description": "Discovery Engine API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/dlp/apiv2": { - "api_shortname": "dlp", - "distribution_name": "cloud.google.com/go/dlp/apiv2", - "description": "Cloud Data Loss Prevention (DLP)", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/documentai/apiv1": { - "api_shortname": "documentai", - "distribution_name": "cloud.google.com/go/documentai/apiv1", - "description": "Cloud Document AI API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/documentai/apiv1beta3": { - "api_shortname": "documentai", - "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", - "description": "Cloud Document AI API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1beta3", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/domains/apiv1beta1": { - "api_shortname": "domains", - "distribution_name": "cloud.google.com/go/domains/apiv1beta1", - "description": "Cloud Domains API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/domains/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/edgecontainer/apiv1": { - "api_shortname": "edgecontainer", - "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", - "description": "Distributed Cloud Edge Container API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/edgenetwork/apiv1": { - "api_shortname": "edgenetwork", - "distribution_name": "cloud.google.com/go/edgenetwork/apiv1", - "description": "Distributed Cloud Edge Network API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgenetwork/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/errorreporting": { - "api_shortname": "clouderrorreporting", - "distribution_name": "cloud.google.com/go/errorreporting", - "description": "Cloud Error Reporting API", - "language": "go", - "client_library_type": "manual", - 
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", - "release_level": "preview", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/errorreporting/apiv1beta1": { - "api_shortname": "clouderrorreporting", - "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", - "description": "Error Reporting API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/errorreporting/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/essentialcontacts/apiv1": { - "api_shortname": "essentialcontacts", - "distribution_name": "cloud.google.com/go/essentialcontacts/apiv1", - "description": "Essential Contacts API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/essentialcontacts/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/eventarc/apiv1": { - "api_shortname": "eventarc", - "distribution_name": "cloud.google.com/go/eventarc/apiv1", - "description": "Eventarc API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/eventarc/publishing/apiv1": { - "api_shortname": "eventarcpublishing", - "distribution_name": "cloud.google.com/go/eventarc/publishing/apiv1", - "description": "Eventarc Publishing API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/publishing/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/filestore/apiv1": { - "api_shortname": "file", - "distribution_name": "cloud.google.com/go/filestore/apiv1", - "description": "Cloud Filestore API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/filestore/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/firestore": { - "api_shortname": "firestore", - "distribution_name": "cloud.google.com/go/firestore", - "description": "Cloud Firestore API", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/firestore/apiv1": { - "api_shortname": "firestore", - "distribution_name": "cloud.google.com/go/firestore/apiv1", - "description": "Cloud Firestore API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/firestore/apiv1/admin": { - "api_shortname": "firestore", - "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", - "description": "Cloud Firestore API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1/admin", - 
"release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/functions/apiv1": { - "api_shortname": "cloudfunctions", - "distribution_name": "cloud.google.com/go/functions/apiv1", - "description": "Cloud Functions API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/functions/apiv2": { - "api_shortname": "cloudfunctions", - "distribution_name": "cloud.google.com/go/functions/apiv2", - "description": "Cloud Functions API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/functions/apiv2beta": { - "api_shortname": "cloudfunctions", - "distribution_name": "cloud.google.com/go/functions/apiv2beta", - "description": "Cloud Functions API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/functions/metadata": { - "api_shortname": "firestore-metadata", - "distribution_name": "cloud.google.com/go/functions/metadata", - "description": "Cloud Functions", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata", - "release_level": "preview", - "library_type": "CORE" - }, - "cloud.google.com/go/gkebackup/apiv1": { - "api_shortname": "gkebackup", - "distribution_name": "cloud.google.com/go/gkebackup/apiv1", - "description": "Backup for GKE API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkebackup/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": { - "api_shortname": "connectgateway", - "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1", - "description": "Connect Gateway API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/gkehub/apiv1beta1": { - "api_shortname": "gkehub", - "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", - "description": "GKE Hub API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkehub/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/gkemulticloud/apiv1": { - "api_shortname": "gkemulticloud", - "distribution_name": "cloud.google.com/go/gkemulticloud/apiv1", - "description": "Anthos Multi-Cloud API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/gsuiteaddons/apiv1": { - "api_shortname": 
"gsuiteaddons", - "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1", - "description": "Google Workspace Add-ons API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iam": { - "api_shortname": "iam", - "distribution_name": "cloud.google.com/go/iam", - "description": "Cloud IAM", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest", - "release_level": "stable", - "library_type": "CORE" - }, - "cloud.google.com/go/iam/apiv1": { - "api_shortname": "iam-meta-api", - "distribution_name": "cloud.google.com/go/iam/apiv1", - "description": "IAM Meta API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iam/apiv2": { - "api_shortname": "iam", - "distribution_name": "cloud.google.com/go/iam/apiv2", - "description": "Identity and Access Management (IAM) API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iam/credentials/apiv1": { - "api_shortname": "iamcredentials", - "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", - "description": "IAM Service Account Credentials API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/credentials/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iap/apiv1": { - "api_shortname": "iap", - "distribution_name": "cloud.google.com/go/iap/apiv1", - "description": "Cloud Identity-Aware Proxy API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iap/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/ids/apiv1": { - "api_shortname": "ids", - "distribution_name": "cloud.google.com/go/ids/apiv1", - "description": "Cloud IDS API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/iot/apiv1": { - "api_shortname": "cloudiot", - "distribution_name": "cloud.google.com/go/iot/apiv1", - "description": "Cloud IoT API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iot/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/kms/apiv1": { - "api_shortname": "cloudkms", - "distribution_name": "cloud.google.com/go/kms/apiv1", - "description": "Cloud Key Management Service (KMS) API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/apiv1", - "release_level": "stable", - 
"library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/kms/inventory/apiv1": { - "api_shortname": "kmsinventory", - "distribution_name": "cloud.google.com/go/kms/inventory/apiv1", - "description": "KMS Inventory API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/inventory/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/language/apiv1": { - "api_shortname": "language", - "distribution_name": "cloud.google.com/go/language/apiv1", - "description": "Cloud Natural Language API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/language/apiv1beta2": { - "api_shortname": "language", - "distribution_name": "cloud.google.com/go/language/apiv1beta2", - "description": "Cloud Natural Language API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/language/apiv2": { - "api_shortname": "language", - "distribution_name": "cloud.google.com/go/language/apiv2", - "description": "Cloud Natural Language API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/lifesciences/apiv2beta": { - "api_shortname": "lifesciences", - "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", - "description": "Cloud Life Sciences API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/lifesciences/latest/apiv2beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/logging": { - "api_shortname": "logging", - "distribution_name": "cloud.google.com/go/logging", - "description": "Cloud Logging API", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/logging/apiv2": { - "api_shortname": "logging", - "distribution_name": "cloud.google.com/go/logging/apiv2", - "description": "Cloud Logging API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/longrunning/autogen": { - "api_shortname": "longrunning", - "distribution_name": "cloud.google.com/go/longrunning/autogen", - "description": "Long Running Operations API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/managedidentities/apiv1": { - "api_shortname": "managedidentities", - "distribution_name": 
"cloud.google.com/go/managedidentities/apiv1", - "description": "Managed Service for Microsoft Active Directory API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedidentities/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/addressvalidation/apiv1": { - "api_shortname": "addressvalidation", - "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", - "description": "Address Validation API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/addressvalidation/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/fleetengine/apiv1": { - "api_shortname": "fleetengine", - "distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1", - "description": "Local Rides and Deliveries API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/fleetengine/delivery/apiv1": { - "api_shortname": "fleetengine", - "distribution_name": "cloud.google.com/go/maps/fleetengine/delivery/apiv1", - "description": "Last Mile Fleet Solution Delivery API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/delivery/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { - "api_shortname": "mapsplatformdatasets", - "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", - "description": "Maps Platform Datasets API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/places/apiv1": { - "api_shortname": "places", - "distribution_name": "cloud.google.com/go/maps/places/apiv1", - "description": "Places API (New)", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/maps/routing/apiv2": { - "api_shortname": "routes", - "distribution_name": "cloud.google.com/go/maps/routing/apiv2", - "description": "Routes API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routing/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/mediatranslation/apiv1beta1": { - "api_shortname": "mediatranslation", - "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", - "description": "Media Translation API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/mediatranslation/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - 
"cloud.google.com/go/memcache/apiv1": { - "api_shortname": "memcache", - "distribution_name": "cloud.google.com/go/memcache/apiv1", - "description": "Cloud Memorystore for Memcached API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/memcache/apiv1beta2": { - "api_shortname": "memcache", - "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", - "description": "Cloud Memorystore for Memcached API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1beta2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/metastore/apiv1": { - "api_shortname": "metastore", - "distribution_name": "cloud.google.com/go/metastore/apiv1", - "description": "Dataproc Metastore API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/metastore/apiv1alpha": { - "api_shortname": "metastore", - "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", - "description": "Dataproc Metastore API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/metastore/apiv1beta": { - "api_shortname": "metastore", - "distribution_name": "cloud.google.com/go/metastore/apiv1beta", - "description": "Dataproc Metastore API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/migrationcenter/apiv1": { - "api_shortname": "migrationcenter", - "distribution_name": "cloud.google.com/go/migrationcenter/apiv1", - "description": "Migration Center API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/migrationcenter/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/monitoring/apiv3/v2": { - "api_shortname": "monitoring", - "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", - "description": "Cloud Monitoring API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/monitoring/dashboard/apiv1": { - "api_shortname": "monitoring", - "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", - "description": "Cloud Monitoring API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/dashboard/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/monitoring/metricsscope/apiv1": { - "api_shortname": 
"monitoring", - "distribution_name": "cloud.google.com/go/monitoring/metricsscope/apiv1", - "description": "Cloud Monitoring API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/metricsscope/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/netapp/apiv1": { - "api_shortname": "netapp", - "distribution_name": "cloud.google.com/go/netapp/apiv1", - "description": "NetApp API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/networkconnectivity/apiv1": { - "api_shortname": "networkconnectivity", - "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1", - "description": "Network Connectivity API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/networkconnectivity/apiv1alpha1": { - "api_shortname": "networkconnectivity", - "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", - "description": "Network Connectivity API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1alpha1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/networkmanagement/apiv1": { - "api_shortname": "networkmanagement", - "distribution_name": "cloud.google.com/go/networkmanagement/apiv1", - "description": "Network Management API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkmanagement/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/networksecurity/apiv1beta1": { - "api_shortname": "networksecurity", - "distribution_name": "cloud.google.com/go/networksecurity/apiv1beta1", - "description": "Network Security API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networksecurity/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/notebooks/apiv1": { - "api_shortname": "notebooks", - "distribution_name": "cloud.google.com/go/notebooks/apiv1", - "description": "Notebooks API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/notebooks/apiv1beta1": { - "api_shortname": "notebooks", - "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", - "description": "Notebooks API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/notebooks/apiv2": { - "api_shortname": "notebooks", - "distribution_name": 
"cloud.google.com/go/notebooks/apiv2", - "description": "Notebooks API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/optimization/apiv1": { - "api_shortname": "cloudoptimization", - "distribution_name": "cloud.google.com/go/optimization/apiv1", - "description": "Cloud Optimization API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/orchestration/airflow/service/apiv1": { - "api_shortname": "composer", - "distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1", - "description": "Cloud Composer API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orchestration/latest/airflow/service/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/orgpolicy/apiv2": { - "api_shortname": "orgpolicy", - "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", - "description": "Organization Policy API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orgpolicy/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/agentendpoint/apiv1": { - "api_shortname": "osconfig", - "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", - "description": "OS Config API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { - "api_shortname": "osconfig", - "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", - "description": "OS Config API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/apiv1": { - "api_shortname": "osconfig", - "distribution_name": "cloud.google.com/go/osconfig/apiv1", - "description": "OS Config API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/apiv1alpha": { - "api_shortname": "osconfig", - "distribution_name": "cloud.google.com/go/osconfig/apiv1alpha", - "description": "OS Config API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/osconfig/apiv1beta": { - "api_shortname": "osconfig", - "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", - "description": "OS Config API", - 
"language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/oslogin/apiv1": { - "api_shortname": "oslogin", - "distribution_name": "cloud.google.com/go/oslogin/apiv1", - "description": "Cloud OS Login API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/oslogin/apiv1beta": { - "api_shortname": "oslogin", - "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", - "description": "Cloud OS Login API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/parallelstore/apiv1beta": { - "api_shortname": "parallelstore", - "distribution_name": "cloud.google.com/go/parallelstore/apiv1beta", - "description": "Parallelstore API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parallelstore/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/phishingprotection/apiv1beta1": { - "api_shortname": "phishingprotection", - "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", - "description": "Phishing Protection API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/phishingprotection/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/policysimulator/apiv1": { - "api_shortname": "policysimulator", - "distribution_name": "cloud.google.com/go/policysimulator/apiv1", - "description": "Policy Simulator API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policysimulator/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/policytroubleshooter/apiv1": { - "api_shortname": "policytroubleshooter", - "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", - "description": "Policy Troubleshooter API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/policytroubleshooter/iam/apiv3": { - "api_shortname": "policytroubleshooter", - "distribution_name": "cloud.google.com/go/policytroubleshooter/iam/apiv3", - "description": "Policy Troubleshooter API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/iam/apiv3", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/privatecatalog/apiv1beta1": { - "api_shortname": "cloudprivatecatalog", - "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", - "description": "Cloud 
Private Catalog API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privatecatalog/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/profiler": { - "api_shortname": "cloudprofiler", - "distribution_name": "cloud.google.com/go/profiler", - "description": "Cloud Profiler", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", - "release_level": "stable", - "library_type": "AGENT" - }, - "cloud.google.com/go/pubsub": { - "api_shortname": "pubsub", - "distribution_name": "cloud.google.com/go/pubsub", - "description": "Cloud PubSub", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/pubsub/apiv1": { - "api_shortname": "pubsub", - "distribution_name": "cloud.google.com/go/pubsub/apiv1", - "description": "Cloud Pub/Sub API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/pubsublite": { - "api_shortname": "pubsublite", - "distribution_name": "cloud.google.com/go/pubsublite", - "description": "Cloud PubSub Lite", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/pubsublite/apiv1": { - "api_shortname": "pubsublite", - "distribution_name": "cloud.google.com/go/pubsublite/apiv1", - "description": "Pub/Sub Lite API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/rapidmigrationassessment/apiv1": { - "api_shortname": "rapidmigrationassessment", - "distribution_name": "cloud.google.com/go/rapidmigrationassessment/apiv1", - "description": "Rapid Migration Assessment API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/rapidmigrationassessment/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { - "api_shortname": "recaptchaenterprise", - "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", - "description": "reCAPTCHA Enterprise API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": { - "api_shortname": "recaptchaenterprise", - "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", - "description": "reCAPTCHA Enterprise API", - "language": "go", - "client_library_type": "generated", - "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/recommendationengine/apiv1beta1": { - "api_shortname": "recommendationengine", - "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1", - "description": "Recommendations AI", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommendationengine/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/recommender/apiv1": { - "api_shortname": "recommender", - "distribution_name": "cloud.google.com/go/recommender/apiv1", - "description": "Recommender API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/recommender/apiv1beta1": { - "api_shortname": "recommender", - "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", - "description": "Recommender API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/redis/apiv1": { - "api_shortname": "redis", - "distribution_name": "cloud.google.com/go/redis/apiv1", - "description": "Google Cloud Memorystore for Redis API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/redis/apiv1beta1": { - "api_shortname": "redis", - "distribution_name": "cloud.google.com/go/redis/apiv1beta1", - "description": "Google Cloud Memorystore for Redis API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/redis/cluster/apiv1": { - "api_shortname": "redis", - "distribution_name": "cloud.google.com/go/redis/cluster/apiv1", - "description": "Google Cloud Memorystore for Redis API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/resourcemanager/apiv2": { - "api_shortname": "cloudresourcemanager", - "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", - "description": "Cloud Resource Manager API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/resourcemanager/apiv3": { - "api_shortname": "cloudresourcemanager", - "distribution_name": "cloud.google.com/go/resourcemanager/apiv3", - "description": "Cloud Resource Manager API", - "language": "go", - "client_library_type": "generated", - "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv3", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/resourcesettings/apiv1": { - "api_shortname": "resourcesettings", - "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", - "description": "Resource Settings API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/retail/apiv2": { - "api_shortname": "retail", - "distribution_name": "cloud.google.com/go/retail/apiv2", - "description": "Retail API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/retail/apiv2alpha": { - "api_shortname": "retail", - "distribution_name": "cloud.google.com/go/retail/apiv2alpha", - "description": "Retail API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/retail/apiv2beta": { - "api_shortname": "retail", - "distribution_name": "cloud.google.com/go/retail/apiv2beta", - "description": "Retail API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/rpcreplay": { - "api_shortname": "rpcreplay", - "distribution_name": "cloud.google.com/go/rpcreplay", - "description": "RPC Replay", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/rpcreplay", - "release_level": "stable", - "library_type": "OTHER" - }, - "cloud.google.com/go/run/apiv2": { - "api_shortname": "run", - "distribution_name": "cloud.google.com/go/run/apiv2", - "description": "Cloud Run Admin API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/run/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/scheduler/apiv1": { - "api_shortname": "cloudscheduler", - "distribution_name": "cloud.google.com/go/scheduler/apiv1", - "description": "Cloud Scheduler API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/scheduler/apiv1beta1": { - "api_shortname": "cloudscheduler", - "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", - "description": "Cloud Scheduler API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/secretmanager/apiv1": { - "api_shortname": "secretmanager", - "distribution_name": 
"cloud.google.com/go/secretmanager/apiv1", - "description": "Secret Manager API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securesourcemanager/apiv1": { - "api_shortname": "securesourcemanager", - "distribution_name": "cloud.google.com/go/securesourcemanager/apiv1", - "description": "Secure Source Manager API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/security/privateca/apiv1": { - "api_shortname": "privateca", - "distribution_name": "cloud.google.com/go/security/privateca/apiv1", - "description": "Certificate Authority API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/security/publicca/apiv1beta1": { - "api_shortname": "publicca", - "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1", - "description": "Public Certificate Authority API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securitycenter/apiv1": { - "api_shortname": "securitycenter", - "distribution_name": "cloud.google.com/go/securitycenter/apiv1", - "description": "Security Command Center API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securitycenter/apiv1beta1": { - "api_shortname": "securitycenter", - "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", - "description": "Security Command Center API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securitycenter/apiv1p1beta1": { - "api_shortname": "securitycenter", - "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", - "description": "Security Command Center API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1p1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securitycenter/settings/apiv1beta1": { - "api_shortname": "securitycenter", - "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", - "description": "Cloud Security Command Center API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/settings/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" 
- }, - "cloud.google.com/go/securitycentermanagement/apiv1": { - "api_shortname": "securitycentermanagement", - "distribution_name": "cloud.google.com/go/securitycentermanagement/apiv1", - "description": "Security Center Management API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycentermanagement/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/securityposture/apiv1": { - "api_shortname": "securityposture", - "distribution_name": "cloud.google.com/go/securityposture/apiv1", - "description": "Security Posture API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securityposture/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicecontrol/apiv1": { - "api_shortname": "servicecontrol", - "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", - "description": "Service Control API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicecontrol/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicedirectory/apiv1": { - "api_shortname": "servicedirectory", - "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", - "description": "Service Directory API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicedirectory/apiv1beta1": { - "api_shortname": "servicedirectory", - "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", - "description": "Service Directory API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicehealth/apiv1": { - "api_shortname": "servicehealth", - "distribution_name": "cloud.google.com/go/servicehealth/apiv1", - "description": "Service Health API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicehealth/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/servicemanagement/apiv1": { - "api_shortname": "servicemanagement", - "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", - "description": "Service Management API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicemanagement/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/serviceusage/apiv1": { - "api_shortname": "serviceusage", - "distribution_name": "cloud.google.com/go/serviceusage/apiv1", - "description": "Service Usage API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/serviceusage/latest/apiv1", - "release_level": "stable", - 
"library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/shell/apiv1": { - "api_shortname": "cloudshell", - "distribution_name": "cloud.google.com/go/shell/apiv1", - "description": "Cloud Shell API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shell/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/shopping/css/apiv1": { - "api_shortname": "css", - "distribution_name": "cloud.google.com/go/shopping/css/apiv1", - "description": "CSS API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/css/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { - "api_shortname": "merchantapi", - "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta", - "description": "Merchant API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/inventories/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/spanner": { - "api_shortname": "spanner", - "distribution_name": "cloud.google.com/go/spanner", - "description": "Cloud Spanner", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/spanner/admin/database/apiv1": { - "api_shortname": "spanner", - "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", - "description": "Cloud Spanner API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/spanner/admin/instance/apiv1": { - "api_shortname": "spanner", - "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", - "description": "Cloud Spanner Instance Admin API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/instance/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/spanner/apiv1": { - "api_shortname": "spanner", - "distribution_name": "cloud.google.com/go/spanner/apiv1", - "description": "Cloud Spanner API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/spanner/executor/apiv1": { - "api_shortname": "spanner-cloud-executor", - "distribution_name": "cloud.google.com/go/spanner/executor/apiv1", - "description": "Cloud Spanner Executor test API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/executor/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/speech/apiv1": { - "api_shortname": 
"speech", - "distribution_name": "cloud.google.com/go/speech/apiv1", - "description": "Cloud Speech-to-Text API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/speech/apiv1p1beta1": { - "api_shortname": "speech", - "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", - "description": "Cloud Speech-to-Text API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1p1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/speech/apiv2": { - "api_shortname": "speech", - "distribution_name": "cloud.google.com/go/speech/apiv2", - "description": "Cloud Speech-to-Text API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/storage": { - "api_shortname": "storage", - "distribution_name": "cloud.google.com/go/storage", - "description": "Cloud Storage (GCS)", - "language": "go", - "client_library_type": "manual", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest", - "release_level": "stable", - "library_type": "GAPIC_MANUAL" - }, - "cloud.google.com/go/storage/internal/apiv2": { - "api_shortname": "storage", - "distribution_name": "cloud.google.com/go/storage/internal/apiv2", - "description": "Cloud Storage API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/storageinsights/apiv1": { - "api_shortname": "storageinsights", - "distribution_name": "cloud.google.com/go/storageinsights/apiv1", - "description": "Storage Insights API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storageinsights/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/storagetransfer/apiv1": { - "api_shortname": "storagetransfer", - "distribution_name": "cloud.google.com/go/storagetransfer/apiv1", - "description": "Storage Transfer API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storagetransfer/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/support/apiv2": { - "api_shortname": "cloudsupport", - "distribution_name": "cloud.google.com/go/support/apiv2", - "description": "Google Cloud Support API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/support/latest/apiv2", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/talent/apiv4": { - "api_shortname": "jobs", - "distribution_name": "cloud.google.com/go/talent/apiv4", - "description": "Cloud Talent Solution API", - "language": "go", - "client_library_type": "generated", - 
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/talent/apiv4beta1": { - "api_shortname": "jobs", - "distribution_name": "cloud.google.com/go/talent/apiv4beta1", - "description": "Cloud Talent Solution API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/telcoautomation/apiv1": { - "api_shortname": "telcoautomation", - "distribution_name": "cloud.google.com/go/telcoautomation/apiv1", - "description": "Telco Automation API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/telcoautomation/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/texttospeech/apiv1": { - "api_shortname": "texttospeech", - "distribution_name": "cloud.google.com/go/texttospeech/apiv1", - "description": "Cloud Text-to-Speech API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/texttospeech/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/tpu/apiv1": { - "api_shortname": "tpu", - "distribution_name": "cloud.google.com/go/tpu/apiv1", - "description": "Cloud TPU API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/tpu/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/trace/apiv1": { - "api_shortname": "cloudtrace", - "distribution_name": "cloud.google.com/go/trace/apiv1", - "description": "Stackdriver Trace API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/trace/apiv2": { - "api_shortname": "cloudtrace", - "distribution_name": "cloud.google.com/go/trace/apiv2", - "description": "Stackdriver Trace API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/translate/apiv3": { - "api_shortname": "translate", - "distribution_name": "cloud.google.com/go/translate/apiv3", - "description": "Cloud Translation API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/translate/latest/apiv3", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/video/livestream/apiv1": { - "api_shortname": "livestream", - "distribution_name": "cloud.google.com/go/video/livestream/apiv1", - "description": "Live Stream API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - 
"cloud.google.com/go/video/stitcher/apiv1": { - "api_shortname": "videostitcher", - "distribution_name": "cloud.google.com/go/video/stitcher/apiv1", - "description": "Video Stitcher API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/video/transcoder/apiv1": { - "api_shortname": "transcoder", - "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", - "description": "Transcoder API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/videointelligence/apiv1": { - "api_shortname": "videointelligence", - "distribution_name": "cloud.google.com/go/videointelligence/apiv1", - "description": "Cloud Video Intelligence API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/videointelligence/apiv1beta2": { - "api_shortname": "videointelligence", - "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", - "description": "Google Cloud Video Intelligence API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1beta2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/videointelligence/apiv1p3beta1": { - "api_shortname": "videointelligence", - "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", - "description": "Cloud Video Intelligence API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/vision/v2/apiv1": { - "api_shortname": "vision", - "distribution_name": "cloud.google.com/go/vision/v2/apiv1", - "description": "Cloud Vision API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/vision/v2/apiv1p1beta1": { - "api_shortname": "vision", - "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", - "description": "Cloud Vision API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/visionai/apiv1": { - "api_shortname": "visionai", - "distribution_name": "cloud.google.com/go/visionai/apiv1", - "description": "Vision AI API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/visionai/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - 
"cloud.google.com/go/vmmigration/apiv1": { - "api_shortname": "vmmigration", - "distribution_name": "cloud.google.com/go/vmmigration/apiv1", - "description": "VM Migration API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/vmwareengine/apiv1": { - "api_shortname": "vmwareengine", - "distribution_name": "cloud.google.com/go/vmwareengine/apiv1", - "description": "VMware Engine API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmwareengine/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/vpcaccess/apiv1": { - "api_shortname": "vpcaccess", - "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", - "description": "Serverless VPC Access API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vpcaccess/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/webrisk/apiv1": { - "api_shortname": "webrisk", - "distribution_name": "cloud.google.com/go/webrisk/apiv1", - "description": "Web Risk API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/webrisk/apiv1beta1": { - "api_shortname": "webrisk", - "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", - "description": "Web Risk API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1beta1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/websecurityscanner/apiv1": { - "api_shortname": "websecurityscanner", - "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", - "description": "Web Security Scanner API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/websecurityscanner/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/workflows/apiv1": { - "api_shortname": "workflows", - "distribution_name": "cloud.google.com/go/workflows/apiv1", - "description": "Workflows API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/workflows/apiv1beta": { - "api_shortname": "workflows", - "distribution_name": "cloud.google.com/go/workflows/apiv1beta", - "description": "Workflows API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/workflows/executions/apiv1": { - "api_shortname": "workflowexecutions", - "distribution_name": "cloud.google.com/go/workflows/executions/apiv1", - "description": 
"Workflow Executions API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/workflows/executions/apiv1beta": { - "api_shortname": "workflowexecutions", - "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", - "description": "Workflow Executions API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/workstations/apiv1": { - "api_shortname": "workstations", - "distribution_name": "cloud.google.com/go/workstations/apiv1", - "description": "Cloud Workstations API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/workstations/apiv1beta": { - "api_shortname": "workstations", - "distribution_name": "cloud.google.com/go/workstations/apiv1beta", - "description": "Cloud Workstations API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1beta", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - } -} diff --git a/vendor/cloud.google.com/go/internal/README.md b/vendor/cloud.google.com/go/internal/README.md deleted file mode 100644 index 972857e9d..000000000 --- a/vendor/cloud.google.com/go/internal/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Internal - -This directory contains internal code for cloud.google.com/go packages. - -## .repo-metadata-full.json - -`.repo-metadata-full.json` contains metadata about the packages in this repo. It -is generated by `internal/gapicgen/generator`. It's processed by external tools -to build lists of all of the packages. - -Don't make breaking changes to the format without consulting with the external -tools. - -One day, we may want to create individual `.repo-metadata.json` files next to -each package, which is the pattern followed by some other languages. External -tools would then talk to pkg.go.dev or some other service to get the overall -list of packages and use the `.repo-metadata.json` files to get the additional -metadata required. For now, `.repo-metadata-full.json` includes everything. - -### Updating OwlBot SHA - -You may want to manually update the which version of the post-processor will be -used -- to do this you need to update the SHA in the OwlBot lock file. - -See the [postprocessor/README](postprocessor/README.md) for detailed -instructions. - -*Note*: OwlBot will eventually open a pull request to update this value if it -discovers a new version of the container. diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go deleted file mode 100644 index 30d7bcf77..000000000 --- a/vendor/cloud.google.com/go/internal/annotate.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "fmt" - - "google.golang.org/api/googleapi" - "google.golang.org/grpc/status" -) - -// Annotate prepends msg to the error message in err, attempting -// to preserve other information in err, like an error code. -// -// Annotate panics if err is nil. -// -// Annotate knows about these error types: -// - "google.golang.org/grpc/status".Status -// - "google.golang.org/api/googleapi".Error -// If the error is not one of these types, Annotate behaves -// like -// -// fmt.Errorf("%s: %v", msg, err) -func Annotate(err error, msg string) error { - if err == nil { - panic("Annotate called with nil") - } - if s, ok := status.FromError(err); ok { - p := s.Proto() - p.Message = msg + ": " + p.Message - return status.ErrorProto(p) - } - if g, ok := err.(*googleapi.Error); ok { - g.Message = msg + ": " + g.Message - return g - } - return fmt.Errorf("%s: %v", msg, err) -} - -// Annotatef uses format and args to format a string, then calls Annotate. -func Annotatef(err error, format string, args ...interface{}) error { - return Annotate(err, fmt.Sprintf(format, args...)) -} diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go deleted file mode 100644 index 72780f764..000000000 --- a/vendor/cloud.google.com/go/internal/optional/optional.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package optional provides versions of primitive types that can -// be nil. These are useful in methods that update some of an API object's -// fields. -package optional - -import ( - "fmt" - "strings" - "time" -) - -type ( - // Bool is either a bool or nil. - Bool interface{} - - // String is either a string or nil. - String interface{} - - // Int is either an int or nil. - Int interface{} - - // Uint is either a uint or nil. - Uint interface{} - - // Float64 is either a float64 or nil. - Float64 interface{} - - // Duration is either a time.Duration or nil. - Duration interface{} -) - -// ToBool returns its argument as a bool. -// It panics if its argument is nil or not a bool. -func ToBool(v Bool) bool { - x, ok := v.(bool) - if !ok { - doPanic("Bool", v) - } - return x -} - -// ToString returns its argument as a string. -// It panics if its argument is nil or not a string. -func ToString(v String) string { - x, ok := v.(string) - if !ok { - doPanic("String", v) - } - return x -} - -// ToInt returns its argument as an int. -// It panics if its argument is nil or not an int. 
-func ToInt(v Int) int { - x, ok := v.(int) - if !ok { - doPanic("Int", v) - } - return x -} - -// ToUint returns its argument as a uint. -// It panics if its argument is nil or not a uint. -func ToUint(v Uint) uint { - x, ok := v.(uint) - if !ok { - doPanic("Uint", v) - } - return x -} - -// ToFloat64 returns its argument as a float64. -// It panics if its argument is nil or not a float64. -func ToFloat64(v Float64) float64 { - x, ok := v.(float64) - if !ok { - doPanic("Float64", v) - } - return x -} - -// ToDuration returns its argument as a time.Duration. -// It panics if its argument is nil or not a time.Duration. -func ToDuration(v Duration) time.Duration { - x, ok := v.(time.Duration) - if !ok { - doPanic("Duration", v) - } - return x -} - -func doPanic(capType string, v interface{}) { - panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) -} diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go deleted file mode 100644 index 4c9220e0d..000000000 --- a/vendor/cloud.google.com/go/internal/retry.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "context" - "fmt" - "time" - - gax "github.com/googleapis/gax-go/v2" -) - -// Retry calls the supplied function f repeatedly according to the provided -// backoff parameters. It returns when one of the following occurs: -// When f's first return value is true, Retry immediately returns with f's second -// return value. -// When the provided context is done, Retry returns with an error that -// includes both ctx.Error() and the last error returned by f. -func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { - return retry(ctx, bo, f, gax.Sleep) -} - -func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), - sleep func(context.Context, time.Duration) error) error { - var lastErr error - for { - stop, err := f() - if stop { - return err - } - // Remember the last "real" error from f. - if err != nil && err != context.Canceled && err != context.DeadlineExceeded { - lastErr = err - } - p := bo.Pause() - if ctxErr := sleep(ctx, p); ctxErr != nil { - if lastErr != nil { - return wrappedCallErr{ctxErr: ctxErr, wrappedErr: lastErr} - } - return ctxErr - } - } -} - -// Use this error type to return an error which allows introspection of both -// the context error and the error from the service. -type wrappedCallErr struct { - ctxErr error - wrappedErr error -} - -func (e wrappedCallErr) Error() string { - return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr) -} - -func (e wrappedCallErr) Unwrap() error { - return e.wrappedErr -} - -// Is allows errors.Is to match the error from the call as well as context -// sentinel errors. 
-func (e wrappedCallErr) Is(err error) bool { - return e.ctxErr == err || e.wrappedErr == err -} diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go deleted file mode 100644 index 97738b2cb..000000000 --- a/vendor/cloud.google.com/go/internal/trace/trace.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace - -import ( - "context" - "errors" - "fmt" - "os" - "strings" - "sync" - - "go.opencensus.io/trace" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - ottrace "go.opentelemetry.io/otel/trace" - "google.golang.org/api/googleapi" - "google.golang.org/genproto/googleapis/rpc/code" - "google.golang.org/grpc/status" -) - -const ( - // TelemetryPlatformTracingOpenCensus is the value to which the environment - // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be - // set to enable OpenCensus tracing. - TelemetryPlatformTracingOpenCensus = "opencensus" - // TelemetryPlatformTracingOpenCensus is the value to which the environment - // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be - // set to enable OpenTelemetry tracing. - TelemetryPlatformTracingOpenTelemetry = "opentelemetry" - // TelemetryPlatformTracingOpenCensus is the name of the environment - // variable that can be set to change the default tracing from OpenCensus - // to OpenTelemetry. - TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING" - // OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer - // when it is obtained from the OpenTelemetry TracerProvider. - OpenTelemetryTracerName = "cloud.google.com/go" -) - -var ( - // openTelemetryTracingEnabledMu guards access to openTelemetryTracingEnabled field - openTelemetryTracingEnabledMu = sync.RWMutex{} - // openTelemetryTracingEnabled is true if the environment variable - // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the - // case-insensitive value "opentelemetry". - openTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace( - os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenTelemetry) -) - -// SetOpenTelemetryTracingEnabledField programmatically sets the value provided by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of unit testing. -// Do not invoke it directly. Intended for use only in unit tests. Restore original value after each test. -func SetOpenTelemetryTracingEnabledField(enabled bool) { - openTelemetryTracingEnabledMu.Lock() - defer openTelemetryTracingEnabledMu.Unlock() - openTelemetryTracingEnabled = enabled -} - -// IsOpenCensusTracingEnabled returns true if the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the -// case-insensitive value "opentelemetry". 
-func IsOpenCensusTracingEnabled() bool { - return !IsOpenTelemetryTracingEnabled() -} - -// IsOpenTelemetryTracingEnabled returns true if the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the -// case-insensitive value "opentelemetry". -func IsOpenTelemetryTracingEnabled() bool { - openTelemetryTracingEnabledMu.RLock() - defer openTelemetryTracingEnabledMu.RUnlock() - return openTelemetryTracingEnabled -} - -// StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled -// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled -// returns true, the span will be an OpenTelemetry span. Set the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. -func StartSpan(ctx context.Context, name string) context.Context { - if IsOpenTelemetryTracingEnabled() { - ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name) - } else { - ctx, _ = trace.StartSpan(ctx, name) - } - return ctx -} - -// EndSpan ends a span with the given error. If IsOpenCensusTracingEnabled -// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled -// returns true, the span will be an OpenTelemetry span. Set the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. -func EndSpan(ctx context.Context, err error) { - if IsOpenTelemetryTracingEnabled() { - span := ottrace.SpanFromContext(ctx) - if err != nil { - span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err)) - span.RecordError(err) - } - span.End() - } else { - span := trace.FromContext(ctx) - if err != nil { - span.SetStatus(toStatus(err)) - } - span.End() - } -} - -// toStatus converts an error to an equivalent OpenCensus status. -func toStatus(err error) trace.Status { - var err2 *googleapi.Error - if ok := errors.As(err, &err2); ok { - return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} - } else if s, ok := status.FromError(err); ok { - return trace.Status{Code: int32(s.Code()), Message: s.Message()} - } else { - return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} - } -} - -// toOpenTelemetryStatus converts an error to an equivalent OpenTelemetry status description. -func toOpenTelemetryStatusDescription(err error) string { - var err2 *googleapi.Error - if ok := errors.As(err, &err2); ok { - return err2.Message - } else if s, ok := status.FromError(err); ok { - return s.Message() - } else { - return err.Error() - } -} - -// TODO(deklerk): switch to using OpenCensus function when it becomes available. 
-// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto -func httpStatusCodeToOCCode(httpStatusCode int) int32 { - switch httpStatusCode { - case 200: - return int32(code.Code_OK) - case 499: - return int32(code.Code_CANCELLED) - case 500: - return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS - case 400: - return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE - case 504: - return int32(code.Code_DEADLINE_EXCEEDED) - case 404: - return int32(code.Code_NOT_FOUND) - case 409: - return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED - case 403: - return int32(code.Code_PERMISSION_DENIED) - case 401: - return int32(code.Code_UNAUTHENTICATED) - case 429: - return int32(code.Code_RESOURCE_EXHAUSTED) - case 501: - return int32(code.Code_UNIMPLEMENTED) - case 503: - return int32(code.Code_UNAVAILABLE) - default: - return int32(code.Code_UNKNOWN) - } -} - -// TracePrintf retrieves the current OpenCensus or OpenTelemetry span from context, then: -// * calls Span.Annotatef if OpenCensus is enabled; or -// * calls Span.AddEvent if OpenTelemetry is enabled. -// -// If IsOpenCensusTracingEnabled returns true, the expected span must be an -// OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected -// span must be an OpenTelemetry span. Set the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. -func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { - if IsOpenTelemetryTracingEnabled() { - attrs := otAttrs(attrMap) - ottrace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), ottrace.WithAttributes(attrs...)) - } else { - attrs := ocAttrs(attrMap) - // TODO: (odeke-em): perhaps just pass around spans due to the cost - // incurred from using trace.FromContext(ctx) yet we could avoid - // throwing away the work done by ctx, span := trace.StartSpan. - trace.FromContext(ctx).Annotatef(attrs, format, args...) - } -} - -// ocAttrs converts a generic map to OpenCensus attributes. -func ocAttrs(attrMap map[string]interface{}) []trace.Attribute { - var attrs []trace.Attribute - for k, v := range attrMap { - var a trace.Attribute - switch v := v.(type) { - case string: - a = trace.StringAttribute(k, v) - case bool: - a = trace.BoolAttribute(k, v) - case int: - a = trace.Int64Attribute(k, int64(v)) - case int64: - a = trace.Int64Attribute(k, v) - default: - a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) - } - attrs = append(attrs, a) - } - return attrs -} - -// otAttrs converts a generic map to OpenTelemetry attributes. 
-func otAttrs(attrMap map[string]interface{}) []attribute.KeyValue { - var attrs []attribute.KeyValue - for k, v := range attrMap { - var a attribute.KeyValue - switch v := v.(type) { - case string: - a = attribute.Key(k).String(v) - case bool: - a = attribute.Key(k).Bool(v) - case int: - a = attribute.Key(k).Int(v) - case int64: - a = attribute.Key(k).Int64(v) - default: - a = attribute.Key(k).String(fmt.Sprintf("%#v", v)) - } - attrs = append(attrs, a) - } - return attrs -} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh deleted file mode 100644 index d7c5a3e21..000000000 --- a/vendor/cloud.google.com/go/internal/version/update_version.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -today=$(date +%Y%m%d) - -sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE - diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go deleted file mode 100644 index fd9dd91e9..000000000 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate ./update_version.sh - -// Package version contains version information for Google Cloud Client -// Libraries for Go, as reported in request headers. -package version - -import ( - "runtime" - "strings" - "unicode" -) - -// Repo is the current version of the client libraries in this -// repo. It should be a date in YYYYMMDD format. -const Repo = "20201104" - -// Go returns the Go runtime version. The returned string -// has no whitespace. 
-func Go() string { - return goVersion -} - -var goVersion = goVer(runtime.Version()) - -const develPrefix = "devel +" - -func goVer(s string) string { - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "" -} - -func notSemverRune(r rune) bool { - return !strings.ContainsRune("0123456789.", r) -} diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md deleted file mode 100644 index 0109034bd..000000000 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ /dev/null @@ -1,505 +0,0 @@ -# Changes - - -## [1.38.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.37.0...storage/v1.38.0) (2024-02-12) - - -### Features - -* **storage:** Support auto-detection of access ID for external_account creds ([#9208](https://github.com/googleapis/google-cloud-go/issues/9208)) ([b958d44](https://github.com/googleapis/google-cloud-go/commit/b958d44589f2b6b226ea3bef23829ac75a0aa6a6)) -* **storage:** Support custom hostname for VirtualHostedStyle SignedURLs ([#9348](https://github.com/googleapis/google-cloud-go/issues/9348)) ([7eec40e](https://github.com/googleapis/google-cloud-go/commit/7eec40e4cf82c53e5bf02bd2c14e0b25043da6d0)) -* **storage:** Support universe domains ([#9344](https://github.com/googleapis/google-cloud-go/issues/9344)) ([29a7498](https://github.com/googleapis/google-cloud-go/commit/29a7498b8eb0d00fdb5acd7ee8ce0e5a2a8c11ce)) - - -### Bug Fixes - -* **storage:** Fix v4 url signing for hosts that specify ports ([#9347](https://github.com/googleapis/google-cloud-go/issues/9347)) ([f127b46](https://github.com/googleapis/google-cloud-go/commit/f127b4648f861c1ba44f41a280a62652620c04c2)) - - -### Documentation - -* **storage:** Indicate that gRPC is incompatible with universe domains ([#9386](https://github.com/googleapis/google-cloud-go/issues/9386)) ([e8bd85b](https://github.com/googleapis/google-cloud-go/commit/e8bd85bbce12d5f7ab87fa49d166a6a0d84bd12d)) - -## [1.37.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.36.0...storage/v1.37.0) (2024-01-24) - - -### Features - -* **storage:** Add maxAttempts RetryOption ([#9215](https://github.com/googleapis/google-cloud-go/issues/9215)) ([e348cc5](https://github.com/googleapis/google-cloud-go/commit/e348cc5340e127b530e8ee4664fd995e6f038b2c)) -* **storage:** Support IncludeFoldersAsPrefixes ([#9211](https://github.com/googleapis/google-cloud-go/issues/9211)) ([98c9d71](https://github.com/googleapis/google-cloud-go/commit/98c9d7157306de5134547a67c084c248484c9a51)) - - -### Bug Fixes - -* **storage:** Migrate deprecated proto dep ([#9232](https://github.com/googleapis/google-cloud-go/issues/9232)) ([ebbb610](https://github.com/googleapis/google-cloud-go/commit/ebbb610e0f58035fd01ad7893971382d8bbd092f)), refs [#9189](https://github.com/googleapis/google-cloud-go/issues/9189) - -## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14) - - -### Features - -* **storage:** Add object retention feature ([#9072](https://github.com/googleapis/google-cloud-go/issues/9072)) 
([16ecfd1](https://github.com/googleapis/google-cloud-go/commit/16ecfd150ff1982f03d207a80a82e934d1013874)) - - -### Bug Fixes - -* **storage:** Do not inhibit the dead code elimination. ([#8543](https://github.com/googleapis/google-cloud-go/issues/8543)) ([ca2493f](https://github.com/googleapis/google-cloud-go/commit/ca2493f43c299bbaed5f7e5b70f66cc763ff9802)) -* **storage:** Set flush and get_state to false on the last write in gRPC ([#9013](https://github.com/googleapis/google-cloud-go/issues/9013)) ([c1e9fe5](https://github.com/googleapis/google-cloud-go/commit/c1e9fe5f4166a71e55814ccf126926ec0e0e7945)) - -## [1.35.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.0...storage/v1.35.1) (2023-11-09) - - -### Bug Fixes - -* **storage:** Rename aux.go to auxiliary.go fixing windows build ([ba23673](https://github.com/googleapis/google-cloud-go/commit/ba23673da7707c31292e4aa29d65b7ac1446d4a6)) - -## [1.35.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.1...storage/v1.35.0) (2023-11-09) - - -### Features - -* **storage:** Change gRPC writes to use bi-directional streams ([#8930](https://github.com/googleapis/google-cloud-go/issues/8930)) ([3e23a36](https://github.com/googleapis/google-cloud-go/commit/3e23a364b1a20c4fda7aef257e4136586ec769a4)) - -## [1.34.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.0...storage/v1.34.1) (2023-11-01) - - -### Bug Fixes - -* **storage:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) - -## [1.34.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.33.0...storage/v1.34.0) (2023-10-31) - - -### Features - -* **storage/internal:** Add match_glob field to ListObjectsRequest ([#8618](https://github.com/googleapis/google-cloud-go/issues/8618)) ([e9ae601](https://github.com/googleapis/google-cloud-go/commit/e9ae6018983ae09781740e4ff939e6e365863dbb)) -* **storage/internal:** Add terminal_storage_class fields to Autoclass message ([57fc1a6](https://github.com/googleapis/google-cloud-go/commit/57fc1a6de326456eb68ef25f7a305df6636ed386)) -* **storage/internal:** Adds the RestoreObject operation ([56ce871](https://github.com/googleapis/google-cloud-go/commit/56ce87195320634b07ae0b012efcc5f2b3813fb0)) -* **storage:** Support autoclass v2.1 ([#8721](https://github.com/googleapis/google-cloud-go/issues/8721)) ([fe1e195](https://github.com/googleapis/google-cloud-go/commit/fe1e19590a252c6adc6ca6c51a69b6e561e143b8)) -* **storage:** Support MatchGlob for gRPC ([#8670](https://github.com/googleapis/google-cloud-go/issues/8670)) ([3df0287](https://github.com/googleapis/google-cloud-go/commit/3df0287f88d5e2c4526e9e6b8dc2a4ca54f88918)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) - - -### Bug Fixes - -* **storage:** Drop stream reference after closing it for gRPC writes ([#8872](https://github.com/googleapis/google-cloud-go/issues/8872)) ([525abde](https://github.com/googleapis/google-cloud-go/commit/525abdee433864d4d456f1f1fff5599017b557ff)) -* **storage:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) -* **storage:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) -* **storage:** Update grpc-go to v1.59.0 
([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) - -## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.32.0...storage/v1.33.0) (2023-09-07) - - -### Features - -* **storage:** Export gRPC client constructor ([#8509](https://github.com/googleapis/google-cloud-go/issues/8509)) ([1a928ae](https://github.com/googleapis/google-cloud-go/commit/1a928ae205f2325cb5206304af4d609dc3c1447a)) - -## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.31.0...storage/v1.32.0) (2023-08-15) - - -### Features - -* **storage:** Add support for custom headers ([#8294](https://github.com/googleapis/google-cloud-go/issues/8294)) ([313fd4a](https://github.com/googleapis/google-cloud-go/commit/313fd4a60380d36c5ecaead3e968dbc84d044a0b)) -* **storage:** Add trace span to Writer ([#8375](https://github.com/googleapis/google-cloud-go/issues/8375)) ([f7ac85b](https://github.com/googleapis/google-cloud-go/commit/f7ac85bec2806d351529714bd7744a91a9fdefdd)), refs [#6144](https://github.com/googleapis/google-cloud-go/issues/6144) -* **storage:** Support single-shot uploads in gRPC ([#8348](https://github.com/googleapis/google-cloud-go/issues/8348)) ([7de4a7d](https://github.com/googleapis/google-cloud-go/commit/7de4a7da31ab279a343b1592b15a126cda03e5e7)), refs [#7798](https://github.com/googleapis/google-cloud-go/issues/7798) -* **storage:** Trace span covers life of a Reader ([#8390](https://github.com/googleapis/google-cloud-go/issues/8390)) ([8de30d7](https://github.com/googleapis/google-cloud-go/commit/8de30d752eec2fed2ea4c127482d3e213f9050e2)) - - -### Bug Fixes - -* **storage:** Fix AllObjects condition in gRPC ([#8184](https://github.com/googleapis/google-cloud-go/issues/8184)) ([2b99e4f](https://github.com/googleapis/google-cloud-go/commit/2b99e4f39be20fe21e8bc5c1ec1c0e758222c46e)), refs [#6205](https://github.com/googleapis/google-cloud-go/issues/6205) -* **storage:** Fix gRPC generation/condition issues ([#8396](https://github.com/googleapis/google-cloud-go/issues/8396)) ([ca68ff5](https://github.com/googleapis/google-cloud-go/commit/ca68ff54b680732b59b223655070d0f6abccefee)) -* **storage:** Same method name and Trace Span name ([#8150](https://github.com/googleapis/google-cloud-go/issues/8150)) ([e277213](https://github.com/googleapis/google-cloud-go/commit/e2772133896bb94097b5d1f090f1bcafd136f2ed)) -* **storage:** Update gRPC retry codes ([#8202](https://github.com/googleapis/google-cloud-go/issues/8202)) ([afdf772](https://github.com/googleapis/google-cloud-go/commit/afdf772fc6a90b3010eee9d70ab65e22e276f53f)) - -## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.1...storage/v1.31.0) (2023-06-27) - - -### Features - -* **storage/internal:** Add ctype=CORD for ChecksummedData.content ([ca94e27](https://github.com/googleapis/google-cloud-go/commit/ca94e2724f9e2610b46aefd0a3b5ddc06102e91b)) -* **storage:** Add support for MatchGlob ([#8097](https://github.com/googleapis/google-cloud-go/issues/8097)) ([9426a5a](https://github.com/googleapis/google-cloud-go/commit/9426a5a45d4c2fd07f84261f6d602680e79cdc48)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) [#7728](https://github.com/googleapis/google-cloud-go/issues/7728) -* **storage:** Respect WithEndpoint for SignedURLs and PostPolicy ([#8113](https://github.com/googleapis/google-cloud-go/issues/8113)) ([f918f23](https://github.com/googleapis/google-cloud-go/commit/f918f23a3cda4fbc8d709e32b914ead8b735d664)) -* 
**storage:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) - - -### Bug Fixes - -* **storage:** Fix CreateBucket logic for gRPC ([#8165](https://github.com/googleapis/google-cloud-go/issues/8165)) ([8424e7e](https://github.com/googleapis/google-cloud-go/commit/8424e7e145a117c91006318fa924a8b2643c1c7e)), refs [#8162](https://github.com/googleapis/google-cloud-go/issues/8162) -* **storage:** Fix reads with "./" in object names [XML] ([#8017](https://github.com/googleapis/google-cloud-go/issues/8017)) ([6b7b21f](https://github.com/googleapis/google-cloud-go/commit/6b7b21f8a334b6ad3a25e1f66ae1265b4d1f0995)) -* **storage:** Fix routing header for writes ([#8159](https://github.com/googleapis/google-cloud-go/issues/8159)) ([42a59f5](https://github.com/googleapis/google-cloud-go/commit/42a59f5a23ab9b4743ab032ad92304922c801d93)), refs [#8142](https://github.com/googleapis/google-cloud-go/issues/8142) [#8143](https://github.com/googleapis/google-cloud-go/issues/8143) [#8144](https://github.com/googleapis/google-cloud-go/issues/8144) [#8145](https://github.com/googleapis/google-cloud-go/issues/8145) [#8149](https://github.com/googleapis/google-cloud-go/issues/8149) -* **storage:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) -* **storage:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) - - -### Documentation - -* **storage/internal:** Clarifications about behavior of DeleteObject RPC ([3f1ed9c](https://github.com/googleapis/google-cloud-go/commit/3f1ed9c63fb115f47607a3ab478842fe5ba0df11)) -* **storage/internal:** Clarified the behavior of supplying bucket.name field in CreateBucket to reflect actual implementation ([ebae64d](https://github.com/googleapis/google-cloud-go/commit/ebae64d53397ec5dfe851f098754eaa1f5df7cb1)) -* **storage/internal:** Revert ChecksummedData message definition not to specify ctype=CORD, because it would be a breaking change. ([ef61e47](https://github.com/googleapis/google-cloud-go/commit/ef61e4799280a355b960da8ae240ceb2efbe71ac)) -* **storage/internal:** Update routing annotations for CancelResumableWriteRequest and QueryWriteStatusRequest ([4900851](https://github.com/googleapis/google-cloud-go/commit/49008518e168fe6f7891b907d6fc14eecdef758c)) -* **storage/internal:** Updated ChecksummedData message definition to specify ctype=CORD, and removed incorrect earlier attempt that set that annotation in the ReadObjectResponse message definition ([ef61e47](https://github.com/googleapis/google-cloud-go/commit/ef61e4799280a355b960da8ae240ceb2efbe71ac)) -* **storage:** WithXMLReads should mention XML instead of JSON API ([#7881](https://github.com/googleapis/google-cloud-go/issues/7881)) ([36f56c8](https://github.com/googleapis/google-cloud-go/commit/36f56c80c456ca74ffc03df76844ce15980ced82)) - -## [1.30.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.0...storage/v1.30.1) (2023-03-21) - - -### Bug Fixes - -* **storage:** Retract versions with Copier bug ([#7583](https://github.com/googleapis/google-cloud-go/issues/7583)) ([9c10b6f](https://github.com/googleapis/google-cloud-go/commit/9c10b6f8a54cb8447260148b5e4a9b5160281020)) - * Versions v1.25.0-v1.27.0 are retracted due to [#6857](https://github.com/googleapis/google-cloud-go/issues/6857). 
-* **storage:** SignedURL v4 allows headers with colons in value ([#7603](https://github.com/googleapis/google-cloud-go/issues/7603)) ([6b50f9b](https://github.com/googleapis/google-cloud-go/commit/6b50f9b368f5b271ade1706c342865cef46712e6)) - -## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.29.0...storage/v1.30.0) (2023-03-15) - - -### Features - -* **storage/internal:** Update routing annotation for CreateBucketRequest docs: Add support for end-to-end checksumming in the gRPC WriteObject flow feat!: BREAKING CHANGE - renaming Notification to NotificationConfig ([2fef56f](https://github.com/googleapis/google-cloud-go/commit/2fef56f75a63dc4ff6e0eea56c7b26d4831c8e27)) -* **storage:** Json downloads ([#7158](https://github.com/googleapis/google-cloud-go/issues/7158)) ([574a86c](https://github.com/googleapis/google-cloud-go/commit/574a86c614445f8c3f5a54446820df774c31cd47)) -* **storage:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) - - -### Bug Fixes - -* **storage:** Specify credentials with STORAGE_EMULATOR_HOST ([#7271](https://github.com/googleapis/google-cloud-go/issues/7271)) ([940ae15](https://github.com/googleapis/google-cloud-go/commit/940ae15f725ff384e345e627feb03d22e1fd8db5)) - -## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.1...storage/v1.29.0) (2023-01-19) - - -### Features - -* **storage:** Add ComponentCount as part of ObjectAttrs ([#7230](https://github.com/googleapis/google-cloud-go/issues/7230)) ([a19bca6](https://github.com/googleapis/google-cloud-go/commit/a19bca60704b4fbb674cf51d828580aa653c8210)) -* **storage:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) - - -### Documentation - -* **storage/internal:** Corrected typos and spellings ([7357077](https://github.com/googleapis/google-cloud-go/commit/735707796d81d7f6f32fc3415800c512fe62297e)) - -## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02) - - -### Bug Fixes - -* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) - -## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03) - - -### Features - -* **storage/internal:** Add routing annotations ([ce3f945](https://github.com/googleapis/google-cloud-go/commit/ce3f9458e511eca0910992763232abbcd64698f1)) -* **storage:** Add Autoclass support ([#6828](https://github.com/googleapis/google-cloud-go/issues/6828)) ([f7c7f41](https://github.com/googleapis/google-cloud-go/commit/f7c7f41e4d7fcffe05860e1114cb20f40c869da8)) - - -### Bug Fixes - -* **storage:** Fix read-write race in Writer.Write ([#6817](https://github.com/googleapis/google-cloud-go/issues/6817)) ([4766d3e](https://github.com/googleapis/google-cloud-go/commit/4766d3e1004119b93c6bd352024b5bf3404252eb)) -* **storage:** Fix request token passing for Copier.Run ([#6863](https://github.com/googleapis/google-cloud-go/issues/6863)) ([faaab06](https://github.com/googleapis/google-cloud-go/commit/faaab066d8e509dc440bcbc87391557ecee7dbf2)), refs [#6857](https://github.com/googleapis/google-cloud-go/issues/6857) - - -### Documentation - -* **storage:** Update broken links for SignURL and PostPolicy ([#6779](https://github.com/googleapis/google-cloud-go/issues/6779)) 
([776138b](https://github.com/googleapis/google-cloud-go/commit/776138bc06a1e5fd45acbf8f9d36e9dc6ce31dd3)) - -## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22) - - -### Features - -* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8)) - -## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29) - - -### Features - -* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362) - - -### Bug Fixes - -* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e)) - -## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11) - - -### Features - -* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f)) -* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e)) - -## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20) - - -### Features - -* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d)) - -## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23) - - -### Features - -* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d)) -* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795) - - -### Bug Fixes - -* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473)) - -### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19) - - -### Bug Fixes - -* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07)) -* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) 
([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973)) - -## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31) - - -### Features - -* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) ([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072)) -* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) - - -### Bug Fixes - -* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341)) - -## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17) - - -### Features - -* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749) -* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e)) - -## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04) - - -### Features - -* **storage/internal:** Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#5447](https://www.github.com/googleapis/google-cloud-go/issues/5447)) ([7d175ef](https://www.github.com/googleapis/google-cloud-go/commit/7d175ef12b7b3e75585427f5dd2aab4a175e92d6)) - -## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25) - - -### Features - -* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314)) - * This release contains changes to fully align this library's retry strategy - with best practices as described in the - Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy). - * The library will now retry only idempotent operations by default. 
This means - that for certain operations, including object upload, compose, rewrite, - update, and delete, requests will not be retried by default unless - [idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency) - for the request have been met. - * The library now has methods to configure aspects of retry policy for - API calls, including which errors are retried, the timing of the - exponential backoff, and how idempotency is taken into account. - * If you wish to re-enable retries for a non-idempotent request, use the - [RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways) - policy. - * For full details on how to configure retries, see the - [package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests) - and the - [Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy) -* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8)) -* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c)) -* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0)) - -### Bug Fixes - -* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264)) - -### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18) - - -### Bug Fixes - -* **storage:** upgrade genproto ([#4993](https://www.github.com/googleapis/google-cloud-go/issues/4993)) ([5ca462d](https://www.github.com/googleapis/google-cloud-go/commit/5ca462d99fe851b7cddfd70108798e2fa959bdfd)), refs [#4991](https://www.github.com/googleapis/google-cloud-go/issues/4991) - -### [1.18.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.0...storage/v1.18.1) (2021-10-14) - - -### Bug Fixes - -* **storage:** don't assume auth from a client option ([#4982](https://www.github.com/googleapis/google-cloud-go/issues/4982)) ([e17334d](https://www.github.com/googleapis/google-cloud-go/commit/e17334d1fe7645d89d14ae7148313498b984dfbb)) - -## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.17.0...storage/v1.18.0) (2021-10-11) - - -### Features - -* **storage:** returned wrapped error for timeouts ([#4802](https://www.github.com/googleapis/google-cloud-go/issues/4802)) ([0e102a3](https://www.github.com/googleapis/google-cloud-go/commit/0e102a385dc67a06f6b444b3a93e6998428529be)), refs [#4197](https://www.github.com/googleapis/google-cloud-go/issues/4197) -* **storage:** SignedUrl can use existing creds to authenticate ([#4604](https://www.github.com/googleapis/google-cloud-go/issues/4604)) ([b824c89](https://www.github.com/googleapis/google-cloud-go/commit/b824c897e6941270747b612f6d36a8d6ae081315)) - - -### Bug Fixes - -* **storage:** update PAP to use inherited instead of unspecified 
([#4909](https://www.github.com/googleapis/google-cloud-go/issues/4909)) ([dac26b1](https://www.github.com/googleapis/google-cloud-go/commit/dac26b1af2f2972f12775341173bcc5f982438b8)) - -## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.1...storage/v1.17.0) (2021-09-28) - - -### Features - -* **storage:** add projectNumber field to bucketAttrs. ([#4805](https://www.github.com/googleapis/google-cloud-go/issues/4805)) ([07343af](https://www.github.com/googleapis/google-cloud-go/commit/07343afc15085b164cc41d202d13f9d46f5c0d02)) - - -### Bug Fixes - -* **storage:** align retry idempotency (part 1) ([#4715](https://www.github.com/googleapis/google-cloud-go/issues/4715)) ([ffa903e](https://www.github.com/googleapis/google-cloud-go/commit/ffa903eeec61aa3869e5220e2f09371127b5c393)) - -### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30) - - -### Bug Fixes - -* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758)) -* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd)) -* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01)) -* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1)) -* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437) -* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470) - -## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28) - - -### Features - -* **storage:** support PublicAccessPrevention ([#3608](https://www.github.com/googleapis/google-cloud-go/issues/3608)) ([99bc782](https://www.github.com/googleapis/google-cloud-go/commit/99bc782fb50a47602b45278384ef5d5b5da9263b)), refs [#3203](https://www.github.com/googleapis/google-cloud-go/issues/3203) - - -### Bug Fixes - -* **storage:** fix Writer.ChunkSize validation ([#4255](https://www.github.com/googleapis/google-cloud-go/issues/4255)) ([69c2e9d](https://www.github.com/googleapis/google-cloud-go/commit/69c2e9dc6303e1a004d3104a8178532fa738e742)), refs [#4167](https://www.github.com/googleapis/google-cloud-go/issues/4167) -* **storage:** try to reopen for failed Reads ([#4226](https://www.github.com/googleapis/google-cloud-go/issues/4226)) ([564102b](https://www.github.com/googleapis/google-cloud-go/commit/564102b335dbfb558bec8af883e5f898efb5dd10)), refs 
[#3040](https://www.github.com/googleapis/google-cloud-go/issues/3040) - -## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.13.0...storage/v1.15.0) (2021-04-21) - - -### Features - -* **transport** Bump dependency on google.golang.org/api to pick up HTTP/2 - config updates (see [googleapis/google-api-go-client#882](https://github.com/googleapis/google-api-go-client/pull/882)). - -### Bug Fixes - -* **storage:** retry io.ErrUnexpectedEOF ([#3957](https://www.github.com/googleapis/google-cloud-go/issues/3957)) ([f6590cd](https://www.github.com/googleapis/google-cloud-go/commit/f6590cdc26c8479be5df48949fa59f879e0c24fc)) - - -## v1.14.0 - -- Updates to various dependencies. - -## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03) - - -### Features - -* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0)) -* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b)) - - -### Bug Fixes - -* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537)) - -## v1.12.0 -- V4 signed URL fixes: - - Fix encoding of spaces in query parameters. - - Add fields that were missing from PostPolicyV4 policy conditions. -- Fix Query to correctly list prefixes as well as objects when SetAttrSelection - is used. - -## v1.11.0 -- Add support for CustomTime and NoncurrentTime object lifecycle management - features. - -## v1.10.0 -- Bump dependency on google.golang.org/api to capture changes to retry logic - which will make retries on writes more resilient. -- Improve documentation for Writer.ChunkSize. -- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket. - -## v1.9.0 -- Add retry for transient network errors on most operations (with the exception - of writes). -- Bump dependency for google.golang.org/api to capture a change in the default - HTTP transport which will improve performance for reads under heavy load. -- Add CRC32C checksum validation option to Composer. - -## v1.8.0 -- Add support for V4 signed post policies. - -## v1.7.0 -- V4 signed URL support: - - Add support for bucket-bound domains and virtual hosted style URLs. - - Add support for query parameters in the signature. - - Fix text encoding to align with standards. -- Add the object name to query parameters for write calls. -- Fix retry behavior when reading files with Content-Encoding gzip. -- Fix response header in reader. -- New code examples: - - Error handling for `ObjectHandle` preconditions. - - Existence checks for buckets and objects. - -## v1.6.0 - -- Updated option handling: - - Don't drop custom scopes (#1756) - - Don't drop port in provided endpoint (#1737) - -## v1.5.0 - -- Honor WithEndpoint client option for reads as well as writes. -- Add archive storage class to docs. -- Make fixes to storage benchwrapper. - -## v1.4.0 - -- When listing objects in a bucket, allow callers to specify which attributes - are queried. This allows for performance optimization. 
- -## v1.3.0 - -- Use `storage.googleapis.com/storage/v1` by default for GCS requests - instead of `www.googleapis.com/storage/v1`. - -## v1.2.1 - -- Fixed a bug where UniformBucketLevelAccess and BucketPolicyOnly were not - being sent in all cases. - -## v1.2.0 - -- Add support for UniformBucketLevelAccess. This configures access checks - to use only bucket-level IAM policies. - See: https://godoc.org/cloud.google.com/go/storage#UniformBucketLevelAccess. -- Fix userAgent to use correct version. - -## v1.1.2 - -- Fix memory leak in BucketIterator and ObjectIterator. - -## v1.1.1 - -- Send BucketPolicyOnly even when it's disabled. - -## v1.1.0 - -- Performance improvements for ObjectIterator and BucketIterator. -- Fix Bucket.ObjectIterator size calculation checks. -- Added HMACKeyOptions to all the methods which allows for options such as - UserProject to be set per invocation and optionally be used. - -## v1.0.0 - -This is the first tag to carve out storage as its own module. See: -https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. diff --git a/vendor/cloud.google.com/go/storage/LICENSE b/vendor/cloud.google.com/go/storage/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/cloud.google.com/go/storage/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md deleted file mode 100644 index b2f411210..000000000 --- a/vendor/cloud.google.com/go/storage/README.md +++ /dev/null @@ -1,32 +0,0 @@ -## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage) - -- [About Cloud Storage](https://cloud.google.com/storage/) -- [API documentation](https://cloud.google.com/storage/docs) -- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage) - -### Example Usage - -First create a `storage.Client` to use throughout your application: - -[snip]:# (storage-1) -```go -client, err := storage.NewClient(ctx) -if err != nil { - log.Fatal(err) -} -``` - -[snip]:# (storage-2) -```go -// Read the object1 from bucket. 
-rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) -if err != nil { - log.Fatal(err) -} -defer rc.Close() -body, err := io.ReadAll(rc) -if err != nil { - log.Fatal(err) -} -``` diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go deleted file mode 100644 index 74799e55e..000000000 --- a/vendor/cloud.google.com/go/storage/acl.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "net/http" - "reflect" - - "cloud.google.com/go/internal/trace" - "cloud.google.com/go/storage/internal/apiv2/storagepb" - raw "google.golang.org/api/storage/v1" -) - -// ACLRole is the level of access to grant. -type ACLRole string - -const ( - RoleOwner ACLRole = "OWNER" - RoleReader ACLRole = "READER" - RoleWriter ACLRole = "WRITER" -) - -// ACLEntity refers to a user or group. -// They are sometimes referred to as grantees. -// -// It could be in the form of: -// "user-", "user-", "group-", "group-", -// "domain-" and "project-team-". -// -// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. -type ACLEntity string - -const ( - AllUsers ACLEntity = "allUsers" - AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" -) - -// ACLRule represents a grant for a role to an entity (user, group or team) for a -// Google Cloud Storage object or bucket. -type ACLRule struct { - Entity ACLEntity - EntityID string - Role ACLRole - Domain string - Email string - ProjectTeam *ProjectTeam -} - -// ProjectTeam is the project team associated with the entity, if any. -type ProjectTeam struct { - ProjectNumber string - Team string -} - -// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. -// ACLHandle on an object operates on the latest generation of that object by default. -// Selecting a specific generation of an object is not currently supported by the client. -type ACLHandle struct { - c *Client - bucket string - object string - isDefault bool - userProject string // for requester-pays buckets - retry *retryConfig -} - -// Delete permanently deletes the ACL entry for the given entity. -func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") - defer func() { trace.EndSpan(ctx, err) }() - - if a.object != "" { - return a.objectDelete(ctx, entity) - } - if a.isDefault { - return a.bucketDefaultDelete(ctx, entity) - } - return a.bucketDelete(ctx, entity) -} - -// Set sets the role for the given entity. 
-func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") - defer func() { trace.EndSpan(ctx, err) }() - - if a.object != "" { - return a.objectSet(ctx, entity, role, false) - } - if a.isDefault { - return a.objectSet(ctx, entity, role, true) - } - return a.bucketSet(ctx, entity, role) -} - -// List retrieves ACL entries. -func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") - defer func() { trace.EndSpan(ctx, err) }() - - if a.object != "" { - return a.objectList(ctx) - } - if a.isDefault { - return a.bucketDefaultList(ctx) - } - return a.bucketList(ctx) -} - -func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { - opts := makeStorageOpts(true, a.retry, a.userProject) - return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...) -} - -func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...) -} - -func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { - opts := makeStorageOpts(true, a.retry, a.userProject) - return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...) -} - -func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...) -} - -func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...) -} - -func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { - opts := makeStorageOpts(true, a.retry, a.userProject) - return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...) -} - -func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - if isBucketDefault { - return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...) - } - return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...) -} - -func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { - opts := makeStorageOpts(false, a.retry, a.userProject) - return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...) 
-} - -func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { - vc := reflect.ValueOf(call) - vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) - if a.userProject != "" { - vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) - } - setClientHeader(call.Header()) -} - -func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { - var rs []ACLRule - for _, item := range items { - rs = append(rs, toObjectACLRule(item)) - } - return rs -} - -func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule { - var rs []ACLRule - for _, item := range items { - rs = append(rs, toObjectACLRuleFromProto(item)) - } - return rs -} - -func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { - var rs []ACLRule - for _, item := range items { - rs = append(rs, toBucketACLRule(item)) - } - return rs -} - -func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule { - var rs []ACLRule - for _, item := range items { - rs = append(rs, toBucketACLRuleFromProto(item)) - } - return rs -} - -func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { - return ACLRule{ - Entity: ACLEntity(a.Entity), - EntityID: a.EntityId, - Role: ACLRole(a.Role), - Domain: a.Domain, - Email: a.Email, - ProjectTeam: toObjectProjectTeam(a.ProjectTeam), - } -} - -func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule { - return ACLRule{ - Entity: ACLEntity(a.GetEntity()), - EntityID: a.GetEntityId(), - Role: ACLRole(a.GetRole()), - Domain: a.GetDomain(), - Email: a.GetEmail(), - ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), - } -} - -func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { - return ACLRule{ - Entity: ACLEntity(a.Entity), - EntityID: a.EntityId, - Role: ACLRole(a.Role), - Domain: a.Domain, - Email: a.Email, - ProjectTeam: toBucketProjectTeam(a.ProjectTeam), - } -} - -func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule { - return ACLRule{ - Entity: ACLEntity(a.GetEntity()), - EntityID: a.GetEntityId(), - Role: ACLRole(a.GetRole()), - Domain: a.GetDomain(), - Email: a.GetEmail(), - ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()), - } -} - -func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { - if len(rules) == 0 { - return nil - } - r := make([]*raw.ObjectAccessControl, 0, len(rules)) - for _, rule := range rules { - r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary - } - return r -} - -func toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl { - if len(rules) == 0 { - return nil - } - r := make([]*storagepb.ObjectAccessControl, 0, len(rules)) - for _, rule := range rules { - r = append(r, rule.toProtoObjectAccessControl("")) // bucket name unnecessary - } - return r -} - -func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { - if len(rules) == 0 { - return nil - } - r := make([]*raw.BucketAccessControl, 0, len(rules)) - for _, rule := range rules { - r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary - } - return r -} - -func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl { - if len(rules) == 0 { - return nil - } - r := make([]*storagepb.BucketAccessControl, 0, len(rules)) - for _, rule := range rules { - r = append(r, rule.toProtoBucketAccessControl()) - } - return r -} - -func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl { - return &raw.BucketAccessControl{ - Bucket: 
bucket, - Entity: string(r.Entity), - Role: string(r.Role), - // The other fields are not settable. - } -} - -func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl { - return &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(r.Entity), - Role: string(r.Role), - // The other fields are not settable. - } -} - -func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl { - return &storagepb.ObjectAccessControl{ - Entity: string(r.Entity), - Role: string(r.Role), - // The other fields are not settable. - } -} - -func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl { - return &storagepb.BucketAccessControl{ - Entity: string(r.Entity), - Role: string(r.Role), - // The other fields are not settable. - } -} - -func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { - if p == nil { - return nil - } - return &ProjectTeam{ - ProjectNumber: p.ProjectNumber, - Team: p.Team, - } -} - -func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam { - if p == nil { - return nil - } - return &ProjectTeam{ - ProjectNumber: p.GetProjectNumber(), - Team: p.GetTeam(), - } -} - -func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { - if p == nil { - return nil - } - return &ProjectTeam{ - ProjectNumber: p.ProjectNumber, - Team: p.Team, - } -} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go deleted file mode 100644 index 0344ef9de..000000000 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ /dev/null @@ -1,2237 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "reflect" - "strings" - "time" - - "cloud.google.com/go/compute/metadata" - "cloud.google.com/go/internal/optional" - "cloud.google.com/go/internal/trace" - "cloud.google.com/go/storage/internal/apiv2/storagepb" - "google.golang.org/api/googleapi" - "google.golang.org/api/iamcredentials/v1" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - raw "google.golang.org/api/storage/v1" - dpb "google.golang.org/genproto/googleapis/type/date" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/durationpb" -) - -// BucketHandle provides operations on a Google Cloud Storage bucket. -// Use Client.Bucket to get a handle. -type BucketHandle struct { - c *Client - name string - acl ACLHandle - defaultObjectACL ACLHandle - conds *BucketConditions - userProject string // project for Requester Pays buckets - retry *retryConfig - enableObjectRetention *bool -} - -// Bucket returns a BucketHandle, which provides operations on the named bucket. -// This call does not perform any network operations. -// -// The supplied name must contain only lowercase letters, numbers, dashes, -// underscores, and dots. 
The full specification for valid bucket names can be -// found at: -// -// https://cloud.google.com/storage/docs/bucket-naming -func (c *Client) Bucket(name string) *BucketHandle { - retry := c.retry.clone() - return &BucketHandle{ - c: c, - name: name, - acl: ACLHandle{ - c: c, - bucket: name, - retry: retry, - }, - defaultObjectACL: ACLHandle{ - c: c, - bucket: name, - isDefault: true, - retry: retry, - }, - retry: retry, - } -} - -// Create creates the Bucket in the project. -// If attrs is nil the API defaults will be used. -func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, b.retry, b.userProject) - - if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, b.enableObjectRetention, o...); err != nil { - return err - } - return nil -} - -// Delete deletes the Bucket. -func (b *BucketHandle) Delete(ctx context.Context) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...) -} - -// ACL returns an ACLHandle, which provides access to the bucket's access control list. -// This controls who can list, create or overwrite the objects in a bucket. -// This call does not perform any network operations. -func (b *BucketHandle) ACL() *ACLHandle { - return &b.acl -} - -// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. -// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. -// This call does not perform any network operations. -func (b *BucketHandle) DefaultObjectACL() *ACLHandle { - return &b.defaultObjectACL -} - -// Object returns an ObjectHandle, which provides operations on the named object. -// This call does not perform any network operations such as fetching the object or verifying its existence. -// Use methods on ObjectHandle to perform network operations. -// -// name must consist entirely of valid UTF-8-encoded runes. The full specification -// for valid object names can be found at: -// -// https://cloud.google.com/storage/docs/naming-objects -func (b *BucketHandle) Object(name string) *ObjectHandle { - retry := b.retry.clone() - return &ObjectHandle{ - c: b.c, - bucket: b.name, - object: name, - acl: ACLHandle{ - c: b.c, - bucket: b.name, - object: name, - userProject: b.userProject, - retry: retry, - }, - gen: -1, - userProject: b.userProject, - retry: retry, - } -} - -// Attrs returns the metadata for the bucket. -func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.GetBucket(ctx, b.name, b.conds, o...) -} - -// Update updates a bucket's attributes. -func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update") - defer func() { trace.EndSpan(ctx, err) }() - - isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0 - o := makeStorageOpts(isIdempotent, b.retry, b.userProject) - return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...) 
-} - -// SignedURL returns a URL for the specified object. Signed URLs allow anyone -// access to a restricted resource for a limited time without needing a Google -// account or signing in. -// For more information about signed URLs, see "[Overview of access control]." -// -// This method requires the Method and Expires fields in the specified -// SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and -// PrivateKey fields in some cases. Read more on the [automatic detection of credentials] -// for this method. -// -// [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication -// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing -func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { - // Make a copy of opts so we don't modify the pointer parameter. - newopts := opts.clone() - - if newopts.Hostname == "" { - // Extract the correct host from the readhost set on the client - newopts.Hostname = b.c.xmlHost - } - - if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { - return SignedURL(b.name, object, newopts) - } - - if newopts.GoogleAccessID == "" { - id, err := b.detectDefaultGoogleAccessID() - if err != nil { - return "", err - } - newopts.GoogleAccessID = id - } - if newopts.SignBytes == nil && len(newopts.PrivateKey) == 0 { - if b.c.creds != nil && len(b.c.creds.JSON) > 0 { - var sa struct { - PrivateKey string `json:"private_key"` - } - err := json.Unmarshal(b.c.creds.JSON, &sa) - if err == nil && sa.PrivateKey != "" { - newopts.PrivateKey = []byte(sa.PrivateKey) - } - } - - // Don't error out if we can't unmarshal the private key from the client, - // fallback to the default sign function for the service account. - if len(newopts.PrivateKey) == 0 { - newopts.SignBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) - } - } - return SignedURL(b.name, object, newopts) -} - -// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. -// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. -// -// This method requires the Expires field in the specified PostPolicyV4Options -// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields -// in some cases. Read more on the [automatic detection of credentials] for this method. -// -// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing -func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { - // Make a copy of opts so we don't modify the pointer parameter. 
- newopts := opts.clone() - - if newopts.Hostname == "" { - // Extract the correct host from the readhost set on the client - newopts.Hostname = b.c.xmlHost - } - - if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { - return GenerateSignedPostPolicyV4(b.name, object, newopts) - } - - if newopts.GoogleAccessID == "" { - id, err := b.detectDefaultGoogleAccessID() - if err != nil { - return nil, err - } - newopts.GoogleAccessID = id - } - if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 { - if b.c.creds != nil && len(b.c.creds.JSON) > 0 { - var sa struct { - PrivateKey string `json:"private_key"` - } - err := json.Unmarshal(b.c.creds.JSON, &sa) - if err == nil && sa.PrivateKey != "" { - newopts.PrivateKey = []byte(sa.PrivateKey) - } - } - - // Don't error out if we can't unmarshal the private key from the client, - // fallback to the default sign function for the service account. - if len(newopts.PrivateKey) == 0 { - newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID) - } - } - return GenerateSignedPostPolicyV4(b.name, object, newopts) -} - -func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { - returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)") - - if b.c.creds != nil && len(b.c.creds.JSON) > 0 { - var sa struct { - ClientEmail string `json:"client_email"` - SAImpersonationURL string `json:"service_account_impersonation_url"` - CredType string `json:"type"` - } - - err := json.Unmarshal(b.c.creds.JSON, &sa) - if err != nil { - returnErr = err - } else { - switch sa.CredType { - case "impersonated_service_account", "external_account": - start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":") - - if end <= start { - returnErr = errors.New("error parsing external or impersonated service account credentials") - } else { - return sa.SAImpersonationURL[start+1 : end], nil - } - case "service_account": - if sa.ClientEmail != "" { - return sa.ClientEmail, nil - } - returnErr = errors.New("empty service account client email") - default: - returnErr = errors.New("unable to parse credentials; only service_account, external_account and impersonated_service_account credentials are supported") - } - } - } - - // Don't error out if we can't unmarshal, fallback to GCE check. - if metadata.OnGCE() { - email, err := metadata.Email("default") - if err == nil && email != "" { - return email, nil - } else if err != nil { - returnErr = err - } else { - returnErr = errors.New("empty email from GCE metadata service") - } - - } - return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. 
Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing)", returnErr) -} - -func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) { - return func(in []byte) ([]byte, error) { - ctx := context.Background() - - // It's ok to recreate this service per call since we pass in the http client, - // circumventing the cost of recreating the auth/transport layer - svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc)) - if err != nil { - return nil, fmt.Errorf("unable to create iamcredentials client: %w", err) - } - - resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{ - Payload: base64.StdEncoding.EncodeToString(in), - }).Do() - if err != nil { - return nil, fmt.Errorf("unable to sign bytes: %w", err) - } - out, err := base64.StdEncoding.DecodeString(resp.SignedBlob) - if err != nil { - return nil, fmt.Errorf("unable to base64 decode response: %w", err) - } - return out, nil - } -} - -// BucketAttrs represents the metadata for a Google Cloud Storage bucket. -// Read-only fields are ignored by BucketHandle.Create. -type BucketAttrs struct { - // Name is the name of the bucket. - // This field is read-only. - Name string - - // ACL is the list of access control rules on the bucket. - ACL []ACLRule - - // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of - // UniformBucketLevelAccess is recommended above the use of this field. - // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to - // true, will enable UniformBucketLevelAccess. - BucketPolicyOnly BucketPolicyOnly - - // UniformBucketLevelAccess configures access checks to use only bucket-level IAM - // policies and ignore any ACL rules for the bucket. - // See https://cloud.google.com/storage/docs/uniform-bucket-level-access - // for more information. - UniformBucketLevelAccess UniformBucketLevelAccess - - // PublicAccessPrevention is the setting for the bucket's - // PublicAccessPrevention policy, which can be used to prevent public access - // of data in the bucket. See - // https://cloud.google.com/storage/docs/public-access-prevention for more - // information. - PublicAccessPrevention PublicAccessPrevention - - // DefaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. - DefaultObjectACL []ACLRule - - // DefaultEventBasedHold is the default value for event-based hold on - // newly created objects in this bucket. It defaults to false. - DefaultEventBasedHold bool - - // If not empty, applies a predefined set of access controls. It should be set - // only when creating a bucket. - // It is always empty for BucketAttrs returned from the service. - // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert - // for valid values. - PredefinedACL string - - // If not empty, applies a predefined set of default object access controls. - // It should be set only when creating a bucket. - // It is always empty for BucketAttrs returned from the service. - // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert - // for valid values. - PredefinedDefaultObjectACL string - - // Location is the location of the bucket. It defaults to "US". - // If specifying a dual-region, CustomPlacementConfig should be set in conjunction. 
- Location string - - // The bucket's custom placement configuration that holds a list of - // regional locations for custom dual regions. - CustomPlacementConfig *CustomPlacementConfig - - // MetaGeneration is the metadata generation of the bucket. - // This field is read-only. - MetaGeneration int64 - - // StorageClass is the default storage class of the bucket. This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "STANDARD", "NEARLINE", - // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". - // See https://cloud.google.com/storage/docs/storage-classes for all - // valid values. - StorageClass string - - // Created is the creation time of the bucket. - // This field is read-only. - Created time.Time - - // VersioningEnabled reports whether this bucket has versioning enabled. - VersioningEnabled bool - - // Labels are the bucket's labels. - Labels map[string]string - - // RequesterPays reports whether the bucket is a Requester Pays bucket. - // Clients performing operations on Requester Pays buckets must provide - // a user project (see BucketHandle.UserProject), which will be billed - // for the operations. - RequesterPays bool - - // Lifecycle is the lifecycle configuration for objects in the bucket. - Lifecycle Lifecycle - - // Retention policy enforces a minimum retention time for all objects - // contained in the bucket. A RetentionPolicy of nil implies the bucket - // has no minimum data retention. - // - // This feature is in private alpha release. It is not currently available to - // most customers. It might be changed in backwards-incompatible ways and is not - // subject to any SLA or deprecation policy. - RetentionPolicy *RetentionPolicy - - // The bucket's Cross-Origin Resource Sharing (CORS) configuration. - CORS []CORS - - // The encryption configuration used by default for newly inserted objects. - Encryption *BucketEncryption - - // The logging configuration. - Logging *BucketLogging - - // The website configuration. - Website *BucketWebsite - - // Etag is the HTTP/1.1 Entity tag for the bucket. - // This field is read-only. - Etag string - - // LocationType describes how data is stored and replicated. - // Typical values are "multi-region", "region" and "dual-region". - // This field is read-only. - LocationType string - - // The project number of the project the bucket belongs to. - // This field is read-only. - ProjectNumber uint64 - - // RPO configures the Recovery Point Objective (RPO) policy of the bucket. - // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. - // See https://cloud.google.com/storage/docs/managing-turbo-replication for - // more information. - RPO RPO - - // Autoclass holds the bucket's autoclass configuration. If enabled, - // allows for the automatic selection of the best storage class - // based on object access patterns. - Autoclass *Autoclass - - // ObjectRetentionMode reports whether individual objects in the bucket can - // be configured with a retention policy. An empty value means that object - // retention is disabled. - // This field is read-only. Object retention can be enabled only by creating - // a bucket with SetObjectRetention set to true on the BucketHandle. It - // cannot be modified once the bucket is created. - // ObjectRetention cannot be configured or reported through the gRPC API. - ObjectRetentionMode string -} - -// BucketPolicyOnly is an alias for UniformBucketLevelAccess. 
-// Use of UniformBucketLevelAccess is preferred above BucketPolicyOnly. -type BucketPolicyOnly struct { - // Enabled specifies whether access checks use only bucket-level IAM - // policies. Enabled may be disabled until the locked time. - Enabled bool - // LockedTime specifies the deadline for changing Enabled from true to - // false. - LockedTime time.Time -} - -// UniformBucketLevelAccess configures access checks to use only bucket-level IAM -// policies. -type UniformBucketLevelAccess struct { - // Enabled specifies whether access checks use only bucket-level IAM - // policies. Enabled may be disabled until the locked time. - Enabled bool - // LockedTime specifies the deadline for changing Enabled from true to - // false. - LockedTime time.Time -} - -// PublicAccessPrevention configures the Public Access Prevention feature, which -// can be used to disallow public access to any data in a bucket. See -// https://cloud.google.com/storage/docs/public-access-prevention for more -// information. -type PublicAccessPrevention int - -const ( - // PublicAccessPreventionUnknown is a zero value, used only if this field is - // not set in a call to GCS. - PublicAccessPreventionUnknown PublicAccessPrevention = iota - - // PublicAccessPreventionUnspecified corresponds to a value of "unspecified". - // Deprecated: use PublicAccessPreventionInherited - PublicAccessPreventionUnspecified - - // PublicAccessPreventionEnforced corresponds to a value of "enforced". This - // enforces Public Access Prevention on the bucket. - PublicAccessPreventionEnforced - - // PublicAccessPreventionInherited corresponds to a value of "inherited" - // and is the default for buckets. - PublicAccessPreventionInherited - - publicAccessPreventionUnknown string = "" - // TODO: remove unspecified when change is fully completed - publicAccessPreventionUnspecified = "unspecified" - publicAccessPreventionEnforced = "enforced" - publicAccessPreventionInherited = "inherited" -) - -func (p PublicAccessPrevention) String() string { - switch p { - case PublicAccessPreventionInherited, PublicAccessPreventionUnspecified: - return publicAccessPreventionInherited - case PublicAccessPreventionEnforced: - return publicAccessPreventionEnforced - default: - return publicAccessPreventionUnknown - } -} - -// Lifecycle is the lifecycle configuration for objects in the bucket. -type Lifecycle struct { - Rules []LifecycleRule -} - -// RetentionPolicy enforces a minimum retention time for all objects -// contained in the bucket. -// -// Any attempt to overwrite or delete objects younger than the retention -// period will result in an error. An unlocked retention policy can be -// modified or removed from the bucket via the Update method. A -// locked retention policy cannot be removed or shortened in duration -// for the lifetime of the bucket. -// -// This feature is in private alpha release. It is not currently available to -// most customers. It might be changed in backwards-incompatible ways and is not -// subject to any SLA or deprecation policy. -type RetentionPolicy struct { - // RetentionPeriod specifies the duration that objects need to be - // retained. Retention duration must be greater than zero and less than - // 100 years. Note that enforcement of retention periods less than a day - // is not guaranteed. Such periods should only be used for testing - // purposes. - RetentionPeriod time.Duration - - // EffectiveTime is the time from which the policy was enforced and - // effective. This field is read-only. 
- EffectiveTime time.Time - - // IsLocked describes whether the bucket is locked. Once locked, an - // object retention policy cannot be modified. - // This field is read-only. - IsLocked bool -} - -const ( - // RFC3339 timestamp with only the date segment, used for CreatedBefore, - // CustomTimeBefore, and NoncurrentTimeBefore in LifecycleRule. - rfc3339Date = "2006-01-02" - - // DeleteAction is a lifecycle action that deletes a live and/or archived - // objects. Takes precedence over SetStorageClass actions. - DeleteAction = "Delete" - - // SetStorageClassAction changes the storage class of live and/or archived - // objects. - SetStorageClassAction = "SetStorageClass" - - // AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete - // multipart upload when the multipart upload meets the conditions specified - // in the lifecycle rule. The AgeInDays condition is the only allowed - // condition for this action. AgeInDays is measured from the time the - // multipart upload was created. - AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload" -) - -// LifecycleRule is a lifecycle configuration rule. -// -// When all the configured conditions are met by an object in the bucket, the -// configured action will automatically be taken on that object. -type LifecycleRule struct { - // Action is the action to take when all of the associated conditions are - // met. - Action LifecycleAction - - // Condition is the set of conditions that must be met for the associated - // action to be taken. - Condition LifecycleCondition -} - -// LifecycleAction is a lifecycle configuration action. -type LifecycleAction struct { - // Type is the type of action to take on matching objects. - // - // Acceptable values are storage.DeleteAction, storage.SetStorageClassAction, - // and storage.AbortIncompleteMPUAction. - Type string - - // StorageClass is the storage class to set on matching objects if the Action - // is "SetStorageClass". - StorageClass string -} - -// Liveness specifies whether the object is live or not. -type Liveness int - -const ( - // LiveAndArchived includes both live and archived objects. - LiveAndArchived Liveness = iota - // Live specifies that the object is still live. - Live - // Archived specifies that the object is archived. - Archived -) - -// LifecycleCondition is a set of conditions used to match objects and take an -// action automatically. -// -// All configured conditions must be met for the associated action to be taken. -type LifecycleCondition struct { - // AllObjects is used to select all objects in a bucket by - // setting AgeInDays to 0. - AllObjects bool - - // AgeInDays is the age of the object in days. - // If you want to set AgeInDays to `0` use AllObjects set to `true`. - AgeInDays int64 - - // CreatedBefore is the time the object was created. - // - // This condition is satisfied when an object is created before midnight of - // the specified date in UTC. - CreatedBefore time.Time - - // CustomTimeBefore is the CustomTime metadata field of the object. This - // condition is satisfied when an object's CustomTime timestamp is before - // midnight of the specified date in UTC. - // - // This condition can only be satisfied if CustomTime has been set. - CustomTimeBefore time.Time - - // DaysSinceCustomTime is the days elapsed since the CustomTime date of the - // object. This condition can only be satisfied if CustomTime has been set. - // Note: Using `0` as the value will be ignored by the library and not sent to the API. 
- DaysSinceCustomTime int64 - - // DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp - // of the object. This condition is relevant only for versioned objects. - // Note: Using `0` as the value will be ignored by the library and not sent to the API. - DaysSinceNoncurrentTime int64 - - // Liveness specifies the object's liveness. Relevant only for versioned objects - Liveness Liveness - - // MatchesPrefix is the condition matching an object if any of the - // matches_prefix strings are an exact prefix of the object's name. - MatchesPrefix []string - - // MatchesStorageClasses is the condition matching the object's storage - // class. - // - // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". - MatchesStorageClasses []string - - // MatchesSuffix is the condition matching an object if any of the - // matches_suffix strings are an exact suffix of the object's name. - MatchesSuffix []string - - // NoncurrentTimeBefore is the noncurrent timestamp of the object. This - // condition is satisfied when an object's noncurrent timestamp is before - // midnight of the specified date in UTC. - // - // This condition is relevant only for versioned objects. - NoncurrentTimeBefore time.Time - - // NumNewerVersions is the condition matching objects with a number of newer versions. - // - // If the value is N, this condition is satisfied when there are at least N - // versions (including the live version) newer than this version of the - // object. - // Note: Using `0` as the value will be ignored by the library and not sent to the API. - NumNewerVersions int64 -} - -// BucketLogging holds the bucket's logging configuration, which defines the -// destination bucket and optional name prefix for the current bucket's -// logs. -type BucketLogging struct { - // The destination bucket where the current bucket's logs - // should be placed. - LogBucket string - - // A prefix for log object names. - LogObjectPrefix string -} - -// BucketWebsite holds the bucket's website configuration, controlling how the -// service behaves when accessing bucket contents as a web site. See -// https://cloud.google.com/storage/docs/static-website for more information. -type BucketWebsite struct { - // If the requested object path is missing, the service will ensure the path has - // a trailing '/', append this suffix, and attempt to retrieve the resulting - // object. This allows the creation of index.html objects to represent directory - // pages. - MainPageSuffix string - - // If the requested object path is missing, and any mainPageSuffix object is - // missing, if applicable, the service will return the named object from this - // bucket as the content for a 404 Not Found result. - NotFoundPage string -} - -// CustomPlacementConfig holds the bucket's custom placement -// configuration for Custom Dual Regions. See -// https://cloud.google.com/storage/docs/locations#location-dr for more information. -type CustomPlacementConfig struct { - // The list of regional locations in which data is placed. - // Custom Dual Regions require exactly 2 regional locations. - DataLocations []string -} - -// Autoclass holds the bucket's autoclass configuration. If enabled, -// allows for the automatic selection of the best storage class -// based on object access patterns. See -// https://cloud.google.com/storage/docs/using-autoclass for more information. -type Autoclass struct { - // Enabled specifies whether the autoclass feature is enabled - // on the bucket. 
- Enabled bool - // ToggleTime is the time from which Autoclass was last toggled. - // If Autoclass is enabled when the bucket is created, the ToggleTime - // is set to the bucket creation time. This field is read-only. - ToggleTime time.Time - // TerminalStorageClass: The storage class that objects in the bucket - // eventually transition to if they are not read for a certain length of - // time. Valid values are NEARLINE and ARCHIVE. - TerminalStorageClass string - // TerminalStorageClassUpdateTime represents the time of the most recent - // update to "TerminalStorageClass". - TerminalStorageClassUpdateTime time.Time -} - -func newBucket(b *raw.Bucket) (*BucketAttrs, error) { - if b == nil { - return nil, nil - } - rp, err := toRetentionPolicy(b.RetentionPolicy) - if err != nil { - return nil, err - } - - return &BucketAttrs{ - Name: b.Name, - Location: b.Location, - MetaGeneration: b.Metageneration, - DefaultEventBasedHold: b.DefaultEventBasedHold, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, - ACL: toBucketACLRules(b.Acl), - DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), - Labels: b.Labels, - RequesterPays: b.Billing != nil && b.Billing.RequesterPays, - Lifecycle: toLifecycle(b.Lifecycle), - RetentionPolicy: rp, - ObjectRetentionMode: toBucketObjectRetention(b.ObjectRetention), - CORS: toCORS(b.Cors), - Encryption: toBucketEncryption(b.Encryption), - Logging: toBucketLogging(b.Logging), - Website: toBucketWebsite(b.Website), - BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), - UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration), - PublicAccessPrevention: toPublicAccessPrevention(b.IamConfiguration), - Etag: b.Etag, - LocationType: b.LocationType, - ProjectNumber: b.ProjectNumber, - RPO: toRPO(b), - CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig), - Autoclass: toAutoclassFromRaw(b.Autoclass), - }, nil -} - -func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs { - if b == nil { - return nil - } - return &BucketAttrs{ - Name: parseBucketName(b.GetName()), - Location: b.GetLocation(), - MetaGeneration: b.GetMetageneration(), - DefaultEventBasedHold: b.GetDefaultEventBasedHold(), - StorageClass: b.GetStorageClass(), - Created: b.GetCreateTime().AsTime(), - VersioningEnabled: b.GetVersioning().GetEnabled(), - ACL: toBucketACLRulesFromProto(b.GetAcl()), - DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()), - Labels: b.GetLabels(), - RequesterPays: b.GetBilling().GetRequesterPays(), - Lifecycle: toLifecycleFromProto(b.GetLifecycle()), - RetentionPolicy: toRetentionPolicyFromProto(b.GetRetentionPolicy()), - CORS: toCORSFromProto(b.GetCors()), - Encryption: toBucketEncryptionFromProto(b.GetEncryption()), - Logging: toBucketLoggingFromProto(b.GetLogging()), - Website: toBucketWebsiteFromProto(b.GetWebsite()), - BucketPolicyOnly: toBucketPolicyOnlyFromProto(b.GetIamConfig()), - UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()), - PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()), - LocationType: b.GetLocationType(), - RPO: toRPOFromProto(b), - CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()), - ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based - Autoclass: toAutoclassFromProto(b.GetAutoclass()), - } -} - -// toRawBucket copies the editable attribute from b to the raw library's 
Bucket type. -func (b *BucketAttrs) toRawBucket() *raw.Bucket { - // Copy label map. - var labels map[string]string - if len(b.Labels) > 0 { - labels = make(map[string]string, len(b.Labels)) - for k, v := range b.Labels { - labels[k] = v - } - } - // Ignore VersioningEnabled if it is false. This is OK because - // we only call this method when creating a bucket, and by default - // new buckets have versioning off. - var v *raw.BucketVersioning - if b.VersioningEnabled { - v = &raw.BucketVersioning{Enabled: true} - } - var bb *raw.BucketBilling - if b.RequesterPays { - bb = &raw.BucketBilling{RequesterPays: true} - } - var bktIAM *raw.BucketIamConfiguration - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM = &raw.BucketIamConfiguration{} - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { - bktIAM.UniformBucketLevelAccess = &raw.BucketIamConfigurationUniformBucketLevelAccess{ - Enabled: true, - } - } - if b.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() - } - } - return &raw.Bucket{ - Name: b.Name, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: toRawBucketACL(b.ACL), - DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toRawLifecycle(b.Lifecycle), - RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), - Cors: toRawCORS(b.CORS), - Encryption: b.Encryption.toRawBucketEncryption(), - Logging: b.Logging.toRawBucketLogging(), - Website: b.Website.toRawBucketWebsite(), - IamConfiguration: bktIAM, - Rpo: b.RPO.String(), - CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(), - Autoclass: b.Autoclass.toRawAutoclass(), - } -} - -func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket { - if b == nil { - return &storagepb.Bucket{} - } - - // Copy label map. - var labels map[string]string - if len(b.Labels) > 0 { - labels = make(map[string]string, len(b.Labels)) - for k, v := range b.Labels { - labels[k] = v - } - } - - // Ignore VersioningEnabled if it is false. This is OK because - // we only call this method when creating a bucket, and by default - // new buckets have versioning off. 
- var v *storagepb.Bucket_Versioning - if b.VersioningEnabled { - v = &storagepb.Bucket_Versioning{Enabled: true} - } - var bb *storagepb.Bucket_Billing - if b.RequesterPays { - bb = &storagepb.Bucket_Billing{RequesterPays: true} - } - var bktIAM *storagepb.Bucket_IamConfig - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM = &storagepb.Bucket_IamConfig{} - if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { - bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ - Enabled: true, - } - } - if b.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String() - } - } - - return &storagepb.Bucket{ - Name: b.Name, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: toProtoBucketACL(b.ACL), - DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL), - Versioning: v, - Labels: labels, - Billing: bb, - Lifecycle: toProtoLifecycle(b.Lifecycle), - RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(), - Cors: toProtoCORS(b.CORS), - Encryption: b.Encryption.toProtoBucketEncryption(), - Logging: b.Logging.toProtoBucketLogging(), - Website: b.Website.toProtoBucketWebsite(), - IamConfig: bktIAM, - Rpo: b.RPO.String(), - CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(), - Autoclass: b.Autoclass.toProtoAutoclass(), - } -} - -func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { - if ua == nil { - return &storagepb.Bucket{} - } - - var v *storagepb.Bucket_Versioning - if ua.VersioningEnabled != nil { - v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)} - } - var bb *storagepb.Bucket_Billing - if ua.RequesterPays != nil { - bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)} - } - - var bktIAM *storagepb.Bucket_IamConfig - if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM = &storagepb.Bucket_IamConfig{} - - if ua.BucketPolicyOnly != nil { - bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ - Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled), - } - } - - if ua.UniformBucketLevelAccess != nil { - // UniformBucketLevelAccess takes precedence over BucketPolicyOnly, - // so Enabled will be overriden here if both are set - bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{ - Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled), - } - } - - if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { - bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String() - } - } - - var defaultHold bool - if ua.DefaultEventBasedHold != nil { - defaultHold = optional.ToBool(ua.DefaultEventBasedHold) - } - var lifecycle Lifecycle - if ua.Lifecycle != nil { - lifecycle = *ua.Lifecycle - } - var bktACL []*storagepb.BucketAccessControl - if ua.acl != nil { - bktACL = toProtoBucketACL(ua.acl) - } - if ua.PredefinedACL != "" { - // Clear ACL or the call will fail. - bktACL = nil - } - var bktDefaultObjectACL []*storagepb.ObjectAccessControl - if ua.defaultObjectACL != nil { - bktDefaultObjectACL = toProtoObjectACL(ua.defaultObjectACL) - } - if ua.PredefinedDefaultObjectACL != "" { - // Clear ACLs or the call will fail. 
- bktDefaultObjectACL = nil - } - - return &storagepb.Bucket{ - StorageClass: ua.StorageClass, - Acl: bktACL, - DefaultObjectAcl: bktDefaultObjectACL, - DefaultEventBasedHold: defaultHold, - Versioning: v, - Billing: bb, - Lifecycle: toProtoLifecycle(lifecycle), - RetentionPolicy: ua.RetentionPolicy.toProtoRetentionPolicy(), - Cors: toProtoCORS(ua.CORS), - Encryption: ua.Encryption.toProtoBucketEncryption(), - Logging: ua.Logging.toProtoBucketLogging(), - Website: ua.Website.toProtoBucketWebsite(), - IamConfig: bktIAM, - Rpo: ua.RPO.String(), - Autoclass: ua.Autoclass.toProtoAutoclass(), - Labels: ua.setLabels, - } -} - -// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration. -type CORS struct { - // MaxAge is the value to return in the Access-Control-Max-Age - // header used in preflight responses. - MaxAge time.Duration - - // Methods is the list of HTTP methods on which to include CORS response - // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list - // of methods, and means "any method". - Methods []string - - // Origins is the list of Origins eligible to receive CORS response - // headers. Note: "*" is permitted in the list of origins, and means - // "any Origin". - Origins []string - - // ResponseHeaders is the list of HTTP headers other than the simple - // response headers to give permission for the user-agent to share - // across domains. - ResponseHeaders []string -} - -// BucketEncryption is a bucket's encryption configuration. -type BucketEncryption struct { - // A Cloud KMS key name, in the form - // projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt - // objects inserted into this bucket, if no encryption method is specified. - // The key's location must be the same as the bucket's. - DefaultKMSKeyName string -} - -// BucketAttrsToUpdate define the attributes to update during an Update call. -type BucketAttrsToUpdate struct { - // If set, updates whether the bucket uses versioning. - VersioningEnabled optional.Bool - - // If set, updates whether the bucket is a Requester Pays bucket. - RequesterPays optional.Bool - - // DefaultEventBasedHold is the default value for event-based hold on - // newly created objects in this bucket. - DefaultEventBasedHold optional.Bool - - // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of - // UniformBucketLevelAccess is recommended above the use of this field. - // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to - // true, will enable UniformBucketLevelAccess. If both BucketPolicyOnly and - // UniformBucketLevelAccess are set, the value of UniformBucketLevelAccess - // will take precedence. - BucketPolicyOnly *BucketPolicyOnly - - // UniformBucketLevelAccess configures access checks to use only bucket-level IAM - // policies and ignore any ACL rules for the bucket. - // See https://cloud.google.com/storage/docs/uniform-bucket-level-access - // for more information. - UniformBucketLevelAccess *UniformBucketLevelAccess - - // PublicAccessPrevention is the setting for the bucket's - // PublicAccessPrevention policy, which can be used to prevent public access - // of data in the bucket. See - // https://cloud.google.com/storage/docs/public-access-prevention for more - // information. - PublicAccessPrevention PublicAccessPrevention - - // StorageClass is the default storage class of the bucket. This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. 
Typical values are "STANDARD", "NEARLINE", - // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". - // See https://cloud.google.com/storage/docs/storage-classes for all - // valid values. - StorageClass string - - // If set, updates the retention policy of the bucket. Using - // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. - // - // This feature is in private alpha release. It is not currently available to - // most customers. It might be changed in backwards-incompatible ways and is not - // subject to any SLA or deprecation policy. - RetentionPolicy *RetentionPolicy - - // If set, replaces the CORS configuration with a new configuration. - // An empty (rather than nil) slice causes all CORS policies to be removed. - CORS []CORS - - // If set, replaces the encryption configuration of the bucket. Using - // BucketEncryption.DefaultKMSKeyName = "" will delete the existing - // configuration. - Encryption *BucketEncryption - - // If set, replaces the lifecycle configuration of the bucket. - Lifecycle *Lifecycle - - // If set, replaces the logging configuration of the bucket. - Logging *BucketLogging - - // If set, replaces the website configuration of the bucket. - Website *BucketWebsite - - // If not empty, applies a predefined set of access controls. - // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. - PredefinedACL string - - // If not empty, applies a predefined set of default object access controls. - // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. - PredefinedDefaultObjectACL string - - // RPO configures the Recovery Point Objective (RPO) policy of the bucket. - // Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket. - // See https://cloud.google.com/storage/docs/managing-turbo-replication for - // more information. - RPO RPO - - // If set, updates the autoclass configuration of the bucket. - // See https://cloud.google.com/storage/docs/using-autoclass for more information. - Autoclass *Autoclass - - // acl is the list of access control rules on the bucket. - // It is unexported and only used internally by the gRPC client. - // Library users should use ACLHandle methods directly. - acl []ACLRule - - // defaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. - // It is unexported and only used internally by the gRPC client. - // Library users should use ACLHandle methods directly. - defaultObjectACL []ACLRule - - setLabels map[string]string - deleteLabels map[string]bool -} - -// SetLabel causes a label to be added or modified when ua is used -// in a call to Bucket.Update. -func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { - if ua.setLabels == nil { - ua.setLabels = map[string]string{} - } - ua.setLabels[name] = value -} - -// DeleteLabel causes a label to be deleted when ua is used in a -// call to Bucket.Update. 
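// A minimal usage sketch for BucketAttrsToUpdate and the SetLabel/DeleteLabel helpers
// (assumes imports of context and cloud.google.com/go/storage; the helper and label names
// below are illustrative placeholders, not part of this file):
func relabelBucket(ctx context.Context, bkt *storage.BucketHandle) error {
	uattrs := storage.BucketAttrsToUpdate{VersioningEnabled: false}
	uattrs.SetLabel("retention", "long-term") // add or overwrite a label
	uattrs.DeleteLabel("temporary")           // schedule a label for removal
	_, err := bkt.Update(ctx, uattrs)
	return err
}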
-func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { - if ua.deleteLabels == nil { - ua.deleteLabels = map[string]bool{} - } - ua.deleteLabels[name] = true -} - -func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { - rb := &raw.Bucket{} - if ua.CORS != nil { - rb.Cors = toRawCORS(ua.CORS) - rb.ForceSendFields = append(rb.ForceSendFields, "Cors") - } - if ua.DefaultEventBasedHold != nil { - rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold) - rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold") - } - if ua.RetentionPolicy != nil { - if ua.RetentionPolicy.RetentionPeriod == 0 { - rb.NullFields = append(rb.NullFields, "RetentionPolicy") - rb.RetentionPolicy = nil - } else { - rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy() - } - } - if ua.VersioningEnabled != nil { - rb.Versioning = &raw.BucketVersioning{ - Enabled: optional.ToBool(ua.VersioningEnabled), - ForceSendFields: []string{"Enabled"}, - } - } - if ua.RequesterPays != nil { - rb.Billing = &raw.BucketBilling{ - RequesterPays: optional.ToBool(ua.RequesterPays), - ForceSendFields: []string{"RequesterPays"}, - } - } - if ua.BucketPolicyOnly != nil { - rb.IamConfiguration = &raw.BucketIamConfiguration{ - UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ - Enabled: ua.BucketPolicyOnly.Enabled, - ForceSendFields: []string{"Enabled"}, - }, - } - } - if ua.UniformBucketLevelAccess != nil { - rb.IamConfiguration = &raw.BucketIamConfiguration{ - UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ - Enabled: ua.UniformBucketLevelAccess.Enabled, - ForceSendFields: []string{"Enabled"}, - }, - } - } - if ua.PublicAccessPrevention != PublicAccessPreventionUnknown { - if rb.IamConfiguration == nil { - rb.IamConfiguration = &raw.BucketIamConfiguration{} - } - rb.IamConfiguration.PublicAccessPrevention = ua.PublicAccessPrevention.String() - } - if ua.Encryption != nil { - if ua.Encryption.DefaultKMSKeyName == "" { - rb.NullFields = append(rb.NullFields, "Encryption") - rb.Encryption = nil - } else { - rb.Encryption = ua.Encryption.toRawBucketEncryption() - } - } - if ua.Lifecycle != nil { - rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) - rb.ForceSendFields = append(rb.ForceSendFields, "Lifecycle") - } - if ua.Logging != nil { - if *ua.Logging == (BucketLogging{}) { - rb.NullFields = append(rb.NullFields, "Logging") - rb.Logging = nil - } else { - rb.Logging = ua.Logging.toRawBucketLogging() - } - } - if ua.Website != nil { - if *ua.Website == (BucketWebsite{}) { - rb.NullFields = append(rb.NullFields, "Website") - rb.Website = nil - } else { - rb.Website = ua.Website.toRawBucketWebsite() - } - } - if ua.Autoclass != nil { - rb.Autoclass = &raw.BucketAutoclass{ - Enabled: ua.Autoclass.Enabled, - TerminalStorageClass: ua.Autoclass.TerminalStorageClass, - ForceSendFields: []string{"Enabled"}, - } - rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass") - } - if ua.PredefinedACL != "" { - // Clear ACL or the call will fail. - rb.Acl = nil - rb.ForceSendFields = append(rb.ForceSendFields, "Acl") - } - if ua.PredefinedDefaultObjectACL != "" { - // Clear ACLs or the call will fail. 
- rb.DefaultObjectAcl = nil - rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") - } - - rb.StorageClass = ua.StorageClass - rb.Rpo = ua.RPO.String() - - if ua.setLabels != nil || ua.deleteLabels != nil { - rb.Labels = map[string]string{} - for k, v := range ua.setLabels { - rb.Labels[k] = v - } - if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { - rb.ForceSendFields = append(rb.ForceSendFields, "Labels") - } - for l := range ua.deleteLabels { - rb.NullFields = append(rb.NullFields, "Labels."+l) - } - } - return rb -} - -// If returns a new BucketHandle that applies a set of preconditions. -// Preconditions already set on the BucketHandle are ignored. The supplied -// BucketConditions must have exactly one field set to a non-zero value; -// otherwise an error will be returned from any operation on the BucketHandle. -// Operations on the new handle will return an error if the preconditions are not -// satisfied. The only valid preconditions for buckets are MetagenerationMatch -// and MetagenerationNotMatch. -func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { - b2 := *b - b2.conds = &conds - return &b2 -} - -// BucketConditions constrain bucket methods to act on specific metagenerations. -// -// The zero value is an empty set of constraints. -type BucketConditions struct { - // MetagenerationMatch specifies that the bucket must have the given - // metageneration for the operation to occur. - // If MetagenerationMatch is zero, it has no effect. - MetagenerationMatch int64 - - // MetagenerationNotMatch specifies that the bucket must not have the given - // metageneration for the operation to occur. - // If MetagenerationNotMatch is zero, it has no effect. - MetagenerationNotMatch int64 -} - -func (c *BucketConditions) validate(method string) error { - if *c == (BucketConditions{}) { - return fmt.Errorf("storage: %s: empty conditions", method) - } - if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { - return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) - } - return nil -} - -// UserProject returns a new BucketHandle that passes the project ID as the user -// project for all subsequent calls. Calls with a user project will be billed to that -// project rather than to the bucket's owning project. -// -// A user project is required for all operations on Requester Pays buckets. -func (b *BucketHandle) UserProject(projectID string) *BucketHandle { - b2 := *b - b2.userProject = projectID - b2.acl.userProject = projectID - b2.defaultObjectACL.userProject = projectID - return &b2 -} - -// LockRetentionPolicy locks a bucket's retention policy until a previously-configured -// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less -// than a day, the retention policy is treated as a development configuration and locking -// will have no effect. The BucketHandle must have a metageneration condition that -// matches the bucket's metageneration. See BucketHandle.If. -// -// This feature is in private alpha release. It is not currently available to -// most customers. It might be changed in backwards-incompatible ways and is not -// subject to any SLA or deprecation policy. -func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { - o := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...) -} - -// SetObjectRetention returns a new BucketHandle that will enable object retention -// on bucket creation. 
To enable object retention, you must use the returned -// handle to create the bucket. This has no effect on an already existing bucket. -// ObjectRetention is not enabled by default. -// ObjectRetention cannot be configured through the gRPC API. -func (b *BucketHandle) SetObjectRetention(enable bool) *BucketHandle { - b2 := *b - b2.enableObjectRetention = &enable - return &b2 -} - -// applyBucketConds modifies the provided call using the conditions in conds. -// call is something that quacks like a *raw.WhateverCall. -func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - cval := reflect.ValueOf(call) - switch { - case conds.MetagenerationMatch != 0: - if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} - -// applyBucketConds modifies the provided request message using the conditions -// in conds. msg is a protobuf Message that has fields if_metageneration_match -// and if_metageneration_not_match. -func applyBucketCondsProto(method string, conds *BucketConditions, msg proto.Message) error { - rmsg := msg.ProtoReflect() - - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - - switch { - case conds.MetagenerationMatch != 0: - if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} - -func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { - if rp == nil { - return nil - } - return &raw.BucketRetentionPolicy{ - RetentionPeriod: int64(rp.RetentionPeriod / time.Second), - } -} - -func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionPolicy { - if rp == nil { - return nil - } - // RetentionPeriod must be greater than 0, so if it is 0, the user left it - // unset, and so we should not send it in the request i.e. nil is sent. 
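// A hedged sketch of the precondition plumbing described above: bucket mutations such as
// LockRetentionPolicy take BucketConditions via BucketHandle.If, which applyBucketConds and
// applyBucketCondsProto translate for the JSON and gRPC transports. Assumes imports of
// context and cloud.google.com/go/storage; the helper name is illustrative.
func lockRetention(ctx context.Context, bkt *storage.BucketHandle) error {
	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		return err
	}
	// The metageneration precondition guards against locking a policy that changed concurrently.
	return bkt.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx)
}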
- var dur *durationpb.Duration - if rp.RetentionPeriod != 0 { - dur = durationpb.New(rp.RetentionPeriod) - } - return &storagepb.Bucket_RetentionPolicy{ - RetentionDuration: dur, - } -} - -func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { - if rp == nil || rp.EffectiveTime == "" { - return nil, nil - } - t, err := time.Parse(time.RFC3339, rp.EffectiveTime) - if err != nil { - return nil, err - } - return &RetentionPolicy{ - RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second, - EffectiveTime: t, - IsLocked: rp.IsLocked, - }, nil -} - -func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy { - if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 { - return nil - } - return &RetentionPolicy{ - RetentionPeriod: rp.GetRetentionDuration().AsDuration(), - EffectiveTime: rp.GetEffectiveTime().AsTime(), - IsLocked: rp.GetIsLocked(), - } -} - -func toBucketObjectRetention(or *raw.BucketObjectRetention) string { - if or == nil { - return "" - } - return or.Mode -} - -func toRawCORS(c []CORS) []*raw.BucketCors { - var out []*raw.BucketCors - for _, v := range c { - out = append(out, &raw.BucketCors{ - MaxAgeSeconds: int64(v.MaxAge / time.Second), - Method: v.Methods, - Origin: v.Origins, - ResponseHeader: v.ResponseHeaders, - }) - } - return out -} - -func toProtoCORS(c []CORS) []*storagepb.Bucket_Cors { - var out []*storagepb.Bucket_Cors - for _, v := range c { - out = append(out, &storagepb.Bucket_Cors{ - MaxAgeSeconds: int32(v.MaxAge / time.Second), - Method: v.Methods, - Origin: v.Origins, - ResponseHeader: v.ResponseHeaders, - }) - } - return out -} - -func toCORS(rc []*raw.BucketCors) []CORS { - var out []CORS - for _, v := range rc { - out = append(out, CORS{ - MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second, - Methods: v.Method, - Origins: v.Origin, - ResponseHeaders: v.ResponseHeader, - }) - } - return out -} - -func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS { - var out []CORS - for _, v := range rc { - out = append(out, CORS{ - MaxAge: time.Duration(v.GetMaxAgeSeconds()) * time.Second, - Methods: v.GetMethod(), - Origins: v.GetOrigin(), - ResponseHeaders: v.GetResponseHeader(), - }) - } - return out -} - -func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { - var rl raw.BucketLifecycle - if len(l.Rules) == 0 { - rl.ForceSendFields = []string{"Rule"} - } - for _, r := range l.Rules { - rr := &raw.BucketLifecycleRule{ - Action: &raw.BucketLifecycleRuleAction{ - Type: r.Action.Type, - StorageClass: r.Action.StorageClass, - }, - Condition: &raw.BucketLifecycleRuleCondition{ - DaysSinceCustomTime: r.Condition.DaysSinceCustomTime, - DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime, - MatchesPrefix: r.Condition.MatchesPrefix, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - MatchesSuffix: r.Condition.MatchesSuffix, - NumNewerVersions: r.Condition.NumNewerVersions, - }, - } - - // AllObjects takes precedent when both AllObjects and AgeInDays are set - // Rationale: If you've opted into using AllObjects, it makes sense that you - // understand the implications of how this option works with AgeInDays. 
- if r.Condition.AllObjects { - rr.Condition.Age = googleapi.Int64(0) - rr.Condition.ForceSendFields = []string{"Age"} - } else if r.Condition.AgeInDays > 0 { - rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays) - } - - switch r.Condition.Liveness { - case LiveAndArchived: - rr.Condition.IsLive = nil - case Live: - rr.Condition.IsLive = googleapi.Bool(true) - case Archived: - rr.Condition.IsLive = googleapi.Bool(false) - } - - if !r.Condition.CreatedBefore.IsZero() { - rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) - } - if !r.Condition.CustomTimeBefore.IsZero() { - rr.Condition.CustomTimeBefore = r.Condition.CustomTimeBefore.Format(rfc3339Date) - } - if !r.Condition.NoncurrentTimeBefore.IsZero() { - rr.Condition.NoncurrentTimeBefore = r.Condition.NoncurrentTimeBefore.Format(rfc3339Date) - } - rl.Rule = append(rl.Rule, rr) - } - return &rl -} - -func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { - var rl storagepb.Bucket_Lifecycle - - for _, r := range l.Rules { - rr := &storagepb.Bucket_Lifecycle_Rule{ - Action: &storagepb.Bucket_Lifecycle_Rule_Action{ - Type: r.Action.Type, - StorageClass: r.Action.StorageClass, - }, - Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{ - // Note: The Apiary types use int64 (even though the Discovery - // doc states "format: int32"), so the client types used int64, - // but the proto uses int32 so we have a potentially lossy - // conversion. - DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)), - DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)), - MatchesPrefix: r.Condition.MatchesPrefix, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - MatchesSuffix: r.Condition.MatchesSuffix, - NumNewerVersions: proto.Int32(int32(r.Condition.NumNewerVersions)), - }, - } - - // Only set AgeDays in the proto if it is non-zero, or if the user has set - // Condition.AllObjects. 
- if r.Condition.AgeInDays != 0 { - rr.Condition.AgeDays = proto.Int32(int32(r.Condition.AgeInDays)) - } - if r.Condition.AllObjects { - rr.Condition.AgeDays = proto.Int32(0) - } - - switch r.Condition.Liveness { - case LiveAndArchived: - rr.Condition.IsLive = nil - case Live: - rr.Condition.IsLive = proto.Bool(true) - case Archived: - rr.Condition.IsLive = proto.Bool(false) - } - - if !r.Condition.CreatedBefore.IsZero() { - rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore) - } - if !r.Condition.CustomTimeBefore.IsZero() { - rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore) - } - if !r.Condition.NoncurrentTimeBefore.IsZero() { - rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore) - } - rl.Rule = append(rl.Rule, rr) - } - return &rl -} - -func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { - var l Lifecycle - if rl == nil { - return l - } - for _, rr := range rl.Rule { - r := LifecycleRule{ - Action: LifecycleAction{ - Type: rr.Action.Type, - StorageClass: rr.Action.StorageClass, - }, - Condition: LifecycleCondition{ - DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime, - DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime, - MatchesPrefix: rr.Condition.MatchesPrefix, - MatchesStorageClasses: rr.Condition.MatchesStorageClass, - MatchesSuffix: rr.Condition.MatchesSuffix, - NumNewerVersions: rr.Condition.NumNewerVersions, - }, - } - if rr.Condition.Age != nil { - r.Condition.AgeInDays = *rr.Condition.Age - if *rr.Condition.Age == 0 { - r.Condition.AllObjects = true - } - } - - if rr.Condition.IsLive == nil { - r.Condition.Liveness = LiveAndArchived - } else if *rr.Condition.IsLive { - r.Condition.Liveness = Live - } else { - r.Condition.Liveness = Archived - } - - if rr.Condition.CreatedBefore != "" { - r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) - } - if rr.Condition.CustomTimeBefore != "" { - r.Condition.CustomTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.CustomTimeBefore) - } - if rr.Condition.NoncurrentTimeBefore != "" { - r.Condition.NoncurrentTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.NoncurrentTimeBefore) - } - l.Rules = append(l.Rules, r) - } - return l -} - -func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { - var l Lifecycle - if rl == nil { - return l - } - for _, rr := range rl.GetRule() { - r := LifecycleRule{ - Action: LifecycleAction{ - Type: rr.GetAction().GetType(), - StorageClass: rr.GetAction().GetStorageClass(), - }, - Condition: LifecycleCondition{ - AgeInDays: int64(rr.GetCondition().GetAgeDays()), - DaysSinceCustomTime: int64(rr.GetCondition().GetDaysSinceCustomTime()), - DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()), - MatchesPrefix: rr.GetCondition().GetMatchesPrefix(), - MatchesStorageClasses: rr.GetCondition().GetMatchesStorageClass(), - MatchesSuffix: rr.GetCondition().GetMatchesSuffix(), - NumNewerVersions: int64(rr.GetCondition().GetNumNewerVersions()), - }, - } - - // Only set Condition.AllObjects if AgeDays is zero, not if it is nil. 
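// Rough illustration of the lifecycle conversions above (toRawLifecycle/toProtoLifecycle):
// callers describe rules with the exported Lifecycle types and apply them through Update.
// The 30-day delete rule below is only an example; assumes context and
// cloud.google.com/go/storage are imported.
func expireOldVersions(ctx context.Context, bkt *storage.BucketHandle) error {
	lc := storage.Lifecycle{
		Rules: []storage.LifecycleRule{{
			Action: storage.LifecycleAction{Type: storage.DeleteAction},
			Condition: storage.LifecycleCondition{
				AgeInDays: 30,
				Liveness:  storage.Archived, // only noncurrent (archived) versions
			},
		}},
	}
	_, err := bkt.Update(ctx, storage.BucketAttrsToUpdate{Lifecycle: &lc})
	return err
}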
- if rr.GetCondition().AgeDays != nil && rr.GetCondition().GetAgeDays() == 0 { - r.Condition.AllObjects = true - } - - if rr.GetCondition().IsLive == nil { - r.Condition.Liveness = LiveAndArchived - } else if rr.GetCondition().GetIsLive() { - r.Condition.Liveness = Live - } else { - r.Condition.Liveness = Archived - } - - if rr.GetCondition().GetCreatedBefore() != nil { - r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore()) - } - if rr.GetCondition().GetCustomTimeBefore() != nil { - r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore()) - } - if rr.GetCondition().GetNoncurrentTimeBefore() != nil { - r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore()) - } - l.Rules = append(l.Rules, r) - } - return l -} - -func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { - if e == nil { - return nil - } - return &raw.BucketEncryption{ - DefaultKmsKeyName: e.DefaultKMSKeyName, - } -} - -func (e *BucketEncryption) toProtoBucketEncryption() *storagepb.Bucket_Encryption { - if e == nil { - return nil - } - return &storagepb.Bucket_Encryption{ - DefaultKmsKey: e.DefaultKMSKeyName, - } -} - -func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { - if e == nil { - return nil - } - return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} -} - -func toBucketEncryptionFromProto(e *storagepb.Bucket_Encryption) *BucketEncryption { - if e == nil { - return nil - } - return &BucketEncryption{DefaultKMSKeyName: e.GetDefaultKmsKey()} -} - -func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { - if b == nil { - return nil - } - return &raw.BucketLogging{ - LogBucket: b.LogBucket, - LogObjectPrefix: b.LogObjectPrefix, - } -} - -func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging { - if b == nil { - return nil - } - return &storagepb.Bucket_Logging{ - LogBucket: bucketResourceName(globalProjectAlias, b.LogBucket), - LogObjectPrefix: b.LogObjectPrefix, - } -} - -func toBucketLogging(b *raw.BucketLogging) *BucketLogging { - if b == nil { - return nil - } - return &BucketLogging{ - LogBucket: b.LogBucket, - LogObjectPrefix: b.LogObjectPrefix, - } -} - -func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging { - if b == nil { - return nil - } - lb := parseBucketName(b.GetLogBucket()) - return &BucketLogging{ - LogBucket: lb, - LogObjectPrefix: b.GetLogObjectPrefix(), - } -} - -func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { - if w == nil { - return nil - } - return &raw.BucketWebsite{ - MainPageSuffix: w.MainPageSuffix, - NotFoundPage: w.NotFoundPage, - } -} - -func (w *BucketWebsite) toProtoBucketWebsite() *storagepb.Bucket_Website { - if w == nil { - return nil - } - return &storagepb.Bucket_Website{ - MainPageSuffix: w.MainPageSuffix, - NotFoundPage: w.NotFoundPage, - } -} - -func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { - if w == nil { - return nil - } - return &BucketWebsite{ - MainPageSuffix: w.MainPageSuffix, - NotFoundPage: w.NotFoundPage, - } -} - -func toBucketWebsiteFromProto(w *storagepb.Bucket_Website) *BucketWebsite { - if w == nil { - return nil - } - return &BucketWebsite{ - MainPageSuffix: w.GetMainPageSuffix(), - NotFoundPage: w.GetNotFoundPage(), - } -} - -func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { - if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled { - return BucketPolicyOnly{} - } - lt, err := 
time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime) - if err != nil { - return BucketPolicyOnly{ - Enabled: true, - } - } - return BucketPolicyOnly{ - Enabled: true, - LockedTime: lt, - } -} - -func toBucketPolicyOnlyFromProto(b *storagepb.Bucket_IamConfig) BucketPolicyOnly { - if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { - return BucketPolicyOnly{} - } - return BucketPolicyOnly{ - Enabled: true, - LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), - } -} - -func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess { - if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled { - return UniformBucketLevelAccess{} - } - lt, err := time.Parse(time.RFC3339, b.UniformBucketLevelAccess.LockedTime) - if err != nil { - return UniformBucketLevelAccess{ - Enabled: true, - } - } - return UniformBucketLevelAccess{ - Enabled: true, - LockedTime: lt, - } -} - -func toUniformBucketLevelAccessFromProto(b *storagepb.Bucket_IamConfig) UniformBucketLevelAccess { - if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() { - return UniformBucketLevelAccess{} - } - return UniformBucketLevelAccess{ - Enabled: true, - LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(), - } -} - -func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevention { - if b == nil { - return PublicAccessPreventionUnknown - } - switch b.PublicAccessPrevention { - case publicAccessPreventionInherited, publicAccessPreventionUnspecified: - return PublicAccessPreventionInherited - case publicAccessPreventionEnforced: - return PublicAccessPreventionEnforced - default: - return PublicAccessPreventionUnknown - } -} - -func toPublicAccessPreventionFromProto(b *storagepb.Bucket_IamConfig) PublicAccessPrevention { - if b == nil { - return PublicAccessPreventionUnknown - } - switch b.GetPublicAccessPrevention() { - case publicAccessPreventionInherited, publicAccessPreventionUnspecified: - return PublicAccessPreventionInherited - case publicAccessPreventionEnforced: - return PublicAccessPreventionEnforced - default: - return PublicAccessPreventionUnknown - } -} - -func toRPO(b *raw.Bucket) RPO { - if b == nil { - return RPOUnknown - } - switch b.Rpo { - case rpoDefault: - return RPODefault - case rpoAsyncTurbo: - return RPOAsyncTurbo - default: - return RPOUnknown - } -} - -func toRPOFromProto(b *storagepb.Bucket) RPO { - if b == nil { - return RPOUnknown - } - switch b.GetRpo() { - case rpoDefault: - return RPODefault - case rpoAsyncTurbo: - return RPOAsyncTurbo - default: - return RPOUnknown - } -} - -func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig { - if c == nil { - return nil - } - return &CustomPlacementConfig{DataLocations: c.DataLocations} -} - -func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig { - if c == nil { - return nil - } - return &raw.BucketCustomPlacementConfig{ - DataLocations: c.DataLocations, - } -} - -func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig { - if c == nil { - return nil - } - return &storagepb.Bucket_CustomPlacementConfig{ - DataLocations: c.DataLocations, - } -} - -func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig { - if c == nil { - return nil - } - return &CustomPlacementConfig{DataLocations: c.GetDataLocations()} -} - -func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { - if a == nil { - return nil - 
} - // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. - return &raw.BucketAutoclass{ - Enabled: a.Enabled, - TerminalStorageClass: a.TerminalStorageClass, - } -} - -func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { - if a == nil { - return nil - } - // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. - ba := &storagepb.Bucket_Autoclass{ - Enabled: a.Enabled, - } - if a.TerminalStorageClass != "" { - ba.TerminalStorageClass = &a.TerminalStorageClass - } - return ba -} - -func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { - if a == nil || a.ToggleTime == "" { - return nil - } - ac := &Autoclass{ - Enabled: a.Enabled, - TerminalStorageClass: a.TerminalStorageClass, - } - // Return ToggleTime and TSCUpdateTime only if parsed with valid values. - t, err := time.Parse(time.RFC3339, a.ToggleTime) - if err == nil { - ac.ToggleTime = t - } - ut, err := time.Parse(time.RFC3339, a.TerminalStorageClassUpdateTime) - if err == nil { - ac.TerminalStorageClassUpdateTime = ut - } - return ac -} - -func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { - if a == nil || a.GetToggleTime().AsTime().Unix() == 0 { - return nil - } - return &Autoclass{ - Enabled: a.GetEnabled(), - ToggleTime: a.GetToggleTime().AsTime(), - TerminalStorageClass: a.GetTerminalStorageClass(), - TerminalStorageClassUpdateTime: a.GetTerminalStorageClassUpdateTime().AsTime(), - } -} - -// Objects returns an iterator over the objects in the bucket that match the -// Query q. If q is nil, no filtering is done. Objects will be iterated over -// lexicographically by name. -// -// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. -func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { - o := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.ListObjects(ctx, b.name, q, o...) -} - -// Retryer returns a bucket handle that is configured with custom retry -// behavior as specified by the options that are passed to it. All operations -// on the new handle will use the customized retry configuration. -// Retry options set on a object handle will take precedence over options set on -// the bucket handle. -// These retry options will merge with the client's retry configuration (if set) -// for the returned handle. Options passed into this method will take precedence -// over retry options on the client. Note that you must explicitly pass in each -// option you want to override. -func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle { - b2 := *b - var retry *retryConfig - if b.retry != nil { - // merge the options with the existing retry - retry = b.retry - } else { - retry = &retryConfig{} - } - for _, opt := range opts { - opt.apply(retry) - } - b2.retry = retry - b2.acl.retry = retry - b2.defaultObjectACL.retry = retry - return &b2 -} - -// An ObjectIterator is an iterator over ObjectAttrs. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -type ObjectIterator struct { - ctx context.Context - query Query - pageInfo *iterator.PageInfo - nextFunc func() error - items []*ObjectAttrs -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -// -// Note: This method is not safe for concurrent operations without explicit synchronization. -func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } - -// Next returns the next result. 
Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// In addition, if Next returns an error other than iterator.Done, all -// subsequent calls will return the same error. To continue iteration, a new -// `ObjectIterator` must be created. Since objects are ordered lexicographically -// by name, `Query.StartOffset` can be used to create a new iterator which will -// start at the desired place. See -// https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects. -// -// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will -// have a non-empty Prefix field, and a zero value for all other fields. These -// represent prefixes. -// -// Note: This method is not safe for concurrent operations without explicit synchronization. -func (it *ObjectIterator) Next() (*ObjectAttrs, error) { - if err := it.nextFunc(); err != nil { - return nil, err - } - item := it.items[0] - it.items = it.items[1:] - return item, nil -} - -// Buckets returns an iterator over the buckets in the project. You may -// optionally set the iterator's Prefix field to restrict the list to buckets -// whose names begin with the prefix. By default, all buckets in the project -// are returned. -// -// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. -func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { - o := makeStorageOpts(true, c.retry, "") - return c.tc.ListBuckets(ctx, projectID, o...) -} - -// A BucketIterator is an iterator over BucketAttrs. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -type BucketIterator struct { - // Prefix restricts the iterator to buckets whose names begin with it. - Prefix string - - ctx context.Context - projectID string - buckets []*BucketAttrs - pageInfo *iterator.PageInfo - nextFunc func() error -} - -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// Note: This method is not safe for concurrent operations without explicit synchronization. -func (it *BucketIterator) Next() (*BucketAttrs, error) { - if err := it.nextFunc(); err != nil { - return nil, err - } - b := it.buckets[0] - it.buckets = it.buckets[1:] - return b, nil -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -// -// Note: This method is not safe for concurrent operations without explicit synchronization. -func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } - -// RPO (Recovery Point Objective) configures the turbo replication feature. See -// https://cloud.google.com/storage/docs/managing-turbo-replication for more information. -type RPO int - -const ( - // RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO - // is not present in the bucket metadata, that is, the bucket is not dual-region. - // This value is also used if the RPO field is not set in a call to GCS. - RPOUnknown RPO = iota - - // RPODefault represents default replication. It is used to reset RPO on an - // existing bucket that has this field set to RPOAsyncTurbo. Otherwise it - // is equivalent to RPOUnknown, and is always ignored. This value is valid - // for dual- or multi-region buckets. 
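// The ObjectIterator and BucketIterator documented above follow the standard
// google.golang.org/api/iterator contract. A minimal draining sketch (the prefix and helper
// name are placeholders; assumes context, cloud.google.com/go/storage and
// google.golang.org/api/iterator are imported):
func listBackupObjects(ctx context.Context, bkt *storage.BucketHandle) ([]string, error) {
	var names []string
	it := bkt.Objects(ctx, &storage.Query{Prefix: "backups/"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return nil, err
		}
		names = append(names, attrs.Name)
	}
	return names, nil
}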
- RPODefault - - // RPOAsyncTurbo represents turbo replication and is used to enable Turbo - // Replication on a bucket. This value is only valid for dual-region buckets. - RPOAsyncTurbo - - rpoUnknown string = "" - rpoDefault = "DEFAULT" - rpoAsyncTurbo = "ASYNC_TURBO" -) - -func (rpo RPO) String() string { - switch rpo { - case RPODefault: - return rpoDefault - case RPOAsyncTurbo: - return rpoAsyncTurbo - default: - return rpoUnknown - } -} - -// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC. -// -// Hours, minutes, seconds, and nanoseconds are set to 0. -func protoDateToUTCTime(d *dpb.Date) time.Time { - return protoDateToTime(d, time.UTC) -} - -// protoDateToTime returns a new Time based on the google.type.Date and provided -// *time.Location. -// -// Hours, minutes, seconds, and nanoseconds are set to 0. -func protoDateToTime(d *dpb.Date, l *time.Location) time.Time { - return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l) -} - -// timeToProtoDate returns a new google.type.Date based on the provided time.Time. -// The location is ignored, as is anything more precise than the day. -func timeToProtoDate(t time.Time) *dpb.Date { - return &dpb.Date{ - Year: int32(t.Year()), - Month: int32(t.Month()), - Day: int32(t.Day()), - } -} diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go deleted file mode 100644 index 4906b1d1f..000000000 --- a/vendor/cloud.google.com/go/storage/client.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "io" - "time" - - "cloud.google.com/go/iam/apiv1/iampb" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/option" -) - -// TODO(noahdietz): Move existing factory methods to this file. - -// storageClient is an internal-only interface designed to separate the -// transport-specific logic of making Storage API calls from the logic of the -// client library. -// -// Implementation requirements beyond implementing the interface include: -// * factory method(s) must accept a `userProject string` param -// * `settings` must be retained per instance -// * `storageOption`s must be resolved in the order they are received -// * all API errors must be wrapped in the gax-go APIError type -// * any unimplemented interface methods must return a StorageUnimplementedErr -// -// TODO(noahdietz): This interface is currently not used in the production code -// paths -type storageClient interface { - - // Top-level methods. - - GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) - CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) - ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator - Close() error - - // Bucket methods. 
- - DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error - GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) - UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) - LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error - ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator - - // Object metadata methods. - - DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error - GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) - UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) - - // Default Object ACL methods. - - DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error - ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) - UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error - - // Bucket ACL methods. - - DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error - ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) - UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error - - // Object ACL methods. - - DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error - ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) - UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error - - // Media operations. - - ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) - RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) - - NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error) - OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) - - // IAM methods. - - GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) - SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error - TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) - - // HMAC Key methods. - - GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) - ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator - UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) - CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) - DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error - - // Notification methods. 
- ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error) - CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error) - DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error -} - -// settings contains transport-agnostic configuration for API calls made via -// the storageClient inteface. All implementations must utilize settings -// and respect those that are applicable. -type settings struct { - // retry is the complete retry configuration to use when evaluating if an - // API call should be retried. - retry *retryConfig - - // gax is a set of gax.CallOption to be conveyed to gax.Invoke. - // Note: Not all storageClient interfaces will must use gax.Invoke. - gax []gax.CallOption - - // idempotent indicates if the call is idempotent or not when considering - // if the call should be retired or not. - idempotent bool - - // clientOption is a set of option.ClientOption to be used during client - // transport initialization. See https://pkg.go.dev/google.golang.org/api/option - // for a list of supported options. - clientOption []option.ClientOption - - // userProject is the user project that should be billed for the request. - userProject string -} - -func initSettings(opts ...storageOption) *settings { - s := &settings{} - resolveOptions(s, opts...) - return s -} - -func resolveOptions(s *settings, opts ...storageOption) { - for _, o := range opts { - o.Apply(s) - } -} - -// callSettings is a helper for resolving storage options against the settings -// in the context of an individual call. This is to ensure that client-level -// default settings are not mutated by two different calls getting options. -// -// Example: s := callSettings(c.settings, opts...) -func callSettings(defaults *settings, opts ...storageOption) *settings { - if defaults == nil { - return nil - } - // This does not make a deep copy of the pointer/slice fields, but all - // options replace the settings fields rather than modify their values in - // place. - cs := *defaults - resolveOptions(&cs, opts...) - return &cs -} - -// makeStorageOpts is a helper for generating a set of storageOption based on -// idempotency, retryConfig, and userProject. All top-level client operations -// will generally have to pass these options through the interface. -func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption { - opts := []storageOption{idempotent(isIdempotent)} - if retry != nil { - opts = append(opts, withRetryConfig(retry)) - } - if userProject != "" { - opts = append(opts, withUserProject(userProject)) - } - return opts -} - -// storageOption is the transport-agnostic call option for the storageClient -// interface. 
-type storageOption interface { - Apply(s *settings) -} - -func withGAXOptions(opts ...gax.CallOption) storageOption { - return &gaxOption{opts} -} - -type gaxOption struct { - opts []gax.CallOption -} - -func (o *gaxOption) Apply(s *settings) { s.gax = o.opts } - -func withRetryConfig(rc *retryConfig) storageOption { - return &retryOption{rc} -} - -type retryOption struct { - rc *retryConfig -} - -func (o *retryOption) Apply(s *settings) { s.retry = o.rc } - -func idempotent(i bool) storageOption { - return &idempotentOption{i} -} - -type idempotentOption struct { - idempotency bool -} - -func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency } - -func withClientOptions(opts ...option.ClientOption) storageOption { - return &clientOption{opts: opts} -} - -type clientOption struct { - opts []option.ClientOption -} - -func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts } - -func withUserProject(project string) storageOption { - return &userProjectOption{project} -} - -type userProjectOption struct { - project string -} - -func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project } - -type openWriterParams struct { - // Writer configuration - - // ctx is the context used by the writer routine to make all network calls - // and to manage the writer routine - see `Writer.ctx`. - // Required. - ctx context.Context - // chunkSize - see `Writer.ChunkSize`. - // Optional. - chunkSize int - // chunkRetryDeadline - see `Writer.ChunkRetryDeadline`. - // Optional. - chunkRetryDeadline time.Duration - - // Object/request properties - - // bucket - see `Writer.o.bucket`. - // Required. - bucket string - // attrs - see `Writer.ObjectAttrs`. - // Required. - attrs *ObjectAttrs - // conds - see `Writer.o.conds`. - // Optional. - conds *Conditions - // encryptionKey - see `Writer.o.encryptionKey` - // Optional. - encryptionKey []byte - // sendCRC32C - see `Writer.SendCRC32C`. - // Optional. - sendCRC32C bool - - // Writer callbacks - - // donec - see `Writer.donec`. - // Required. - donec chan struct{} - // setError callback for reporting errors - see `Writer.error`. - // Required. - setError func(error) - // progress callback for reporting upload progress - see `Writer.progress`. - // Required. - progress func(int64) - // setObj callback for reporting the resulting object - see `Writer.obj`. - // Required. - setObj func(*ObjectAttrs) -} - -type newRangeReaderParams struct { - bucket string - conds *Conditions - encryptionKey []byte - gen int64 - length int64 - object string - offset int64 - readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently. -} - -type updateObjectParams struct { - bucket, object string - uattrs *ObjectAttrsToUpdate - gen int64 - encryptionKey []byte - conds *Conditions - overrideRetention *bool -} - -type composeObjectRequest struct { - dstBucket string - dstObject destinationObject - srcs []sourceObject - predefinedACL string - sendCRC32C bool -} - -type sourceObject struct { - name string - bucket string - gen int64 - conds *Conditions - encryptionKey []byte -} - -type destinationObject struct { - name string - bucket string - conds *Conditions - attrs *ObjectAttrs // attrs to set on the destination object. 
- encryptionKey []byte - keyName string -} - -type rewriteObjectRequest struct { - srcObject sourceObject - dstObject destinationObject - predefinedACL string - token string - maxBytesRewrittenPerCall int64 -} - -type rewriteObjectResponse struct { - resource *ObjectAttrs - done bool - written int64 - size int64 - token string -} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go deleted file mode 100644 index a0b9a2683..000000000 --- a/vendor/cloud.google.com/go/storage/copy.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - - "cloud.google.com/go/internal/trace" -) - -// CopierFrom creates a Copier that can copy src to dst. -// You can immediately call Run on the returned Copier, or -// you can configure it first. -// -// For Requester Pays buckets, the user project of dst is billed, unless it is empty, -// in which case the user project of src is billed. -func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { - return &Copier{dst: dst, src: src} -} - -// A Copier copies a source object to a destination. -type Copier struct { - // ObjectAttrs are optional attributes to set on the destination object. - // Any attributes must be initialized before any calls on the Copier. Nil - // or zero-valued attributes are ignored. - ObjectAttrs - - // RewriteToken can be set before calling Run to resume a copy - // operation. After Run returns a non-nil error, RewriteToken will - // have been updated to contain the value needed to resume the copy. - RewriteToken string - - // ProgressFunc can be used to monitor the progress of a multi-RPC copy - // operation. If ProgressFunc is not nil and copying requires multiple - // calls to the underlying service (see - // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then - // ProgressFunc will be invoked after each call with the number of bytes of - // content copied so far and the total size in bytes of the source object. - // - // ProgressFunc is intended to make upload progress available to the - // application. For example, the implementation of ProgressFunc may update - // a progress bar in the application's UI, or log the result of - // float64(copiedBytes)/float64(totalBytes). - // - // ProgressFunc should return quickly without blocking. - ProgressFunc func(copiedBytes, totalBytes uint64) - - // The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K, - // that will be used to encrypt the object. Overrides the object's KMSKeyName, if - // any. - // - // Providing both a DestinationKMSKeyName and a customer-supplied encryption key - // (via ObjectHandle.Key) on the destination object will result in an error when - // Run is called. - DestinationKMSKeyName string - - dst, src *ObjectHandle - - // The maximum number of bytes that will be rewritten per rewrite request. 
- // Most callers shouldn't need to specify this parameter - it is primarily - // in place to support testing. If specified the value must be an integral - // multiple of 1 MiB (1048576). Also, this only applies to requests where - // the source and destination span locations and/or storage classes. Finally, - // this value must not change across rewrite calls else you'll get an error - // that the `rewriteToken` is invalid. - maxBytesRewrittenPerCall int64 -} - -// Run performs the copy. -func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run") - defer func() { trace.EndSpan(ctx, err) }() - - if err := c.src.validate(); err != nil { - return nil, err - } - if err := c.dst.validate(); err != nil { - return nil, err - } - if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { - return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") - } - if c.dst.gen != defaultGen { - return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen) - } - // Convert destination attributes to raw form, omitting the bucket. - // If the bucket is included but name or content-type aren't, the service - // returns a 400 with "Required" as the only message. Omitting the bucket - // does not cause any problems. - req := &rewriteObjectRequest{ - srcObject: sourceObject{ - name: c.src.object, - bucket: c.src.bucket, - gen: c.src.gen, - conds: c.src.conds, - encryptionKey: c.src.encryptionKey, - }, - dstObject: destinationObject{ - name: c.dst.object, - bucket: c.dst.bucket, - conds: c.dst.conds, - attrs: &c.ObjectAttrs, - encryptionKey: c.dst.encryptionKey, - keyName: c.DestinationKMSKeyName, - }, - predefinedACL: c.PredefinedACL, - token: c.RewriteToken, - maxBytesRewrittenPerCall: c.maxBytesRewrittenPerCall, - } - - isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) - var userProject string - if c.dst.userProject != "" { - userProject = c.dst.userProject - } else if c.src.userProject != "" { - userProject = c.src.userProject - } - opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject) - - for { - res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...) - if err != nil { - return nil, err - } - c.RewriteToken = res.token - req.token = res.token - if c.ProgressFunc != nil { - c.ProgressFunc(uint64(res.written), uint64(res.size)) - } - if res.done { // Finished successfully. - return res.resource, nil - } - } -} - -// ComposerFrom creates a Composer that can compose srcs into dst. -// You can immediately call Run on the returned Composer, or you can -// configure it first. -// -// The encryption key for the destination object will be used to decrypt all -// source objects and encrypt the destination object. It is an error -// to specify an encryption key for any of the source objects. -func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { - return &Composer{dst: dst, srcs: srcs} -} - -// A Composer composes source objects into a destination object. -// -// For Requester Pays buckets, the user project of dst is billed. -type Composer struct { - // ObjectAttrs are optional attributes to set on the destination object. - // Any attributes must be initialized before any calls on the Composer. Nil - // or zero-valued attributes are ignored. - ObjectAttrs - - // SendCRC specifies whether to transmit a CRC32C field. 
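// A usage sketch for the Copier described above: ProgressFunc reports rewrite progress, and
// Run keeps RewriteToken up to date so an interrupted copy can be resumed. Bucket, object and
// helper names are placeholders; assumes context, log and cloud.google.com/go/storage are
// imported.
func copyWithProgress(ctx context.Context, client *storage.Client) (*storage.ObjectAttrs, error) {
	src := client.Bucket("example-src").Object("backups/full.tar")
	dst := client.Bucket("example-dst").Object("backups/full.tar")

	copier := dst.CopierFrom(src)
	copier.ProgressFunc = func(copiedBytes, totalBytes uint64) {
		log.Printf("copied %d of %d bytes", copiedBytes, totalBytes)
	}
	return copier.Run(ctx)
}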
It should be set - // to true in addition to setting the Composer's CRC32C field, because zero - // is a valid CRC and normally a zero would not be transmitted. - // If a CRC32C is sent, and the data in the destination object does not match - // the checksum, the compose will be rejected. - SendCRC32C bool - - dst *ObjectHandle - srcs []*ObjectHandle -} - -// Run performs the compose operation. -func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run") - defer func() { trace.EndSpan(ctx, err) }() - - if err := c.dst.validate(); err != nil { - return nil, err - } - if c.dst.gen != defaultGen { - return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen) - } - if len(c.srcs) == 0 { - return nil, errors.New("storage: at least one source object must be specified") - } - - for _, src := range c.srcs { - if err := src.validate(); err != nil { - return nil, err - } - if src.bucket != c.dst.bucket { - return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) - } - if src.encryptionKey != nil { - return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) - } - } - - req := &composeObjectRequest{ - dstBucket: c.dst.bucket, - predefinedACL: c.PredefinedACL, - sendCRC32C: c.SendCRC32C, - } - req.dstObject = destinationObject{ - name: c.dst.object, - bucket: c.dst.bucket, - conds: c.dst.conds, - attrs: &c.ObjectAttrs, - encryptionKey: c.dst.encryptionKey, - } - for _, src := range c.srcs { - s := sourceObject{ - name: src.object, - bucket: src.bucket, - gen: src.gen, - conds: src.conds, - } - req.srcs = append(req.srcs, s) - } - - isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist) - opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject) - return c.dst.c.tc.ComposeObject(ctx, req, opts...) -} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go deleted file mode 100644 index 22adb744f..000000000 --- a/vendor/cloud.google.com/go/storage/doc.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package storage provides an easy way to work with Google Cloud Storage. -Google Cloud Storage stores data in named objects, which are grouped into buckets. - -More information about Google Cloud Storage is available at -https://cloud.google.com/storage/docs. - -See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts, -connection pooling and similar aspects of this package. - -# Creating a Client - -To start working with this package, create a [Client]: - - ctx := context.Background() - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } - -The client will use your default application credentials. 
Clients should be -reused instead of created as needed. The methods of [Client] are safe for -concurrent use by multiple goroutines. - -You may configure the client by passing in options from the [google.golang.org/api/option] -package. You may also use options defined in this package, such as [WithJSONReads]. - -If you only wish to access public data, you can create -an unauthenticated client with - - client, err := storage.NewClient(ctx, option.WithoutAuthentication()) - -To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST -environment variable to the address at which your emulator is running. This will -send requests to that address instead of to Cloud Storage. You can then create -and use a client as usual: - - // Set STORAGE_EMULATOR_HOST environment variable. - err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000") - if err != nil { - // TODO: Handle error. - } - - // Create client as usual. - client, err := storage.NewClient(ctx) - if err != nil { - // TODO: Handle error. - } - - // This request is now directed to http://localhost:9000/storage/v1/b - // instead of https://storage.googleapis.com/storage/v1/b - if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } - -Please note that there is no official emulator for Cloud Storage. - -# Buckets - -A Google Cloud Storage bucket is a collection of objects. To work with a -bucket, make a bucket handle: - - bkt := client.Bucket(bucketName) - -A handle is a reference to a bucket. You can have a handle even if the -bucket doesn't exist yet. To create a bucket in Google Cloud Storage, -call [BucketHandle.Create]: - - if err := bkt.Create(ctx, projectID, nil); err != nil { - // TODO: Handle error. - } - -Note that although buckets are associated with projects, bucket names are -global across all projects. - -Each bucket has associated metadata, represented in this package by -[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set -the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use -[BucketHandle.Attrs]: - - attrs, err := bkt.Attrs(ctx) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", - attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) - -# Objects - -An object holds arbitrary data as a sequence of bytes, like a file. You -refer to objects using a handle, just as with buckets, but unlike buckets -you don't explicitly create an object. Instead, the first time you write -to an object it will be created. You can use the standard Go [io.Reader] -and [io.Writer] interfaces to read and write object data: - - obj := bkt.Object("data") - // Write something to obj. - // w implements io.Writer. - w := obj.NewWriter(ctx) - // Write some text to obj. This will either create the object or overwrite whatever is there already. - if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { - // TODO: Handle error. - } - // Close, just like writing a file. - if err := w.Close(); err != nil { - // TODO: Handle error. - } - - // Read it back. - r, err := obj.NewReader(ctx) - if err != nil { - // TODO: Handle error. - } - defer r.Close() - if _, err := io.Copy(os.Stdout, r); err != nil { - // TODO: Handle error. - } - // Prints "This object contains text." - -Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]: - - objAttrs, err := obj.Attrs(ctx) - if err != nil { - // TODO: Handle error. 
- } - fmt.Printf("object %s has size %d and can be read using %s\n", - objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) - -# Listing objects - -Listing objects in a bucket is done with the [BucketHandle.Objects] method: - - query := &storage.Query{Prefix: ""} - - var names []string - it := bkt.Objects(ctx, query) - for { - attrs, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - log.Fatal(err) - } - names = append(names, attrs.Name) - } - -Objects are listed lexicographically by name. To filter objects -lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used: - - query := &storage.Query{ - Prefix: "", - StartOffset: "bar/", // Only list objects lexicographically >= "bar/" - EndOffset: "foo/", // Only list objects lexicographically < "foo/" - } - - // ... as before - -If only a subset of object attributes is needed when listing, specifying this -subset using [Query.SetAttrSelection] may speed up the listing process: - - query := &storage.Query{Prefix: ""} - query.SetAttrSelection([]string{"Name"}) - - // ... as before - -# ACLs - -Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of -ACLRules, each of which specifies the role of a user, group or project. ACLs -are suitable for fine-grained control, but you may prefer using IAM to control -access at the project level (see [Cloud Storage IAM docs]. - -To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]: - - acls, err := obj.ACL().List(ctx) - if err != nil { - // TODO: Handle error. - } - for _, rule := range acls { - fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) - } - -You can also set and delete ACLs. - -# Conditions - -Every object has a generation and a metageneration. The generation changes -whenever the content changes, and the metageneration changes whenever the -metadata changes. [Conditions] let you check these values before an operation; -the operation only executes if the conditions match. You can use conditions to -prevent race conditions in read-modify-write operations. - -For example, say you've read an object's metadata into objAttrs. Now -you want to write to that object, but only if its contents haven't changed -since you read it. Here is how to express that: - - w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) - // Proceed with writing as above. - -# Signed URLs - -You can obtain a URL that lets anyone read or write an object for a limited time. -Signing a URL requires credentials authorized to sign a URL. To use the same -authentication that was used when instantiating the Storage client, use -[BucketHandle.SignedURL]. - - url, err := client.Bucket(bucketName).SignedURL(objectName, opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) - -You can also sign a URL without creating a client. See the documentation of -[SignedURL] for details. - - url, err := storage.SignedURL(bucketName, "shared-object", opts) - if err != nil { - // TODO: Handle error. - } - fmt.Println(url) - -# Post Policy V4 Signed Request - -A type of signed request that allows uploads through HTML forms directly to Cloud Storage with -temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised -by a user. - -For more information, please see the [XML POST Object docs] as well -as the documentation of [BucketHandle.GenerateSignedPostPolicyV4]. 
- - pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts) - if err != nil { - // TODO: Handle error. - } - fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) - -# Credential requirements for signing - -If the GoogleAccessID and PrivateKey option fields are not provided, they will -be automatically detected by [BucketHandle.SignedURL] and -[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true: - - you are authenticated to the Storage Client with a service account's - downloaded private key, either directly in code or by setting the - GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]), - - your application is running on Google Compute Engine (GCE), or - - you are logged into [gcloud using application default credentials] - with [impersonation enabled]. - -Detecting GoogleAccessID may not be possible if you are authenticated using a -token source or using [option.WithHTTPClient]. In this case, you can provide a -service account email for GoogleAccessID and the client will attempt to sign -the URL or Post Policy using that service account. - -To generate the signature, you must have: - - iam.serviceAccounts.signBlob permissions on the GoogleAccessID service - account, and - - the [IAM Service Account Credentials API] enabled (unless authenticating - with a downloaded private key). - -# Errors - -Errors returned by this client are often of the type [googleapi.Error]. -These errors can be introspected for more information by using [errors.As] -with the richer [googleapi.Error] type. For example: - - var e *googleapi.Error - if ok := errors.As(err, &e); ok { - if e.Code == 409 { ... } - } - -# Retrying failed requests - -Methods in this package may retry calls that fail with transient errors. -Retrying continues indefinitely unless the controlling context is canceled, the -client is closed, or a non-transient error is received. To stop retries from -continuing, use context timeouts or cancellation. - -The retry strategy in this library follows best practices for Cloud Storage. By -default, operations are retried only if they are idempotent, and exponential -backoff with jitter is employed. In addition, errors are only retried if they -are defined as transient by the service. See the [Cloud Storage retry docs] -for more information. - -Users can configure non-default retry behavior for a single library call (using -[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a -client (using [Client.SetRetry]). For example: - - o := client.Bucket(bucket).Object(object).Retryer( - // Use WithBackoff to change the timing of the exponential backoff. - storage.WithBackoff(gax.Backoff{ - Initial: 2 * time.Second, - }), - // Use WithPolicy to configure the idempotency policy. RetryAlways will - // retry the operation even if it is non-idempotent. - storage.WithPolicy(storage.RetryAlways), - ) - - // Use a context timeout to set an overall deadline on the call, including all - // potential retries. - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - // Delete an object using the specified strategy and timeout. - if err := o.Delete(ctx); err != nil { - // Handle err. - } - -# Sending Custom Headers - -You can add custom headers to any API call made by this package by using -[callctx.SetHeaders] on the context which is passed to the method. 
For example, -to add a [custom audit logging] header: - - ctx := context.Background() - ctx = callctx.SetHeaders(ctx, "x-goog-custom-audit-", "") - // Use client as usual with the context and the additional headers will be sent. - client.Bucket("my-bucket").Attrs(ctx) - -# Experimental gRPC API - -This package includes support for the Cloud Storage gRPC API, which is currently -in preview. This implementation uses gRPC rather than the current JSON & XML -APIs to make requests to Cloud Storage. If you would like to try the API, -please contact your GCP account rep for more information. The gRPC API is not -yet generally available, so it may be subject to breaking changes. - -To create a client which will use gRPC, use the alternate constructor: - - ctx := context.Background() - client, err := storage.NewGRPCClient(ctx) - if err != nil { - // TODO: Handle error. - } - // Use client as usual. - -If the application is running within GCP, users may get better performance by -enabling DirectPath (enabling requests to skip some proxy steps). To enable, -set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add -the following side-effect imports to your application: - - import ( - _ "google.golang.org/grpc/balancer/rls" - _ "google.golang.org/grpc/xds/googledirectpath" - ) - -[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam -[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object -[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy -[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth -[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login -[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account -[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview -[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata -*/ -package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/emulator_test.sh b/vendor/cloud.google.com/go/storage/emulator_test.sh deleted file mode 100644 index 7bad7cf39..000000000 --- a/vendor/cloud.google.com/go/storage/emulator_test.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.. 
- -# Fail on any error -set -eo pipefail - -# Display commands being run -set -x - -# Only run on Go 1.17+ -min_minor_ver=17 - -v=`go version | { read _ _ v _; echo ${v#go}; }` -comps=(${v//./ }) -minor_ver=${comps[1]} - -if [ "$minor_ver" -lt "$min_minor_ver" ]; then - echo minor version $minor_ver, skipping - exit 0 -fi - -export STORAGE_EMULATOR_HOST="http://localhost:9000" -export STORAGE_EMULATOR_HOST_GRPC="localhost:8888" - -DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench' -DEFAULT_IMAGE_TAG='latest' -DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG} -CONTAINER_NAME=storage_testbench - -# Note: --net=host makes the container bind directly to the Docker host’s network, -# with no network isolation. If we were to use port-mapping instead, reset connection errors -# would be captured differently and cause unexpected test behaviour. -# The host networking driver works only on Linux hosts. -# See more about using host networking: https://docs.docker.com/network/host/ -DOCKER_NETWORK="--net=host" -# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to -# differences in the network errors emitted by the system. -if [ `go env GOOS` == 'darwin' ]; then - DOCKER_NETWORK="-p 9000:9000 -p 8888:8888" -fi - -# Get the docker image for the testbench -docker pull $DOCKER_IMAGE - -# Start the testbench - -docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE -echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST" -sleep 1 - -# Stop the testbench & cleanup environment variables -function cleanup() { - echo "Cleanup testbench" - docker stop $CONTAINER_NAME - unset STORAGE_EMULATOR_HOST; - unset STORAGE_EMULATOR_HOST_GRPC; -} -trap cleanup EXIT - -# Check that the server is running - retry several times to allow for start-up time -response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null) - -if [[ $response != 200 ]] -then - echo "Testbench server did not start correctly" - exit 1 -fi - -# Start the gRPC server on port 8888. -echo "Starting the gRPC server on port 8888" -response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888") - -if [[ $response != 200 ]] -then - echo "Testbench gRPC server did not start correctly" - exit 1 -fi - -# Run tests -go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go deleted file mode 100644 index e9e959930..000000000 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ /dev/null @@ -1,1887 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "net/url" - "os" - - "cloud.google.com/go/iam/apiv1/iampb" - "cloud.google.com/go/internal/trace" - gapic "cloud.google.com/go/storage/internal/apiv2" - "cloud.google.com/go/storage/internal/apiv2/storagepb" - "github.com/googleapis/gax-go/v2" - "google.golang.org/api/googleapi" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" -) - -const ( - // defaultConnPoolSize is the default number of channels - // to initialize in the GAPIC gRPC connection pool. A larger - // connection pool may be necessary for jobs that require - // high throughput and/or leverage many concurrent streams - // if not running via DirectPath. - // - // This is only used for the gRPC client. - defaultConnPoolSize = 1 - - // maxPerMessageWriteSize is the maximum amount of content that can be sent - // per WriteObjectRequest message. A buffer reaching this amount will - // precipitate a flush of the buffer. It is only used by the gRPC Writer - // implementation. - maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES) - - // globalProjectAlias is the project ID alias used for global buckets. - // - // This is only used for the gRPC API. - globalProjectAlias = "_" - - // msgEntityNotSupported indicates ACL entites using project ID are not currently supported. - // - // This is only used for the gRPC API. - msgEntityNotSupported = "The gRPC API currently does not support ACL entities using project ID, use project numbers instead" -) - -// defaultGRPCOptions returns a set of the default client options -// for gRPC client initialization. -func defaultGRPCOptions() []option.ClientOption { - defaults := []option.ClientOption{ - option.WithGRPCConnectionPool(defaultConnPoolSize), - } - - // Set emulator options for gRPC if an emulator was specified. Note that in a - // hybrid client, STORAGE_EMULATOR_HOST will set the host to use for HTTP and - // STORAGE_EMULATOR_HOST_GRPC will set the host to use for gRPC (when using a - // local emulator, HTTP and gRPC must use different ports, so this is - // necessary). - // - // TODO: When the newHybridClient is not longer used, remove - // STORAGE_EMULATOR_HOST_GRPC and use STORAGE_EMULATOR_HOST for both the - // HTTP and gRPC based clients. - if host := os.Getenv("STORAGE_EMULATOR_HOST_GRPC"); host != "" { - // Strip the scheme from the emulator host. WithEndpoint does not take a - // scheme for gRPC. - host = stripScheme(host) - - defaults = append(defaults, - option.WithEndpoint(host), - option.WithGRPCDialOption(grpc.WithInsecure()), - option.WithoutAuthentication(), - ) - } else { - // Only enable DirectPath when the emulator is not being targeted. - defaults = append(defaults, internaloption.EnableDirectPath(true)) - } - - return defaults -} - -// grpcStorageClient is the gRPC API implementation of the transport-agnostic -// storageClient interface. -type grpcStorageClient struct { - raw *gapic.Client - settings *settings -} - -// newGRPCStorageClient initializes a new storageClient that uses the gRPC -// Storage API. -func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { - s := initSettings(opts...) - s.clientOption = append(defaultGRPCOptions(), s.clientOption...) 
- - config := newStorageConfig(s.clientOption...) - if config.readAPIWasSet { - return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads") - } - - g, err := gapic.NewClient(ctx, s.clientOption...) - if err != nil { - return nil, err - } - - return &grpcStorageClient{ - raw: g, - settings: s, - }, nil -} - -func (c *grpcStorageClient) Close() error { - return c.raw.Close() -} - -// Top-level methods. - -func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetServiceAccountRequest{ - Project: toProjectResource(project), - } - var resp *storagepb.ServiceAccount - err := run(ctx, func(ctx context.Context) error { - var err error - resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - return resp.EmailAddress, err -} - -func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) { - if enableObjectRetention != nil { - // TO-DO: implement ObjectRetention once available - see b/308194853 - return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC") - } - - s := callSettings(c.settings, opts...) - b := attrs.toProtoBucket() - b.Project = toProjectResource(project) - // If there is lifecycle information but no location, explicitly set - // the location. This is a GCS quirk/bug. - if b.GetLocation() == "" && b.GetLifecycle() != nil { - b.Location = "US" - } - - req := &storagepb.CreateBucketRequest{ - Parent: fmt.Sprintf("projects/%s", globalProjectAlias), - Bucket: b, - BucketId: bucket, - } - if attrs != nil { - req.PredefinedAcl = attrs.PredefinedACL - req.PredefinedDefaultObjectAcl = attrs.PredefinedDefaultObjectACL - } - - var battrs *BucketAttrs - err := run(ctx, func(ctx context.Context) error { - res, err := c.raw.CreateBucket(ctx, req, s.gax...) - - battrs = newBucketFromProto(res) - - return err - }, s.retry, s.idempotent) - - return battrs, err -} - -func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { - s := callSettings(c.settings, opts...) - it := &BucketIterator{ - ctx: ctx, - projectID: project, - } - - var gitr *gapic.BucketIterator - fetch := func(pageSize int, pageToken string) (token string, err error) { - - var buckets []*storagepb.Bucket - var next string - err = run(it.ctx, func(ctx context.Context) error { - // Initialize GAPIC-based iterator when pageToken is empty, which - // indicates that this fetch call is attempting to get the first page. - // - // Note: Initializing the GAPIC-based iterator lazily is necessary to - // capture the BucketIterator.Prefix set by the user *after* the - // BucketIterator is returned to them from the veneer. - if pageToken == "" { - req := &storagepb.ListBucketsRequest{ - Parent: toProjectResource(it.projectID), - Prefix: it.Prefix, - } - gitr = c.raw.ListBuckets(ctx, req, s.gax...) 
- } - buckets, next, err = gitr.InternalFetch(pageSize, pageToken) - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - - for _, bkt := range buckets { - b := newBucketFromProto(bkt) - it.buckets = append(it.buckets, b) - } - - return next, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.buckets) }, - func() interface{} { b := it.buckets; it.buckets = nil; return b }) - - return it -} - -// Bucket methods. - -func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteBucketRequest{ - Name: bucketResourceName(globalProjectAlias, bucket), - } - if err := applyBucketCondsProto("grpcStorageClient.DeleteBucket", conds, req); err != nil { - return err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - return run(ctx, func(ctx context.Context) error { - return c.raw.DeleteBucket(ctx, req, s.gax...) - }, s.retry, s.idempotent) -} - -func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetBucketRequest{ - Name: bucketResourceName(globalProjectAlias, bucket), - ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, - } - if err := applyBucketCondsProto("grpcStorageClient.GetBucket", conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - var battrs *BucketAttrs - err := run(ctx, func(ctx context.Context) error { - res, err := c.raw.GetBucket(ctx, req, s.gax...) - - battrs = newBucketFromProto(res) - - return err - }, s.retry, s.idempotent) - - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - return nil, ErrBucketNotExist - } - - return battrs, err -} -func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) 
- b := uattrs.toProtoBucket() - b.Name = bucketResourceName(globalProjectAlias, bucket) - req := &storagepb.UpdateBucketRequest{ - Bucket: b, - PredefinedAcl: uattrs.PredefinedACL, - PredefinedDefaultObjectAcl: uattrs.PredefinedDefaultObjectACL, - } - if err := applyBucketCondsProto("grpcStorageClient.UpdateBucket", conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - var paths []string - fieldMask := &fieldmaskpb.FieldMask{ - Paths: paths, - } - if uattrs.CORS != nil { - fieldMask.Paths = append(fieldMask.Paths, "cors") - } - if uattrs.DefaultEventBasedHold != nil { - fieldMask.Paths = append(fieldMask.Paths, "default_event_based_hold") - } - if uattrs.RetentionPolicy != nil { - fieldMask.Paths = append(fieldMask.Paths, "retention_policy") - } - if uattrs.VersioningEnabled != nil { - fieldMask.Paths = append(fieldMask.Paths, "versioning") - } - if uattrs.RequesterPays != nil { - fieldMask.Paths = append(fieldMask.Paths, "billing") - } - if uattrs.BucketPolicyOnly != nil || uattrs.UniformBucketLevelAccess != nil || uattrs.PublicAccessPrevention != PublicAccessPreventionUnknown { - fieldMask.Paths = append(fieldMask.Paths, "iam_config") - } - if uattrs.Encryption != nil { - fieldMask.Paths = append(fieldMask.Paths, "encryption") - } - if uattrs.Lifecycle != nil { - fieldMask.Paths = append(fieldMask.Paths, "lifecycle") - } - if uattrs.Logging != nil { - fieldMask.Paths = append(fieldMask.Paths, "logging") - } - if uattrs.Website != nil { - fieldMask.Paths = append(fieldMask.Paths, "website") - } - if uattrs.PredefinedACL != "" { - // In cases where PredefinedACL is set, Acl is cleared. - fieldMask.Paths = append(fieldMask.Paths, "acl") - } - if uattrs.PredefinedDefaultObjectACL != "" { - // In cases where PredefinedDefaultObjectACL is set, DefaultObjectAcl is cleared. - fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - if uattrs.acl != nil { - // In cases where acl is set by UpdateBucketACL method. - fieldMask.Paths = append(fieldMask.Paths, "acl") - } - if uattrs.defaultObjectACL != nil { - // In cases where defaultObjectACL is set by UpdateBucketACL method. - fieldMask.Paths = append(fieldMask.Paths, "default_object_acl") - } - if uattrs.StorageClass != "" { - fieldMask.Paths = append(fieldMask.Paths, "storage_class") - } - if uattrs.RPO != RPOUnknown { - fieldMask.Paths = append(fieldMask.Paths, "rpo") - } - if uattrs.Autoclass != nil { - fieldMask.Paths = append(fieldMask.Paths, "autoclass") - } - - for label := range uattrs.setLabels { - fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) - } - - // Delete a label by not including it in Bucket.Labels but adding the key to the update mask. - for label := range uattrs.deleteLabels { - fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) - } - - req.UpdateMask = fieldMask - - var battrs *BucketAttrs - err := run(ctx, func(ctx context.Context) error { - res, err := c.raw.UpdateBucket(ctx, req, s.gax...) - battrs = newBucketFromProto(res) - return err - }, s.retry, s.idempotent) - - return battrs, err -} -func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) 
- req := &storagepb.LockBucketRetentionPolicyRequest{ - Bucket: bucketResourceName(globalProjectAlias, bucket), - } - if err := applyBucketCondsProto("grpcStorageClient.LockBucketRetentionPolicy", conds, req); err != nil { - return err - } - - return run(ctx, func(ctx context.Context) error { - _, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - -} -func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { - s := callSettings(c.settings, opts...) - it := &ObjectIterator{ - ctx: ctx, - } - if q != nil { - it.query = *q - } - req := &storagepb.ListObjectsRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - Prefix: it.query.Prefix, - Delimiter: it.query.Delimiter, - Versions: it.query.Versions, - LexicographicStart: it.query.StartOffset, - LexicographicEnd: it.query.EndOffset, - IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, - MatchGlob: it.query.MatchGlob, - ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - fetch := func(pageSize int, pageToken string) (token string, err error) { - // IncludeFoldersAsPrefixes is not supported for gRPC - // TODO: remove this when support is added in the proto. - if it.query.IncludeFoldersAsPrefixes { - return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC") - } - var objects []*storagepb.Object - var gitr *gapic.ObjectIterator - err = run(it.ctx, func(ctx context.Context) error { - gitr = c.raw.ListObjects(ctx, req, s.gax...) - it.ctx = ctx - objects, token, err = gitr.InternalFetch(pageSize, pageToken) - return err - }, s.retry, s.idempotent) - if err != nil { - if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { - err = ErrBucketNotExist - } - return "", err - } - - for _, obj := range objects { - b := newObjectFromProto(obj) - it.items = append(it.items, b) - } - - // Response is always non-nil after a successful request. - res := gitr.Response.(*storagepb.ListObjectsResponse) - for _, prefix := range res.GetPrefixes() { - it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) - } - - return token, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.items) }, - func() interface{} { b := it.items; it.items = nil; return b }) - - return it -} - -// Object metadata methods. - -func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteObjectRequest{ - Bucket: bucketResourceName(globalProjectAlias, bucket), - Object: object, - } - if err := applyCondsProto("grpcStorageClient.DeleteObject", gen, conds, req); err != nil { - return err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - err := run(ctx, func(ctx context.Context) error { - return c.raw.DeleteObject(ctx, req, s.gax...) - }, s.retry, s.idempotent) - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - return ErrObjectNotExist - } - return err -} - -func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) 
- req := &storagepb.GetObjectRequest{ - Bucket: bucketResourceName(globalProjectAlias, bucket), - Object: object, - // ProjectionFull by default. - ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, - } - if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - if encryptionKey != nil { - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey) - } - - var attrs *ObjectAttrs - err := run(ctx, func(ctx context.Context) error { - res, err := c.raw.GetObject(ctx, req, s.gax...) - attrs = newObjectFromProto(res) - - return err - }, s.retry, s.idempotent) - - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - return nil, ErrObjectNotExist - } - - return attrs, err -} - -func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) { - uattrs := params.uattrs - if params.overrideRetention != nil || uattrs.Retention != nil { - // TO-DO: implement ObjectRetention once available - see b/308194853 - return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC") - } - s := callSettings(c.settings, opts...) - o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, params.bucket), params.object) - // For Update, generation is passed via the object message rather than a field on the request. - if params.gen >= 0 { - o.Generation = params.gen - } - req := &storagepb.UpdateObjectRequest{ - Object: o, - PredefinedAcl: uattrs.PredefinedACL, - } - if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, params.conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - if params.encryptionKey != nil { - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey) - } - - fieldMask := &fieldmaskpb.FieldMask{Paths: nil} - if uattrs.EventBasedHold != nil { - fieldMask.Paths = append(fieldMask.Paths, "event_based_hold") - } - if uattrs.TemporaryHold != nil { - fieldMask.Paths = append(fieldMask.Paths, "temporary_hold") - } - if uattrs.ContentType != nil { - fieldMask.Paths = append(fieldMask.Paths, "content_type") - } - if uattrs.ContentLanguage != nil { - fieldMask.Paths = append(fieldMask.Paths, "content_language") - } - if uattrs.ContentEncoding != nil { - fieldMask.Paths = append(fieldMask.Paths, "content_encoding") - } - if uattrs.ContentDisposition != nil { - fieldMask.Paths = append(fieldMask.Paths, "content_disposition") - } - if uattrs.CacheControl != nil { - fieldMask.Paths = append(fieldMask.Paths, "cache_control") - } - if !uattrs.CustomTime.IsZero() { - fieldMask.Paths = append(fieldMask.Paths, "custom_time") - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. 
- if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 { - fieldMask.Paths = append(fieldMask.Paths, "acl") - } - - if uattrs.Metadata != nil { - // We don't support deleting a specific metadata key; metadata is deleted - // as a whole if provided an empty map, so we do not use dot notation here - if len(uattrs.Metadata) == 0 { - fieldMask.Paths = append(fieldMask.Paths, "metadata") - } else { - // We can, however, use dot notation for adding keys - for key := range uattrs.Metadata { - fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("metadata.%s", key)) - } - } - } - - req.UpdateMask = fieldMask - - var attrs *ObjectAttrs - err := run(ctx, func(ctx context.Context) error { - res, err := c.raw.UpdateObject(ctx, req, s.gax...) - attrs = newObjectFromProto(res) - return err - }, s.retry, s.idempotent) - if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { - return nil, ErrObjectNotExist - } - - return attrs, err -} - -// Default Object ACL methods. - -func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve BucketAttrs. - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return err - } - // Delete the entity and copy other remaining ACL entities. - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - // Return error if entity is not found or a project ID is used. - invalidEntity := true - var acl []ACLRule - for _, a := range attrs.DefaultObjectACL { - if a.Entity != entity { - acl = append(acl, a) - } - if a.Entity == entity { - invalidEntity = false - } - } - if invalidEntity { - return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.DefaultObjectACL, msgEntityNotSupported) - } - uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} - // Call UpdateBucket with a MetagenerationMatch precondition set. - if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { - return err - } - return nil -} - -func (c *grpcStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return nil, err - } - return attrs.DefaultObjectACL, nil -} - -func (c *grpcStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve BucketAttrs. - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return err - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - var acl []ACLRule - aclRule := ACLRule{Entity: entity, Role: role} - acl = append(attrs.DefaultObjectACL, aclRule) - uattrs := &BucketAttrsToUpdate{defaultObjectACL: acl} - // Call UpdateBucket with a MetagenerationMatch precondition set. - if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { - return err - } - return nil -} - -// Bucket ACL methods. 
- -func (c *grpcStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve BucketAttrs. - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return err - } - // Delete the entity and copy other remaining ACL entities. - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - // Return error if entity is not found or a project ID is used. - invalidEntity := true - var acl []ACLRule - for _, a := range attrs.ACL { - if a.Entity != entity { - acl = append(acl, a) - } - if a.Entity == entity { - invalidEntity = false - } - } - if invalidEntity { - return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) - } - uattrs := &BucketAttrsToUpdate{acl: acl} - // Call UpdateBucket with a MetagenerationMatch precondition set. - if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { - return err - } - return nil -} - -func (c *grpcStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return nil, err - } - return attrs.ACL, nil -} - -func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve BucketAttrs. - attrs, err := c.GetBucket(ctx, bucket, nil, opts...) - if err != nil { - return err - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - var acl []ACLRule - aclRule := ACLRule{Entity: entity, Role: role} - acl = append(attrs.ACL, aclRule) - uattrs := &BucketAttrsToUpdate{acl: acl} - // Call UpdateBucket with a MetagenerationMatch precondition set. - if _, err = c.UpdateBucket(ctx, bucket, uattrs, &BucketConditions{MetagenerationMatch: attrs.MetaGeneration}, opts...); err != nil { - return err - } - return nil -} - -// Object ACL methods. - -func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve ObjectAttrs. - attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) - if err != nil { - return err - } - // Delete the entity and copy other remaining ACL entities. - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - // Return error if entity is not found or a project ID is used. - invalidEntity := true - var acl []ACLRule - for _, a := range attrs.ACL { - if a.Entity != entity { - acl = append(acl, a) - } - if a.Entity == entity { - invalidEntity = false - } - } - if invalidEntity { - return fmt.Errorf("storage: entity %v was not found on bucket %v, got %v. %v", entity, bucket, attrs.ACL, msgEntityNotSupported) - } - uattrs := &ObjectAttrsToUpdate{ACL: acl} - // Call UpdateObject with the specified metageneration. 
- params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}} - if _, err = c.UpdateObject(ctx, params, opts...); err != nil { - return err - } - return nil -} - -// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. -// Selecting a specific generation of this object is not currently supported by the client. -func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { - o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) - if err != nil { - return nil, err - } - return o.ACL, nil -} - -func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - // There is no separate API for PATCH in gRPC. - // Make a GET call first to retrieve ObjectAttrs. - attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...) - if err != nil { - return err - } - // Note: This API currently does not support entites using project ID. - // Use project numbers in ACL entities. Pending b/233617896. - var acl []ACLRule - aclRule := ACLRule{Entity: entity, Role: role} - acl = append(attrs.ACL, aclRule) - uattrs := &ObjectAttrsToUpdate{ACL: acl} - // Call UpdateObject with the specified metageneration. - params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}} - if _, err = c.UpdateObject(ctx, params, opts...); err != nil { - return err - } - return nil -} - -// Media operations. - -func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket) - dstObjPb.Name = req.dstObject.name - - if req.sendCRC32C { - dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C - } - - srcs := []*storagepb.ComposeObjectRequest_SourceObject{} - for _, src := range req.srcs { - srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name, ObjectPreconditions: &storagepb.ComposeObjectRequest_SourceObject_ObjectPreconditions{}} - if src.gen >= 0 { - srcObjPb.Generation = src.gen - } - if err := applyCondsProto("ComposeObject source", defaultGen, src.conds, srcObjPb.ObjectPreconditions); err != nil { - return nil, err - } - srcs = append(srcs, srcObjPb) - } - - rawReq := &storagepb.ComposeObjectRequest{ - Destination: dstObjPb, - SourceObjects: srcs, - } - if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, rawReq); err != nil { - return nil, err - } - if req.predefinedACL != "" { - rawReq.DestinationPredefinedAcl = req.predefinedACL - } - if req.dstObject.encryptionKey != nil { - rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) - } - - var obj *storagepb.Object - var err error - if err := run(ctx, func(ctx context.Context) error { - obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) 
- return err - }, s.retry, s.idempotent); err != nil { - return nil, err - } - - return newObjectFromProto(obj), nil -} -func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { - s := callSettings(c.settings, opts...) - obj := req.dstObject.attrs.toProtoObject("") - call := &storagepb.RewriteObjectRequest{ - SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), - SourceObject: req.srcObject.name, - RewriteToken: req.token, - DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), - DestinationName: req.dstObject.name, - Destination: obj, - DestinationKmsKey: req.dstObject.keyName, - DestinationPredefinedAcl: req.predefinedACL, - CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey), - } - - // The userProject, whether source or destination project, is decided by the code calling the interface. - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { - return nil, err - } - if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil { - return nil, err - } - - if len(req.dstObject.encryptionKey) > 0 { - call.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey) - } - if len(req.srcObject.encryptionKey) > 0 { - srcParams := toProtoCommonObjectRequestParams(req.srcObject.encryptionKey) - call.CopySourceEncryptionAlgorithm = srcParams.GetEncryptionAlgorithm() - call.CopySourceEncryptionKeyBytes = srcParams.GetEncryptionKeyBytes() - call.CopySourceEncryptionKeySha256Bytes = srcParams.GetEncryptionKeySha256Bytes() - } - - call.MaxBytesRewrittenPerCall = req.maxBytesRewrittenPerCall - - var res *storagepb.RewriteResponse - var err error - - retryCall := func(ctx context.Context) error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } - - if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { - return nil, err - } - - r := &rewriteObjectResponse{ - done: res.GetDone(), - written: res.GetTotalBytesRewritten(), - size: res.GetObjectSize(), - token: res.GetRewriteToken(), - resource: newObjectFromProto(res.GetResource()), - } - - return r, nil -} - -func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - - b := bucketResourceName(globalProjectAlias, params.bucket) - req := &storagepb.ReadObjectRequest{ - Bucket: b, - Object: params.object, - CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey), - } - // The default is a negative value, which means latest. - if params.gen >= 0 { - req.Generation = params.gen - } - - // Define a function that initiates a Read with offset and length, assuming - // we have already read seen bytes. - reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { - // If the context has already expired, return immediately without making - // we call. 
- if err := ctx.Err(); err != nil { - return nil, nil, err - } - - cc, cancel := context.WithCancel(ctx) - - req.ReadOffset = params.offset + seen - - // Only set a ReadLimit if length is greater than zero, because <= 0 means - // to read it all. - if params.length > 0 { - req.ReadLimit = params.length - seen - } - - if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { - cancel() - return nil, nil, err - } - - var stream storagepb.Storage_ReadObjectClient - var msg *storagepb.ReadObjectResponse - var err error - - err = run(cc, func(ctx context.Context) error { - stream, err = c.raw.ReadObject(cc, req, s.gax...) - if err != nil { - return err - } - - msg, err = stream.Recv() - // These types of errors show up on the Recv call, rather than the - // initialization of the stream via ReadObject above. - if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { - return ErrObjectNotExist - } - - return err - }, s.retry, s.idempotent) - if err != nil { - // Close the stream context we just created to ensure we don't leak - // resources. - cancel() - return nil, nil, err - } - - return &readStreamResponse{stream, msg}, cancel, nil - } - - res, cancel, err := reopen(0) - if err != nil { - return nil, err - } - - // The first message was Recv'd on stream open, use it to populate the - // object metadata. - msg := res.response - obj := msg.GetMetadata() - // This is the size of the entire object, even if only a range was requested. - size := obj.GetSize() - - r = &Reader{ - Attrs: ReaderObjectAttrs{ - Size: size, - ContentType: obj.GetContentType(), - ContentEncoding: obj.GetContentEncoding(), - CacheControl: obj.GetCacheControl(), - LastModified: obj.GetUpdateTime().AsTime(), - Metageneration: obj.GetMetageneration(), - Generation: obj.GetGeneration(), - }, - reader: &gRPCReader{ - stream: res.stream, - reopen: reopen, - cancel: cancel, - size: size, - // Store the content from the first Recv in the - // client buffer for reading later. - leftovers: msg.GetChecksummedData().GetContent(), - settings: s, - zeroRange: params.length == 0, - }, - } - - cr := msg.GetContentRange() - if cr != nil { - r.Attrs.StartOffset = cr.GetStart() - r.remain = cr.GetEnd() - cr.GetStart() - } else { - r.remain = size - } - - // For a zero-length request, explicitly close the stream and set remaining - // bytes to zero. - if params.length == 0 { - r.remain = 0 - r.reader.Close() - } - - // Only support checksums when reading an entire object, not a range. - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { - r.wantCRC = checksums.GetCrc32C() - r.checkCRC = true - } - - return r, nil -} - -func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { - s := callSettings(c.settings, opts...) - - var offset int64 - errorf := params.setError - progress := params.progress - setObj := params.setObj - - pr, pw := io.Pipe() - gw := newGRPCWriter(c, params, pr) - gw.settings = s - if s.userProject != "" { - gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject) - } - - // This function reads the data sent to the pipe and sends sets of messages - // on the gRPC client-stream as the buffer is filled. - go func() { - defer close(params.donec) - - // Loop until there is an error or the Object has been finalized. - for { - // Note: This blocks until either the buffer is full or EOF is read. 
- recvd, doneReading, err := gw.read() - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - - if params.attrs.Retention != nil { - // TO-DO: remove once ObjectRetention is available - see b/308194853 - err = status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC") - errorf(err) - pr.CloseWithError(err) - return - } - // The chunk buffer is full, but there is no end in sight. This - // means that either: - // 1. A resumable upload will need to be used to send - // multiple chunks, until we are done reading data. Start a - // resumable upload if it has not already been started. - // 2. ChunkSize of zero may also have a full buffer, but a resumable - // session should not be initiated in this case. - if !doneReading && gw.upid == "" && params.chunkSize != 0 { - err = gw.startResumableUpload() - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - } - - o, off, err := gw.uploadBuffer(recvd, offset, doneReading) - if err != nil { - err = checkCanceled(err) - errorf(err) - pr.CloseWithError(err) - return - } - - // At this point, the current buffer has been uploaded. For resumable - // uploads and chunkSize = 0, capture the committed offset here in case - // the upload was not finalized and another chunk is to be uploaded. Call - // the progress function for resumable uploads only. - if gw.upid != "" || gw.chunkSize == 0 { - offset = off - } - if gw.upid != "" { - progress(offset) - } - - // When we are done reading data without errors, set the object and - // finish. - if doneReading { - // Build Object from server's response. - setObj(newObjectFromProto(o)) - return - } - } - }() - - return pw, nil -} - -// IAM methods. - -func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { - // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. - s := callSettings(c.settings, opts...) - req := &iampb.GetIamPolicyRequest{ - Resource: bucketResourceName(globalProjectAlias, resource), - Options: &iampb.GetPolicyOptions{ - RequestedPolicyVersion: version, - }, - } - var rp *iampb.Policy - err := run(ctx, func(ctx context.Context) error { - var err error - rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - - return rp, err -} - -func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { - // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. - s := callSettings(c.settings, opts...) - - req := &iampb.SetIamPolicyRequest{ - Resource: bucketResourceName(globalProjectAlias, resource), - Policy: policy, - } - - return run(ctx, func(ctx context.Context) error { - _, err := c.raw.SetIamPolicy(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) -} - -func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { - // TODO: Need a way to set UserProject, potentially in X-Goog-User-Project system parameter. - s := callSettings(c.settings, opts...) 
- req := &iampb.TestIamPermissionsRequest{ - Resource: bucketResourceName(globalProjectAlias, resource), - Permissions: permissions, - } - var res *iampb.TestIamPermissionsResponse - err := run(ctx, func(ctx context.Context) error { - var err error - res, err = c.raw.TestIamPermissions(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return res.Permissions, nil -} - -// HMAC Key methods. - -func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.GetHmacKeyRequest{ - AccessId: accessID, - Project: toProjectResource(project), - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func(ctx context.Context) error { - var err error - metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toHMACKeyFromProto(metadata), nil -} - -func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { - s := callSettings(c.settings, opts...) - req := &storagepb.ListHmacKeysRequest{ - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - ShowDeletedKeys: showDeletedKeys, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - it := &HMACKeysIterator{ - ctx: ctx, - projectID: project, - retry: s.retry, - } - fetch := func(pageSize int, pageToken string) (token string, err error) { - var hmacKeys []*storagepb.HmacKeyMetadata - err = run(it.ctx, func(ctx context.Context) error { - gitr := c.raw.ListHmacKeys(ctx, req, s.gax...) - hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - for _, hkmd := range hmacKeys { - hk := toHMACKeyFromProto(hkmd) - it.hmacKeys = append(it.hmacKeys, hk) - } - - return token, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) - return it -} - -func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - hk := &storagepb.HmacKeyMetadata{ - AccessId: accessID, - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - State: string(attrs.State), - Etag: attrs.Etag, - } - var paths []string - fieldMask := &fieldmaskpb.FieldMask{ - Paths: paths, - } - if attrs.State != "" { - fieldMask.Paths = append(fieldMask.Paths, "state") - } - req := &storagepb.UpdateHmacKeyRequest{ - HmacKey: hk, - UpdateMask: fieldMask, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func(ctx context.Context) error { - var err error - metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) 
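The GetIamPolicy/SetIamPolicy/TestIamPermissions implementations above back the bucket-level IAM handle in the public API. A minimal sketch with a placeholder bucket, member, and role:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	iamHandle := client.Bucket("example-bucket").IAM()

	// Read-modify-write of the bucket policy.
	policy, err := iamHandle.Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	policy.Add("user:alice@example.com", "roles/storage.objectViewer")
	if err := iamHandle.SetPolicy(ctx, policy); err != nil {
		log.Fatal(err)
	}

	// Check which of the given permissions the caller holds.
	perms, err := iamHandle.TestPermissions(ctx, []string{"storage.objects.get", "storage.objects.list"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("granted:", perms)
}
```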
- return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toHMACKeyFromProto(metadata), nil -} - -func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - req := &storagepb.CreateHmacKeyRequest{ - Project: toProjectResource(project), - ServiceAccountEmail: serviceAccountEmail, - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - var res *storagepb.CreateHmacKeyResponse - err := run(ctx, func(ctx context.Context) error { - var err error - res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - key := toHMACKeyFromProto(res.Metadata) - key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes) - - return key, nil -} - -func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteHmacKeyRequest{ - AccessId: accessID, - Project: toProjectResource(project), - } - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - return run(ctx, func(ctx context.Context) error { - return c.raw.DeleteHmacKey(ctx, req, s.gax...) - }, s.retry, s.idempotent) -} - -// Notification methods. - -func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - if s.userProject != "" { - ctx = setUserProjectMetadata(ctx, s.userProject) - } - req := &storagepb.ListNotificationConfigsRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - } - var notifications []*storagepb.NotificationConfig - err = run(ctx, func(ctx context.Context) error { - gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...) - for { - // PageSize is not set and fallbacks to the API default pageSize of 100. - items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) - if err != nil { - return err - } - notifications = append(notifications, items...) - // If there are no more results, nextPageToken is empty and err is nil. - if nextPageToken == "" { - return err - } - req.PageToken = nextPageToken - } - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - - return notificationsToMapFromProto(notifications), nil -} - -func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - req := &storagepb.CreateNotificationConfigRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - NotificationConfig: toProtoNotification(n), - } - var pbn *storagepb.NotificationConfig - err = run(ctx, func(ctx context.Context) error { - var err error - pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...) 
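The notification methods above correspond to the bucket-level helpers in the public API. A short sketch with placeholder project, topic, and bucket names:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("example-bucket")

	// Create a Pub/Sub notification config for the bucket.
	n, err := bkt.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "example-project",
		TopicID:        "example-topic",
		PayloadFormat:  storage.JSONPayload,
	})
	if err != nil {
		log.Fatal(err)
	}

	// List the configs, then delete the one just created.
	all, err := bkt.Notifications(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("configs:", len(all))

	if err := bkt.DeleteNotification(ctx, n.ID); err != nil {
		log.Fatal(err)
	}
}
```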
- return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toNotificationFromProto(pbn), err -} - -func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - req := &storagepb.DeleteNotificationConfigRequest{Name: id} - return run(ctx, func(ctx context.Context) error { - return c.raw.DeleteNotificationConfig(ctx, req, s.gax...) - }, s.retry, s.idempotent) -} - -// setUserProjectMetadata appends a project ID to the outgoing Context metadata -// via the x-goog-user-project system parameter defined at -// https://cloud.google.com/apis/docs/system-parameters. This is only for -// billing purposes, and is generally optional, except for requester-pays -// buckets. -func setUserProjectMetadata(ctx context.Context, project string) context.Context { - return metadata.AppendToOutgoingContext(ctx, "x-goog-user-project", project) -} - -type readStreamResponse struct { - stream storagepb.Storage_ReadObjectClient - response *storagepb.ReadObjectResponse -} - -type gRPCReader struct { - seen, size int64 - zeroRange bool - stream storagepb.Storage_ReadObjectClient - reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) - leftovers []byte - cancel context.CancelFunc - settings *settings -} - -// Read reads bytes into the user's buffer from an open gRPC stream. -func (r *gRPCReader) Read(p []byte) (int, error) { - // The entire object has been read by this reader, return EOF. - if r.size == r.seen || r.zeroRange { - return 0, io.EOF - } - - // No stream to read from, either never initialized or Close was called. - // Note: There is a potential concurrency issue if multiple routines are - // using the same reader. One encounters an error and the stream is closed - // and then reopened while the other routine attempts to read from it. - if r.stream == nil { - return 0, fmt.Errorf("reader has been closed") - } - - var n int - // Read leftovers and return what was available to conform to the Reader - // interface: https://pkg.go.dev/io#Reader. - if len(r.leftovers) > 0 { - n = copy(p, r.leftovers) - r.seen += int64(n) - r.leftovers = r.leftovers[n:] - return n, nil - } - - // Attempt to Recv the next message on the stream. - msg, err := r.recv() - if err != nil { - return 0, err - } - - // TODO: Determine if we need to capture incremental CRC32C for this - // chunk. The Object CRC32C checksum is captured when directed to read - // the entire Object. If directed to read a range, we may need to - // calculate the range's checksum for verification if the checksum is - // present in the response here. - // TODO: Figure out if we need to support decompressive transcoding - // https://cloud.google.com/storage/docs/transcoding. - content := msg.GetChecksummedData().GetContent() - n = copy(p[n:], content) - leftover := len(content) - n - if leftover > 0 { - // Wasn't able to copy all of the data in the message, store for - // future Read calls. - r.leftovers = content[n:] - } - r.seen += int64(n) - - return n, nil -} - -// Close cancels the read stream's context in order for it to be closed and -// collected. -func (r *gRPCReader) Close() error { - if r.cancel != nil { - r.cancel() - } - r.stream = nil - return nil -} - -// recv attempts to Recv the next message on the stream. 
In the event -// that a retryable error is encountered, the stream will be closed, reopened, -// and Recv again. This will attempt to Recv until one of the following is true: -// -// * Recv is successful -// * A non-retryable error is encountered -// * The Reader's context is canceled -// -// The last error received is the one that is returned, which could be from -// an attempt to reopen the stream. -func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { - msg, err := r.stream.Recv() - var shouldRetry = ShouldRetry - if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { - shouldRetry = r.settings.retry.shouldRetry - } - if err != nil && shouldRetry(err) { - // This will "close" the existing stream and immediately attempt to - // reopen the stream, but will backoff if further attempts are necessary. - // Reopening the stream Recvs the first message, so if retrying is - // successful, the next logical chunk will be returned. - msg, err = r.reopenStream() - } - - return msg, err -} - -// reopenStream "closes" the existing stream and attempts to reopen a stream and -// sets the Reader's stream and cancelStream properties in the process. -func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { - // Close existing stream and initialize new stream with updated offset. - r.Close() - - res, cancel, err := r.reopen(r.seen) - if err != nil { - return nil, err - } - r.stream = res.stream - r.cancel = cancel - return res.response, nil -} - -func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { - size := params.chunkSize - - // Round up chunksize to nearest 256KiB - if size%googleapi.MinUploadChunkSize != 0 { - size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize) - } - - // A completely bufferless upload is not possible as it is in JSON because - // the buffer must be provided to the message. However use the minimum size - // possible in this case. - if params.chunkSize == 0 { - size = googleapi.MinUploadChunkSize - } - - return &gRPCWriter{ - buf: make([]byte, size), - c: c, - ctx: params.ctx, - reader: r, - bucket: params.bucket, - attrs: params.attrs, - conds: params.conds, - encryptionKey: params.encryptionKey, - sendCRC32C: params.sendCRC32C, - chunkSize: params.chunkSize, - } -} - -// gRPCWriter is a wrapper around the the gRPC client-stream API that manages -// sending chunks of data provided by the user over the stream. -type gRPCWriter struct { - c *grpcStorageClient - buf []byte - reader io.Reader - - ctx context.Context - - bucket string - attrs *ObjectAttrs - conds *Conditions - encryptionKey []byte - settings *settings - - sendCRC32C bool - chunkSize int - - // The gRPC client-stream used for sending buffers. - stream storagepb.Storage_BidiWriteObjectClient - - // The Resumable Upload ID started by a gRPC-based Writer. - upid string -} - -// startResumableUpload initializes a Resumable Upload with gRPC and sets the -// upload ID on the Writer. -func (w *gRPCWriter) startResumableUpload() error { - spec, err := w.writeObjectSpec() - if err != nil { - return err - } - req := &storagepb.StartResumableWriteRequest{ - WriteObjectSpec: spec, - CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey), - } - // TODO: Currently the checksums are only sent on the request to initialize - // the upload, but in the future, we must also support sending it - // on the *last* message of the stream. 
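newGRPCWriter above rounds any requested chunk size up to the next multiple of googleapi.MinUploadChunkSize (256 KiB), and still allocates one minimum-size buffer when ChunkSize is zero. A standalone sketch of that arithmetic, with the rounding logic copied from the function above:

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

// roundedChunkSize mirrors the buffer sizing done by newGRPCWriter.
func roundedChunkSize(requested int) int {
	size := requested
	if size%googleapi.MinUploadChunkSize != 0 {
		size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize)
	}
	// A completely bufferless upload is not possible over gRPC, so a zero
	// ChunkSize still gets one minimum-size buffer.
	if requested == 0 {
		size = googleapi.MinUploadChunkSize
	}
	return size
}

func main() {
	for _, req := range []int{0, 1, 262144, 300000, 16 << 20} {
		fmt.Printf("requested=%d rounded=%d\n", req, roundedChunkSize(req))
	}
}
```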
- req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - return run(w.ctx, func(ctx context.Context) error { - upres, err := w.c.raw.StartResumableWrite(w.ctx, req) - w.upid = upres.GetUploadId() - return err - }, w.settings.retry, w.settings.idempotent) -} - -// queryProgress is a helper that queries the status of the resumable upload -// associated with the given upload ID. -func (w *gRPCWriter) queryProgress() (int64, error) { - var persistedSize int64 - err := run(w.ctx, func(ctx context.Context) error { - q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ - UploadId: w.upid, - }) - persistedSize = q.GetPersistedSize() - return err - }, w.settings.retry, true) - - // q.GetCommittedSize() will return 0 if q is nil. - return persistedSize, err -} - -// uploadBuffer uploads the buffer at the given offset using a bi-directional -// Write stream. It will open a new stream if necessary (on the first call or -// after resuming from failure). The resulting write offset after uploading the -// buffer is returned, as well as well as the final Object if the upload is -// completed. -// -// Returns object, persisted size, and any error that is not retriable. -func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, error) { - var shouldRetry = ShouldRetry - if w.settings.retry != nil && w.settings.retry.shouldRetry != nil { - shouldRetry = w.settings.retry.shouldRetry - } - - var err error - var lastWriteOfEntireObject bool - - sent := 0 - writeOffset := start - - toWrite := w.buf[:recvd] - - // Send a request with as many bytes as possible. - // Loop until all bytes are sent. - for { - bytesNotYetSent := recvd - sent - remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize - - if remainingDataFitsInSingleReq && doneReading { - lastWriteOfEntireObject = true - } - - // Send the maximum amount of bytes we can, unless we don't have that many. - bytesToSendInCurrReq := maxPerMessageWriteSize - if remainingDataFitsInSingleReq { - bytesToSendInCurrReq = bytesNotYetSent - } - - // Prepare chunk section for upload. - data := toWrite[sent : sent+bytesToSendInCurrReq] - - req := &storagepb.BidiWriteObjectRequest{ - Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{ - ChecksummedData: &storagepb.ChecksummedData{ - Content: data, - }, - }, - WriteOffset: writeOffset, - FinishWrite: lastWriteOfEntireObject, - Flush: remainingDataFitsInSingleReq && !lastWriteOfEntireObject, - StateLookup: remainingDataFitsInSingleReq && !lastWriteOfEntireObject, - } - - // Open a new stream if necessary and set the first_message field on - // the request. The first message on the WriteObject stream must either - // be the Object or the Resumable Upload ID. - if w.stream == nil { - hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(w.bucket))} - ctx := gax.InsertMetadataIntoOutgoingContext(w.ctx, hds...) - - w.stream, err = w.c.raw.BidiWriteObject(ctx) - if err != nil { - return nil, 0, err - } - - if w.upid != "" { // resumable upload - req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: w.upid} - } else { // non-resumable - spec, err := w.writeObjectSpec() - if err != nil { - return nil, 0, err - } - req.FirstMessage = &storagepb.BidiWriteObjectRequest_WriteObjectSpec{ - WriteObjectSpec: spec, - } - req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) - // For a non-resumable upload, checksums must be sent in this message. 
- // TODO: Currently the checksums are only sent on the first message - // of the stream, but in the future, we must also support sending it - // on the *last* message of the stream (instead of the first). - req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - } - } - - err = w.stream.Send(req) - if err == io.EOF { - // err was io.EOF. The client-side of a stream only gets an EOF on Send - // when the backend closes the stream and wants to return an error - // status. - - // Receive from the stream Recv() until it returns a non-nil error - // to receive the server's status as an error. We may get multiple - // messages before the error due to buffering. - err = nil - for err == nil { - _, err = w.stream.Recv() - } - // Drop the stream reference as a new one will need to be created if - // we retry. - w.stream = nil - - // Drop the stream reference as a new one will need to be created if - // we can retry the upload - w.stream = nil - - // Retriable errors mean we should start over and attempt to - // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received. - if shouldRetry(err) { - // TODO: Add test case for failure modes of querying progress. - writeOffset, err = w.determineOffset(start) - if err != nil { - return nil, 0, err - } - sent = int(writeOffset) - int(start) - - // Continue sending requests, opening a new stream and resending - // any bytes not yet persisted as per QueryWriteStatus - continue - } - } - if err != nil { - return nil, 0, err - } - - // Update the immediate stream's sent total and the upload offset with - // the data sent. - sent += len(data) - writeOffset += int64(len(data)) - - // Not done sending data, do not attempt to commit it yet, loop around - // and send more data. - if recvd-sent > 0 { - continue - } - - // The buffer has been uploaded and there is still more data to be - // uploaded, but this is not a resumable upload session. Therefore, - // don't check persisted data. - if !lastWriteOfEntireObject && w.chunkSize == 0 { - return nil, writeOffset, nil - } - - // Done sending the data in the buffer (remainingDataFitsInSingleReq - // should == true if we reach this code). - // If we are done sending the whole object, close the stream and get the final - // object. Otherwise, receive from the stream to confirm the persisted data. - if !lastWriteOfEntireObject { - resp, err := w.stream.Recv() - - // Retriable errors mean we should start over and attempt to - // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received - // from closing the stream. - if shouldRetry(err) { - writeOffset, err = w.determineOffset(start) - if err != nil { - return nil, 0, err - } - sent = int(writeOffset) - int(start) - - // Drop the stream reference as a new one will need to be created. - w.stream = nil - - continue - } - if err != nil { - return nil, 0, err - } - - if resp.GetPersistedSize() != writeOffset { - // Retry if not all bytes were persisted. - writeOffset = resp.GetPersistedSize() - sent = int(writeOffset) - int(start) - continue - } - } else { - // If the object is done uploading, close the send stream to signal - // to the server that we are done sending so that we can receive - // from the stream without blocking. - err = w.stream.CloseSend() - if err != nil { - // CloseSend() retries the send internally. It never returns an - // error in the current implementation, but we check it anyway in - // case that it does in the future. 
- return nil, 0, err - } - - // Stream receives do not block once send is closed, but we may not - // receive the response with the object right away; loop until we - // receive the object or error out. - var obj *storagepb.Object - for obj == nil { - resp, err := w.stream.Recv() - if err != nil { - return nil, 0, err - } - - obj = resp.GetResource() - } - - // Even though we received the object response, continue reading - // until we receive a non-nil error, to ensure the stream does not - // leak even if the context isn't cancelled. See: - // https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream - for err == nil { - _, err = w.stream.Recv() - } - - return obj, writeOffset, nil - } - - return nil, writeOffset, nil - } -} - -// determineOffset either returns the offset given to it in the case of a simple -// upload, or queries the write status in the case a resumable upload is being -// used. -func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { - // For a Resumable Upload, we must start from however much data - // was committed. - if w.upid != "" { - committed, err := w.queryProgress() - if err != nil { - return 0, err - } - offset = committed - } - return offset, nil -} - -// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's -// ObjectAttrs and applies its Conditions. This is only used for gRPC. -func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { - // To avoid modifying the ObjectAttrs embeded in the calling writer, deref - // the ObjectAttrs pointer to make a copy, then assign the desired name to - // the attribute. - attrs := *w.attrs - - spec := &storagepb.WriteObjectSpec{ - Resource: attrs.toProtoObject(w.bucket), - } - // WriteObject doesn't support the generation condition, so use default. - if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil { - return nil, err - } - return spec, nil -} - -// read copies the data in the reader to the given buffer and reports how much -// data was read into the buffer and if there is no more data to read (EOF). -// Furthermore, if the attrs.ContentType is unset, the first bytes of content -// will be sniffed for a matching content type. -func (w *gRPCWriter) read() (int, bool, error) { - if w.attrs.ContentType == "" { - w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader) - } - // Set n to -1 to start the Read loop. - var n, recvd int = -1, 0 - var err error - for err == nil && n != 0 { - // The routine blocks here until data is received. - n, err = w.reader.Read(w.buf[recvd:]) - recvd += n - } - var done bool - if err == io.EOF { - done = true - err = nil - } - return recvd, done, err -} - -func checkCanceled(err error) error { - if status.Code(err) == codes.Canceled { - return context.Canceled - } - - return err -} diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go deleted file mode 100644 index 1b9fbe9dd..000000000 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - "time" - - "cloud.google.com/go/storage/internal/apiv2/storagepb" - "google.golang.org/api/iterator" - raw "google.golang.org/api/storage/v1" -) - -// HMACState is the state of the HMAC key. -type HMACState string - -const ( - // Active is the status for an active key that can be used to sign - // requests. - Active HMACState = "ACTIVE" - - // Inactive is the status for an inactive key thus requests signed by - // this key will be denied. - Inactive HMACState = "INACTIVE" - - // Deleted is the status for a key that is deleted. - // Once in this state the key cannot key cannot be recovered - // and does not count towards key limits. Deleted keys will be cleaned - // up later. - Deleted HMACState = "DELETED" -) - -// HMACKey is the representation of a Google Cloud Storage HMAC key. -// -// HMAC keys are used to authenticate signed access to objects. To enable HMAC key -// authentication, please visit https://cloud.google.com/storage/docs/migrating. -type HMACKey struct { - // The HMAC's secret key. - Secret string - - // AccessID is the ID of the HMAC key. - AccessID string - - // Etag is the HTTP/1.1 Entity tag. - Etag string - - // ID is the ID of the HMAC key, including the ProjectID and AccessID. - ID string - - // ProjectID is the ID of the project that owns the - // service account to which the key authenticates. - ProjectID string - - // ServiceAccountEmail is the email address - // of the key's associated service account. - ServiceAccountEmail string - - // CreatedTime is the creation time of the HMAC key. - CreatedTime time.Time - - // UpdatedTime is the last modification time of the HMAC key metadata. - UpdatedTime time.Time - - // State is the state of the HMAC key. - // It can be one of StateActive, StateInactive or StateDeleted. - State HMACState -} - -// HMACKeyHandle helps provide access and management for HMAC keys. -type HMACKeyHandle struct { - projectID string - accessID string - retry *retryConfig - tc storageClient -} - -// HMACKeyHandle creates a handle that will be used for HMACKey operations. -func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { - return &HMACKeyHandle{ - projectID: projectID, - accessID: accessID, - retry: c.retry, - tc: c.tc, - } -} - -// Get invokes an RPC to retrieve the HMAC key referenced by the -// HMACKeyHandle's accessID. -// -// Options such as UserProjectForHMACKeys can be used to set the -// userProject to be billed against for operations. -func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - o := makeStorageOpts(true, hkh.retry, desc.userProjectID) - hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...) - - return hk, err -} - -// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. -// Only inactive HMAC keys can be deleted. -// After deletion, a key cannot be used to authenticate requests. -func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - o := makeStorageOpts(true, hkh.retry, desc.userProjectID) - return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...) 
-} - -func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { - hkmd := hk.Metadata - if hkmd == nil { - return nil, errors.New("field Metadata cannot be nil") - } - createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated) - if err != nil { - return nil, fmt.Errorf("field CreatedTime: %w", err) - } - updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated) - if err != nil && !updatedTimeCanBeNil { - return nil, fmt.Errorf("field UpdatedTime: %w", err) - } - - hmKey := &HMACKey{ - AccessID: hkmd.AccessId, - Secret: hk.Secret, - Etag: hkmd.Etag, - ID: hkmd.Id, - State: HMACState(hkmd.State), - ProjectID: hkmd.ProjectId, - CreatedTime: createdTime, - UpdatedTime: updatedTime, - - ServiceAccountEmail: hkmd.ServiceAccountEmail, - } - - return hmKey, nil -} - -func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { - if pbmd == nil { - return nil - } - - return &HMACKey{ - AccessID: pbmd.GetAccessId(), - ID: pbmd.GetId(), - State: HMACState(pbmd.GetState()), - ProjectID: pbmd.GetProject(), - CreatedTime: convertProtoTime(pbmd.GetCreateTime()), - UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()), - ServiceAccountEmail: pbmd.GetServiceAccountEmail(), - } -} - -// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. -func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { - if projectID == "" { - return nil, errors.New("storage: expecting a non-blank projectID") - } - if serviceAccountEmail == "" { - return nil, errors.New("storage: expecting a non-blank service account email") - } - - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - o := makeStorageOpts(false, c.retry, desc.userProjectID) - hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...) - return hk, err -} - -// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated. -type HMACKeyAttrsToUpdate struct { - // State is required and must be either StateActive or StateInactive. - State HMACState - - // Etag is an optional field and it is the HTTP/1.1 Entity tag. - Etag string -} - -// Update mutates the HMACKey referred to by accessID. -func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { - if au.State != Active && au.State != Inactive { - return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) - } - - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - isIdempotent := len(au.Etag) > 0 - o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID) - hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...) - return hk, err -} - -// An HMACKeysIterator is an iterator over HMACKeys. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -type HMACKeysIterator struct { - ctx context.Context - raw *raw.ProjectsHmacKeysService - projectID string - hmacKeys []*HMACKey - pageInfo *iterator.PageInfo - nextFunc func() error - index int - desc hmacKeyDesc - retry *retryConfig -} - -// ListHMACKeys returns an iterator for listing HMACKeys. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. 
-func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { - desc := new(hmacKeyDesc) - for _, opt := range opts { - opt.withHMACKeyDesc(desc) - } - - o := makeStorageOpts(true, c.retry, desc.userProjectID) - return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...) -} - -// Next returns the next result. Its second return value is iterator.Done if -// there are no more results. Once Next returns iterator.Done, all subsequent -// calls will return iterator.Done. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -func (it *HMACKeysIterator) Next() (*HMACKey, error) { - if err := it.nextFunc(); err != nil { - return nil, err - } - - key := it.hmacKeys[it.index] - it.index++ - - return key, nil -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -// -// Note: This iterator is not safe for concurrent operations without explicit synchronization. -func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } - -func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { - // TODO: Remove fetch method upon integration. This method is internalized into - // httpStorageClient.ListHMACKeys() as it is the only caller. - call := it.raw.List(it.projectID) - setClientHeader(call.Header()) - if pageToken != "" { - call = call.PageToken(pageToken) - } - if it.desc.showDeletedKeys { - call = call.ShowDeletedKeys(true) - } - if it.desc.userProjectID != "" { - call = call.UserProject(it.desc.userProjectID) - } - if it.desc.forServiceAccountEmail != "" { - call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail) - } - if pageSize > 0 { - call = call.MaxResults(int64(pageSize)) - } - - var resp *raw.HmacKeysMetadata - err = run(it.ctx, func(ctx context.Context) error { - resp, err = call.Context(ctx).Do() - return err - }, it.retry, true) - if err != nil { - return "", err - } - - for _, metadata := range resp.Items { - hk := &raw.HmacKey{ - Metadata: metadata, - } - hkey, err := toHMACKeyFromRaw(hk, true) - if err != nil { - return "", err - } - it.hmacKeys = append(it.hmacKeys, hkey) - } - return resp.NextPageToken, nil -} - -type hmacKeyDesc struct { - forServiceAccountEmail string - showDeletedKeys bool - userProjectID string -} - -// HMACKeyOption configures the behavior of HMACKey related methods and actions. -type HMACKeyOption interface { - withHMACKeyDesc(*hmacKeyDesc) -} - -type hmacKeyDescFunc func(*hmacKeyDesc) - -func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) { - hkdf(hkd) -} - -// ForHMACKeyServiceAccountEmail returns HMAC Keys that are -// associated with the email address of a service account in the project. -// -// Only one service account email can be used as a filter, so if multiple -// of these options are applied, the last email to be set will be used. -func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { - return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { - hkd.forServiceAccountEmail = serviceAccountEmail - }) -} - -// ShowDeletedHMACKeys will also list keys whose state is "DELETED". -func ShowDeletedHMACKeys() HMACKeyOption { - return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { - hkd.showDeletedKeys = true - }) -} - -// UserProjectForHMACKeys will bill the request against userProjectID -// if userProjectID is non-empty. -// -// Note: This is a noop right now and only provided for API compatibility. 
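Taken together, the deleted hmac.go exposes the HMAC-key surface of the client. A minimal sketch of creating, fetching, and listing keys through that API; the project ID and service-account email are placeholders, and the Secret field is only populated on creation.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	key, err := client.CreateHMACKey(ctx, "my-project", "sa@my-project.iam.gserviceaccount.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access id:", key.AccessID) // key.Secret is only returned here, on creation.

	got, err := client.HMACKeyHandle("my-project", key.AccessID).Get(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state:", got.State)

	it := client.ListHMACKeys(ctx, "my-project")
	for {
		k, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(k.AccessID, k.State)
	}
}
```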
-func UserProjectForHMACKeys(userProjectID string) HMACKeyOption { - return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { - hkd.userProjectID = userProjectID - }) -} diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go deleted file mode 100644 index a0f3c00a7..000000000 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ /dev/null @@ -1,1445 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "reflect" - "strconv" - "strings" - "time" - - "cloud.google.com/go/iam/apiv1/iampb" - "cloud.google.com/go/internal/optional" - "cloud.google.com/go/internal/trace" - "github.com/googleapis/gax-go/v2/callctx" - "golang.org/x/oauth2/google" - "google.golang.org/api/googleapi" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - raw "google.golang.org/api/storage/v1" - "google.golang.org/api/transport" - htransport "google.golang.org/api/transport/http" -) - -// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic -// storageClient interface. -type httpStorageClient struct { - creds *google.Credentials - hc *http.Client - xmlHost string - raw *raw.Service - scheme string - settings *settings - config *storageConfig -} - -// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON -// Storage API. -func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { - s := initSettings(opts...) - o := s.clientOption - config := newStorageConfig(o...) - - var creds *google.Credentials - // In general, it is recommended to use raw.NewService instead of htransport.NewClient - // since raw.NewService configures the correct default endpoints when initializing the - // internal http client. However, in our case, "NewRangeReader" in reader.go needs to - // access the http client directly to make requests, so we create the client manually - // here so it can be re-used by both reader.go and raw.NewService. This means we need to - // manually configure the default endpoint options on the http client. Furthermore, we - // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. - if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { - // Prepend default options to avoid overriding options passed by the user. - o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...) - - o = append(o, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"), - internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"), - internaloption.WithDefaultUniverseDomain("googleapis.com"), - ) - // Don't error out here. 
The user may have passed in their own HTTP - // client which does not auth with ADC or other common conventions. - c, err := transport.Creds(ctx, o...) - if err == nil { - creds = c - o = append(o, internaloption.WithCredentials(creds)) - } - } else { - var hostURL *url.URL - - if strings.Contains(host, "://") { - h, err := url.Parse(host) - if err != nil { - return nil, err - } - hostURL = h - } else { - // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST - // URL is only parsed correctly if it has a scheme, so we build it ourselves - hostURL = &url.URL{Scheme: "http", Host: host} - } - - hostURL.Path = "storage/v1/" - endpoint := hostURL.String() - - // Append the emulator host as default endpoint for the user - o = append([]option.ClientOption{option.WithoutAuthentication()}, o...) - - o = append(o, internaloption.WithDefaultEndpoint(endpoint)) - o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint)) - } - s.clientOption = o - - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. - hc, ep, err := htransport.NewClient(ctx, s.clientOption...) - if err != nil { - return nil, fmt.Errorf("dialing: %w", err) - } - // RawService should be created with the chosen endpoint to take account of user override. - rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) - if err != nil { - return nil, fmt.Errorf("storage client: %w", err) - } - // Update xmlHost and scheme with the chosen endpoint. - u, err := url.Parse(ep) - if err != nil { - return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) - } - - return &httpStorageClient{ - creds: creds, - hc: hc, - xmlHost: u.Host, - raw: rawService, - scheme: u.Scheme, - settings: s, - config: &config, - }, nil -} - -func (c *httpStorageClient) Close() error { - c.hc.CloseIdleConnections() - return nil -} - -// Top-level methods. - -func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Projects.ServiceAccount.Get(project) - var res *raw.ServiceAccount - err := run(ctx, func(ctx context.Context) error { - var err error - res, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - return res.EmailAddress, nil -} - -func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) - var bkt *raw.Bucket - if attrs != nil { - bkt = attrs.toRawBucket() - } else { - bkt = &raw.Bucket{} - } - bkt.Name = bucket - // If there is lifecycle information but no location, explicitly set - // the location. This is a GCS quirk/bug. 
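The emulator branch above skips ADC credentials and rewrites the endpoint when STORAGE_EMULATOR_HOST is set, adding an http:// scheme if the value is a bare host:port. A sketch of pointing a client at an emulator; the host and port are placeholders.

```go
package main

import (
	"context"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	// Normally exported in the environment before the process starts; set
	// here only to keep the sketch self-contained.
	os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9023")

	ctx := context.Background()
	client, err := storage.NewClient(ctx) // no credentials are required against the emulator
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	if err := client.Bucket("test-bucket").Create(ctx, "test-project", nil); err != nil {
		log.Fatal(err)
	}
}
```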
- if bkt.Location == "" && bkt.Lifecycle != nil { - bkt.Location = "US" - } - req := c.raw.Buckets.Insert(project, bkt) - setClientHeader(req.Header()) - if attrs != nil && attrs.PredefinedACL != "" { - req.PredefinedAcl(attrs.PredefinedACL) - } - if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) - } - if enableObjectRetention != nil { - req.EnableObjectRetention(*enableObjectRetention) - } - var battrs *BucketAttrs - err := run(ctx, func(ctx context.Context) error { - b, err := req.Context(ctx).Do() - if err != nil { - return err - } - battrs, err = newBucket(b) - return err - }, s.retry, s.idempotent) - return battrs, err -} - -func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator { - s := callSettings(c.settings, opts...) - it := &BucketIterator{ - ctx: ctx, - projectID: project, - } - - fetch := func(pageSize int, pageToken string) (token string, err error) { - req := c.raw.Buckets.List(it.projectID) - setClientHeader(req.Header()) - req.Projection("full") - req.Prefix(it.Prefix) - req.PageToken(pageToken) - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Buckets - err = run(it.ctx, func(ctx context.Context) error { - resp, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - for _, item := range resp.Items { - b, err := newBucket(item) - if err != nil { - return "", err - } - it.buckets = append(it.buckets, b) - } - return resp.NextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.buckets) }, - func() interface{} { b := it.buckets; it.buckets = nil; return b }) - - return it -} - -// Bucket methods. - -func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := c.raw.Buckets.Delete(bucket) - setClientHeader(req.Header()) - if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil { - return err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - - return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) -} - -func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) - req := c.raw.Buckets.Get(bucket).Projection("full") - setClientHeader(req.Header()) - err := applyBucketConds("httpStorageClient.GetBucket", conds, req) - if err != nil { - return nil, err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - - var resp *raw.Bucket - err = run(ctx, func(ctx context.Context) error { - resp, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp) -} -func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { - s := callSettings(c.settings, opts...) 
- rb := uattrs.toRawBucket() - req := c.raw.Buckets.Patch(bucket, rb).Projection("full") - setClientHeader(req.Header()) - err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req) - if err != nil { - return nil, err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - if uattrs != nil && uattrs.PredefinedACL != "" { - req.PredefinedAcl(uattrs.PredefinedACL) - } - if uattrs != nil && uattrs.PredefinedDefaultObjectACL != "" { - req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) - } - - var rawBucket *raw.Bucket - err = run(ctx, func(ctx context.Context) error { - rawBucket, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return newBucket(rawBucket) -} - -func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - - var metageneration int64 - if conds != nil { - metageneration = conds.MetagenerationMatch - } - req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration) - - return run(ctx, func(ctx context.Context) error { - _, err := req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) -} -func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { - s := callSettings(c.settings, opts...) - it := &ObjectIterator{ - ctx: ctx, - } - if q != nil { - it.query = *q - } - fetch := func(pageSize int, pageToken string) (string, error) { - req := c.raw.Objects.List(bucket) - setClientHeader(req.Header()) - projection := it.query.Projection - if projection == ProjectionDefault { - projection = ProjectionFull - } - req.Projection(projection.String()) - req.Delimiter(it.query.Delimiter) - req.Prefix(it.query.Prefix) - req.StartOffset(it.query.StartOffset) - req.EndOffset(it.query.EndOffset) - req.Versions(it.query.Versions) - req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) - req.MatchGlob(it.query.MatchGlob) - req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes) - if selection := it.query.toFieldSelection(); selection != "" { - req.Fields("nextPageToken", googleapi.Field(selection)) - } - req.PageToken(pageToken) - if s.userProject != "" { - req.UserProject(s.userProject) - } - if pageSize > 0 { - req.MaxResults(int64(pageSize)) - } - var resp *raw.Objects - var err error - err = run(it.ctx, func(ctx context.Context) error { - resp, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - err = ErrBucketNotExist - } - return "", err - } - for _, item := range resp.Items { - it.items = append(it.items, newObject(item)) - } - for _, prefix := range resp.Prefixes { - it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) - } - return resp.NextPageToken, nil - } - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.items) }, - func() interface{} { b := it.items; it.items = nil; return b }) - - return it -} - -// Object metadata methods. - -func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error { - s := callSettings(c.settings, opts...) 
- req := c.raw.Objects.Delete(bucket, object).Context(ctx) - if err := applyConds("Delete", gen, conds, req); err != nil { - return err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - err := run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - return ErrObjectNotExist - } - return err -} - -func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) - req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx) - if err := applyConds("Attrs", gen, conds, req); err != nil { - return nil, err - } - if s.userProject != "" { - req.UserProject(s.userProject) - } - if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - var err error - err = run(ctx, func(ctx context.Context) error { - obj, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - var e *googleapi.Error - if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil -} - -func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) { - uattrs := params.uattrs - s := callSettings(c.settings, opts...) - - var attrs ObjectAttrs - // Lists of fields to send, and set to null, in the JSON. - var forceSendFields, nullFields []string - if uattrs.ContentType != nil { - attrs.ContentType = optional.ToString(uattrs.ContentType) - // For ContentType, sending the empty string is a no-op. - // Instead we send a null. - if attrs.ContentType == "" { - nullFields = append(nullFields, "ContentType") - } else { - forceSendFields = append(forceSendFields, "ContentType") - } - } - if uattrs.ContentLanguage != nil { - attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) - // For ContentLanguage it's an error to send the empty string. - // Instead we send a null. - if attrs.ContentLanguage == "" { - nullFields = append(nullFields, "ContentLanguage") - } else { - forceSendFields = append(forceSendFields, "ContentLanguage") - } - } - if uattrs.ContentEncoding != nil { - attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - forceSendFields = append(forceSendFields, "ContentEncoding") - } - if uattrs.ContentDisposition != nil { - attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) - forceSendFields = append(forceSendFields, "ContentDisposition") - } - if uattrs.CacheControl != nil { - attrs.CacheControl = optional.ToString(uattrs.CacheControl) - forceSendFields = append(forceSendFields, "CacheControl") - } - if uattrs.EventBasedHold != nil { - attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) - forceSendFields = append(forceSendFields, "EventBasedHold") - } - if uattrs.TemporaryHold != nil { - attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) - forceSendFields = append(forceSendFields, "TemporaryHold") - } - if !uattrs.CustomTime.IsZero() { - attrs.CustomTime = uattrs.CustomTime - forceSendFields = append(forceSendFields, "CustomTime") - } - if uattrs.Metadata != nil { - attrs.Metadata = uattrs.Metadata - if len(attrs.Metadata) == 0 { - // Sending the empty map is a no-op. We send null instead. 
- nullFields = append(nullFields, "Metadata") - } else { - forceSendFields = append(forceSendFields, "Metadata") - } - } - if uattrs.ACL != nil { - attrs.ACL = uattrs.ACL - // It's an error to attempt to delete the ACL, so - // we don't append to nullFields here. - forceSendFields = append(forceSendFields, "Acl") - } - if uattrs.Retention != nil { - // For ObjectRetention it's an error to send empty fields. - // Instead we send a null as the user's intention is to remove. - if uattrs.Retention.Mode == "" && uattrs.Retention.RetainUntil.IsZero() { - nullFields = append(nullFields, "Retention") - } else { - attrs.Retention = uattrs.Retention - forceSendFields = append(forceSendFields, "Retention") - } - } - rawObj := attrs.toRawObject(params.bucket) - rawObj.ForceSendFields = forceSendFields - rawObj.NullFields = nullFields - call := c.raw.Objects.Patch(params.bucket, params.object, rawObj).Projection("full") - if err := applyConds("Update", params.gen, params.conds, call); err != nil { - return nil, err - } - if s.userProject != "" { - call.UserProject(s.userProject) - } - if uattrs.PredefinedACL != "" { - call.PredefinedAcl(uattrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil { - return nil, err - } - - if params.overrideRetention != nil { - call.OverrideUnlockedRetention(*params.overrideRetention) - } - - var obj *raw.Object - var err error - err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent) - var e *googleapi.Error - if errors.As(err, &e) && e.Code == http.StatusNotFound { - return nil, ErrObjectNotExist - } - if err != nil { - return nil, err - } - return newObject(obj), nil -} - -// Default Object ACL methods. - -func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity)) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) -} - -func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { - s := callSettings(c.settings, opts...) - var acls *raw.ObjectAccessControls - var err error - req := c.raw.DefaultObjectAccessControls.List(bucket) - configureACLCall(ctx, s.userProject, req) - err = run(ctx, func(ctx context.Context) error { - acls, err = req.Context(ctx).Do() - return err - }, s.retry, true) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil -} -func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - type setRequest interface { - Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) - Header() http.Header - } - acl := &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - var err error - req := c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func(ctx context.Context) error { - _, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) -} - -// Bucket ACL methods. 
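The UpdateObject handling above distinguishes fields that are force-sent, fields that are sent as an explicit JSON null, and fields that are omitted entirely. A sketch of how that looks through ObjectAttrsToUpdate; bucket and object names are placeholders.

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func updateAttrs(ctx context.Context, client *storage.Client) error {
	obj := client.Bucket("example-bucket").Object("example-object")
	_, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
		CacheControl:    "public, max-age=60", // non-nil, non-empty: force-sent as-is
		ContentLanguage: "",                   // non-nil empty string: sent as a JSON null to clear the field
		// Fields left nil (e.g. ContentType here) are not touched.
	})
	return err
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	if err := updateAttrs(ctx, client); err != nil {
		log.Fatal(err)
	}
}
```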
- -func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := c.raw.BucketAccessControls.Delete(bucket, string(entity)) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) -} - -func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { - s := callSettings(c.settings, opts...) - var acls *raw.BucketAccessControls - var err error - req := c.raw.BucketAccessControls.List(bucket) - configureACLCall(ctx, s.userProject, req) - err = run(ctx, func(ctx context.Context) error { - acls, err = req.Context(ctx).Do() - return err - }, s.retry, true) - if err != nil { - return nil, err - } - return toBucketACLRules(acls.Items), nil -} - -func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - acl := &raw.BucketAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) - configureACLCall(ctx, s.userProject, req) - var err error - return run(ctx, func(ctx context.Context) error { - _, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) -} - -// configureACLCall sets the context, user project and headers on the apiary library call. -// This will panic if the call does not have the correct methods. -func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) { - vc := reflect.ValueOf(call) - vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) - if userProject != "" { - vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)}) - } - setClientHeader(call.Header()) -} - -// Object ACL methods. - -func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) -} - -// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. -// Selecting a specific generation of this object is not currently supported by the client. -func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) { - s := callSettings(c.settings, opts...) - var acls *raw.ObjectAccessControls - var err error - req := c.raw.ObjectAccessControls.List(bucket, object) - configureACLCall(ctx, s.userProject, req) - err = run(ctx, func(ctx context.Context) error { - acls, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toObjectACLRules(acls.Items), nil -} - -func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error { - s := callSettings(c.settings, opts...) 
- type setRequest interface { - Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) - Header() http.Header - } - - acl := &raw.ObjectAccessControl{ - Bucket: bucket, - Entity: string(entity), - Role: string(role), - } - var err error - req := c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) - configureACLCall(ctx, s.userProject, req) - return run(ctx, func(ctx context.Context) error { - _, err = req.Context(ctx).Do() - return err - }, s.retry, s.idempotent) -} - -// Media operations. - -func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) { - s := callSettings(c.settings, opts...) - rawReq := &raw.ComposeRequest{} - // Compose requires a non-empty Destination, so we always set it, - // even if the caller-provided ObjectAttrs is the zero value. - rawReq.Destination = req.dstObject.attrs.toRawObject(req.dstBucket) - if req.sendCRC32C { - rawReq.Destination.Crc32c = encodeUint32(req.dstObject.attrs.CRC32C) - } - for _, src := range req.srcs { - srcObj := &raw.ComposeRequestSourceObjects{ - Name: src.name, - } - if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { - return nil, err - } - rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) - } - - call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq) - if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { - return nil, err - } - if s.userProject != "" { - call.UserProject(s.userProject) - } - if req.predefinedACL != "" { - call.DestinationPredefinedAcl(req.predefinedACL) - } - if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { - return nil, err - } - var obj *raw.Object - setClientHeader(call.Header()) - - var err error - retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err } - - if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { - return nil, err - } - return newObject(obj), nil -} -func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) { - s := callSettings(c.settings, opts...) - rawObject := req.dstObject.attrs.toRawObject("") - call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) - - call.Projection("full") - if req.token != "" { - call.RewriteToken(req.token) - } - if req.dstObject.keyName != "" { - call.DestinationKmsKeyName(req.dstObject.keyName) - } - if req.predefinedACL != "" { - call.DestinationPredefinedAcl(req.predefinedACL) - } - if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil { - return nil, err - } - if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil { - return nil, err - } - if s.userProject != "" { - call.UserProject(s.userProject) - } - // Set destination encryption headers. - if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil { - return nil, err - } - // Set source encryption headers. 
- if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil { - return nil, err - } - - if req.maxBytesRewrittenPerCall != 0 { - call.MaxBytesRewrittenPerCall(req.maxBytesRewrittenPerCall) - } - - var res *raw.RewriteResponse - var err error - setClientHeader(call.Header()) - - retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err } - - if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { - return nil, err - } - - r := &rewriteObjectResponse{ - done: res.Done, - written: res.TotalBytesRewritten, - size: res.ObjectSize, - token: res.RewriteToken, - resource: newObject(res.Resource), - } - - return r, nil -} - -func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - - if c.config.useJSONforReads { - return c.newRangeReaderJSON(ctx, params, s) - } - return c.newRangeReaderXML(ctx, params, s) -} - -func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { - u := &url.URL{ - Scheme: c.scheme, - Host: c.xmlHost, - Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), - RawPath: fmt.Sprintf("/%s/%s", params.bucket, url.PathEscape(params.object)), - } - verb := "GET" - if params.length == 0 { - verb = "HEAD" - } - req, err := http.NewRequest(verb, u.String(), nil) - if err != nil { - return nil, err - } - - if s.userProject != "" { - req.Header.Set("X-Goog-User-Project", s.userProject) - } - - if err := setRangeReaderHeaders(req.Header, params); err != nil { - return nil, err - } - - // Set custom headers passed in via the context. This is only required for XML; - // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. - ctxHeaders := callctx.HeadersFromContext(ctx) - for k, vals := range ctxHeaders { - for _, v := range vals { - req.Header.Add(k, v) - } - } - - reopen := readerReopen(ctx, req.Header, params, s, - func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) }, - func() error { return setConditionsHeaders(req.Header, params.conds) }, - func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) - - res, err := reopen(0) - if err != nil { - return nil, err - } - return parseReadResponse(res, params, reopen) -} - -func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { - call := c.raw.Objects.Get(params.bucket, params.object) - - setClientHeader(call.Header()) - call.Projection("full") - - if s.userProject != "" { - call.UserProject(s.userProject) - } - - if err := setRangeReaderHeaders(call.Header(), params); err != nil { - return nil, err - } - - reopen := readerReopen(ctx, call.Header(), params, s, func(ctx context.Context) (*http.Response, error) { return call.Context(ctx).Download() }, - func() error { return applyConds("NewReader", params.gen, params.conds, call) }, - func() { call.Generation(params.gen) }) - - res, err := reopen(0) - if err != nil { - return nil, err - } - return parseReadResponse(res, params, reopen) -} - -func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { - s := callSettings(c.settings, opts...) 
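A rewrite can return before the copy is finished (for example when maxBytesRewrittenPerCall is set), handing back a token to resume from, so callers loop until done is reported. A sketch of that loop against a fake in-memory stand-in for the Rewrite call; the types and the three-step fake server are invented for illustration.

package main

import (
	"context"
	"fmt"
)

// rewriteResult stands in for one Objects.Rewrite round trip.
type rewriteResult struct {
	done    bool
	written int64
	token   string
}

// rewriteStep pretends each round trip copies part of a 250-byte object.
func rewriteStep(ctx context.Context, token string) (rewriteResult, error) {
	switch token {
	case "":
		return rewriteResult{written: 100, token: "t1"}, nil
	case "t1":
		return rewriteResult{written: 200, token: "t2"}, nil
	default:
		return rewriteResult{done: true, written: 250}, nil
	}
}

func main() {
	ctx := context.Background()
	var token string
	for {
		res, err := rewriteStep(ctx, token)
		if err != nil {
			panic(err)
		}
		fmt.Printf("rewritten %d bytes\n", res.written)
		if res.done {
			break
		}
		token = res.token // resume where the previous call stopped
	}
}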
- errorf := params.setError - setObj := params.setObj - progress := params.progress - attrs := params.attrs - - mediaOpts := []googleapi.MediaOption{ - googleapi.ChunkSize(params.chunkSize), - } - if c := attrs.ContentType; c != "" { - mediaOpts = append(mediaOpts, googleapi.ContentType(c)) - } - if params.chunkRetryDeadline != 0 { - mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline)) - } - - pr, pw := io.Pipe() - - go func() { - defer close(params.donec) - - rawObj := attrs.toRawObject(params.bucket) - if params.sendCRC32C { - rawObj.Crc32c = encodeUint32(attrs.CRC32C) - } - if attrs.MD5 != nil { - rawObj.Md5Hash = base64.StdEncoding.EncodeToString(attrs.MD5) - } - call := c.raw.Objects.Insert(params.bucket, rawObj). - Media(pr, mediaOpts...). - Projection("full"). - Context(params.ctx). - Name(params.attrs.Name) - call.ProgressUpdater(func(n, _ int64) { progress(n) }) - - if attrs.KMSKeyName != "" { - call.KmsKeyName(attrs.KMSKeyName) - } - if attrs.PredefinedACL != "" { - call.PredefinedAcl(attrs.PredefinedACL) - } - if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil { - errorf(err) - pr.CloseWithError(err) - return - } - var resp *raw.Object - err := applyConds("NewWriter", defaultGen, params.conds, call) - if err == nil { - if s.userProject != "" { - call.UserProject(s.userProject) - } - // TODO(tritone): Remove this code when Uploads begin to support - // retry attempt header injection with "client header" injection. - setClientHeader(call.Header()) - - // The internals that perform call.Do automatically retry both the initial - // call to set up the upload as well as calls to upload individual chunks - // for a resumable upload (as long as the chunk size is non-zero). Hence - // there is no need to add retries here. - - // Retry only when the operation is idempotent or the retry policy is RetryAlways. - var useRetry bool - if (s.retry == nil || s.retry.policy == RetryIdempotent) && s.idempotent { - useRetry = true - } else if s.retry != nil && s.retry.policy == RetryAlways { - useRetry = true - } - if useRetry { - if s.retry != nil { - call.WithRetry(s.retry.backoff, s.retry.shouldRetry) - } else { - call.WithRetry(nil, nil) - } - } - resp, err = call.Do() - } - if err != nil { - errorf(err) - pr.CloseWithError(err) - return - } - setObj(newObject(resp)) - }() - - return pw, nil -} - -// IAM methods. - -func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version)) - setClientHeader(call.Header()) - if s.userProject != "" { - call.UserProject(s.userProject) - } - var rp *raw.Policy - err := run(ctx, func(ctx context.Context) error { - var err error - rp, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return iamFromStoragePolicy(rp), nil -} - -func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error { - s := callSettings(c.settings, opts...) 
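The resumable-upload branch above only asks the apiary layer to retry when the operation is idempotent under the default/RetryIdempotent policy, or unconditionally under RetryAlways. A small sketch of that decision table; the policy constants mirror the package's names but this is not the library type.

package main

import "fmt"

type retryPolicy int

const (
	retryIdempotent retryPolicy = iota // the default when no policy is set
	retryAlways
	retryNever
)

// shouldRetry mirrors the useRetry decision in OpenWriter above.
func shouldRetry(policy *retryPolicy, idempotent bool) bool {
	if policy == nil || *policy == retryIdempotent {
		return idempotent
	}
	return *policy == retryAlways
}

func main() {
	always := retryAlways
	never := retryNever
	fmt.Println(shouldRetry(nil, true))      // true: default policy, idempotent op
	fmt.Println(shouldRetry(nil, false))     // false: non-idempotent op
	fmt.Println(shouldRetry(&always, false)) // true: RetryAlways overrides
	fmt.Println(shouldRetry(&never, true))   // false: retries disabled
}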
- - rp := iamToStoragePolicy(policy) - call := c.raw.Buckets.SetIamPolicy(resource, rp) - setClientHeader(call.Header()) - if s.userProject != "" { - call.UserProject(s.userProject) - } - - return run(ctx, func(ctx context.Context) error { - _, err := call.Context(ctx).Do() - return err - }, s.retry, s.idempotent) -} - -func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Buckets.TestIamPermissions(resource, permissions) - setClientHeader(call.Header()) - if s.userProject != "" { - call.UserProject(s.userProject) - } - var res *raw.TestIamPermissionsResponse - err := run(ctx, func(ctx context.Context) error { - var err error - res, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return res.Permissions, nil -} - -// HMAC Key methods. - -func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Projects.HmacKeys.Get(project, accessID) - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - - var metadata *raw.HmacKeyMetadata - var err error - if err := run(ctx, func(ctx context.Context) error { - metadata, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent); err != nil { - return nil, err - } - hk := &raw.HmacKey{ - Metadata: metadata, - } - return toHMACKeyFromRaw(hk, false) -} - -func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator { - s := callSettings(c.settings, opts...) - it := &HMACKeysIterator{ - ctx: ctx, - raw: c.raw.Projects.HmacKeys, - projectID: project, - retry: s.retry, - } - fetch := func(pageSize int, pageToken string) (token string, err error) { - call := c.raw.Projects.HmacKeys.List(project) - setClientHeader(call.Header()) - if pageToken != "" { - call = call.PageToken(pageToken) - } - if pageSize > 0 { - call = call.MaxResults(int64(pageSize)) - } - if showDeletedKeys { - call = call.ShowDeletedKeys(true) - } - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - if serviceAccountEmail != "" { - call = call.ServiceAccountEmail(serviceAccountEmail) - } - - var resp *raw.HmacKeysMetadata - err = run(it.ctx, func(ctx context.Context) error { - resp, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - return "", err - } - - for _, metadata := range resp.Items { - hk := &raw.HmacKey{ - Metadata: metadata, - } - hkey, err := toHMACKeyFromRaw(hk, true) - if err != nil { - return "", err - } - it.hmacKeys = append(it.hmacKeys, hkey) - } - return resp.NextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo( - fetch, - func() int { return len(it.hmacKeys) - it.index }, - func() interface{} { - prev := it.hmacKeys - it.hmacKeys = it.hmacKeys[:0] - it.index = 0 - return prev - }) - return it -} - -func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) 
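ListHMACKeys wires its iterator through iterator.NewPageInfo: a fetch closure fills an internal buffer and returns the next page token, while the bufLen/takeBuf closures expose that buffer to the paging machinery. A toy iterator over an invented two-page data source, plumbed the same way; google.golang.org/api/iterator is the real package, everything else here is illustrative.

package main

import (
	"fmt"

	"google.golang.org/api/iterator"
)

// pages is a fake, pre-paginated data source keyed by page token.
var pages = map[string]struct {
	items []string
	next  string
}{
	"":   {items: []string{"a", "b"}, next: "p2"},
	"p2": {items: []string{"c"}, next: ""},
}

type stringIterator struct {
	items    []string
	pageInfo *iterator.PageInfo
	nextFunc func() error
}

func newStringIterator() *stringIterator {
	it := &stringIterator{}
	fetch := func(pageSize int, pageToken string) (string, error) {
		p := pages[pageToken]
		it.items = append(it.items, p.items...)
		return p.next, nil // empty token signals the last page
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b },
	)
	return it
}

func (it *stringIterator) Next() (string, error) {
	if err := it.nextFunc(); err != nil {
		return "", err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func main() {
	it := newStringIterator()
	for {
		s, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(s)
	}
}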
- call := c.raw.Projects.HmacKeys.Update(project, accessID, &raw.HmacKeyMetadata{ - Etag: attrs.Etag, - State: string(attrs.State), - }) - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - - var metadata *raw.HmacKeyMetadata - var err error - if err := run(ctx, func(ctx context.Context) error { - metadata, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent); err != nil { - return nil, err - } - hk := &raw.HmacKey{ - Metadata: metadata, - } - return toHMACKeyFromRaw(hk, false) -} - -func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) { - s := callSettings(c.settings, opts...) - call := c.raw.Projects.HmacKeys.Create(project, serviceAccountEmail) - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - - var hk *raw.HmacKey - if err := run(ctx, func(ctx context.Context) error { - h, err := call.Context(ctx).Do() - hk = h - return err - }, s.retry, s.idempotent); err != nil { - return nil, err - } - return toHMACKeyFromRaw(hk, true) -} - -func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error { - s := callSettings(c.settings, opts...) - call := c.raw.Projects.HmacKeys.Delete(project, accessID) - if s.userProject != "" { - call = call.UserProject(s.userProject) - } - return run(ctx, func(ctx context.Context) error { - return call.Context(ctx).Do() - }, s.retry, s.idempotent) -} - -// Notification methods. - -// ListNotifications returns all the Notifications configured for this bucket, as a map indexed by notification ID. -// -// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket, -// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets. -func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - call := c.raw.Notifications.List(bucket) - if s.userProject != "" { - call.UserProject(s.userProject) - } - var res *raw.Notifications - err = run(ctx, func(ctx context.Context) error { - res, err = call.Context(ctx).Do() - return err - }, s.retry, true) - if err != nil { - return nil, err - } - return notificationsToMap(res.Items), nil -} - -func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) 
- call := c.raw.Notifications.Insert(bucket, toRawNotification(n)) - if s.userProject != "" { - call.UserProject(s.userProject) - } - var rn *raw.Notification - err = run(ctx, func(ctx context.Context) error { - rn, err = call.Context(ctx).Do() - return err - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return toNotification(rn), nil -} - -func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification") - defer func() { trace.EndSpan(ctx, err) }() - - s := callSettings(c.settings, opts...) - call := c.raw.Notifications.Delete(bucket, id) - if s.userProject != "" { - call.UserProject(s.userProject) - } - return run(ctx, func(ctx context.Context) error { - return call.Context(ctx).Do() - }, s.retry, s.idempotent) -} - -type httpReader struct { - body io.ReadCloser - seen int64 - reopen func(seen int64) (*http.Response, error) -} - -func (r *httpReader) Read(p []byte) (int, error) { - n := 0 - for len(p[n:]) > 0 { - m, err := r.body.Read(p[n:]) - n += m - r.seen += int64(m) - if err == nil || err == io.EOF { - return n, err - } - // Read failed (likely due to connection issues), but we will try to reopen - // the pipe and continue. Send a ranged read request that takes into account - // the number of bytes we've already seen. - res, err := r.reopen(r.seen) - if err != nil { - // reopen already retries - return n, err - } - r.body.Close() - r.body = res.Body - } - return n, nil -} - -func (r *httpReader) Close() error { - return r.body.Close() -} - -func setRangeReaderHeaders(h http.Header, params *newRangeReaderParams) error { - if params.readCompressed { - h.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(h, params.encryptionKey, false); err != nil { - return err - } - return nil -} - -// readerReopen initiates a Read with offset and length, assuming we -// have already read seen bytes. -func readerReopen(ctx context.Context, header http.Header, params *newRangeReaderParams, s *settings, - doDownload func(context.Context) (*http.Response, error), applyConditions func() error, setGeneration func()) func(int64) (*http.Response, error) { - return func(seen int64) (*http.Response, error) { - // If the context has already expired, return immediately without making a - // call. - if err := ctx.Err(); err != nil { - return nil, err - } - start := params.offset + seen - if params.length < 0 && start < 0 { - header.Set("Range", fmt.Sprintf("bytes=%d", start)) - } else if params.length < 0 && start > 0 { - header.Set("Range", fmt.Sprintf("bytes=%d-", start)) - } else if params.length > 0 { - // The end character isn't affected by how many bytes we've seen. - header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) - } - // We wait to assign conditions here because the generation number can change in between reopen() runs. - if err := applyConditions(); err != nil { - return nil, err - } - // If an object generation is specified, include generation as query string parameters. 
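readerReopen above rebuilds the Range header on every (re)open: a negative length means read to the end, a negative offset selects a suffix of the object, and seen shifts the start forward after a dropped connection. A standalone sketch of that header math.

package main

import "fmt"

// rangeHeader reproduces the branches in readerReopen above; an empty result
// means no Range header is set (zero length, or a plain read from offset 0).
func rangeHeader(offset, length, seen int64) string {
	start := offset + seen
	switch {
	case length < 0 && start < 0:
		return fmt.Sprintf("bytes=%d", start) // suffix range, e.g. bytes=-100
	case length < 0 && start > 0:
		return fmt.Sprintf("bytes=%d-", start)
	case length > 0:
		// The end byte is fixed by the original request, not by how many
		// bytes have already been read.
		return fmt.Sprintf("bytes=%d-%d", start, offset+length-1)
	}
	return ""
}

func main() {
	fmt.Println(rangeHeader(-100, -1, 0)) // bytes=-100
	fmt.Println(rangeHeader(50, -1, 0))   // bytes=50-
	fmt.Println(rangeHeader(0, 200, 150)) // bytes=150-199 after 150 bytes seen
}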
- if params.gen >= 0 { - setGeneration() - } - - var err error - var res *http.Response - err = run(ctx, func(ctx context.Context) error { - res, err = doDownload(ctx) - if err != nil { - var e *googleapi.Error - if errors.As(err, &e) { - if e.Code == http.StatusNotFound { - return ErrObjectNotExist - } - } - return err - } - - if res.StatusCode == http.StatusNotFound { - // this check is necessary only for XML - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - - partialContentNotSatisfied := - !decompressiveTranscoding(res) && - start > 0 && params.length != 0 && - res.StatusCode != http.StatusPartialContent - - if partialContentNotSatisfied { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - - // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves - // back the whole file regardless of the range count passed in as per: - // https://cloud.google.com/storage/docs/transcoding#range, - // thus we have to manually move the body forward by seen bytes. - if decompressiveTranscoding(res) && seen > 0 { - _, _ = io.CopyN(ioutil.Discard, res.Body, seen) - } - - // If a generation hasn't been specified, and this is the first response we get, let's record the - // generation. In future requests we'll use this generation as a precondition to avoid data races. - if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { - gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) - if err != nil { - return err - } - params.gen = gen64 - } - return nil - }, s.retry, s.idempotent) - if err != nil { - return nil, err - } - return res, nil - } -} - -func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen func(int64) (*http.Response, error)) (*Reader, error) { - var err error - var ( - size int64 // total size of object, even if a range was requested. - checkCRC bool - crc uint32 - startOffset int64 // non-zero if range request. - ) - if res.StatusCode == http.StatusPartialContent { - cr := strings.TrimSpace(res.Header.Get("Content-Range")) - if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - // Content range is formatted -/. We take - // the total size. - size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - - dashIndex := strings.Index(cr, "-") - if dashIndex >= 0 { - startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err) - } - } - } else { - size = res.ContentLength - // Check the CRC iff all of the following hold: - // - We asked for content (length != 0). - // - We got all the content (status != PartialContent). - // - The server sent a CRC header. - // - The Go http stack did not uncompress the file. - // - We were not served compressed data that was uncompressed on download. - // The problem with the last two cases is that the CRC will not match -- GCS - // computes it on the compressed contents, but we compute it on the - // uncompressed contents. 
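parseReadResponse pulls the total size and start offset out of the Content-Range header, which for a 206 response has the shape "<first byte>-<last byte>/<total size>", e.g. "bytes 100-199/1000". A standalone sketch of that parsing, separate from the library function.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseContentRange returns the first-byte offset and the total object size
// from a partial-content Content-Range header.
func parseContentRange(cr string) (start, size int64, err error) {
	cr = strings.TrimSpace(cr)
	if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
		return 0, 0, fmt.Errorf("invalid Content-Range %q", cr)
	}
	size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid Content-Range %q: %w", cr, err)
	}
	if dash := strings.Index(cr, "-"); dash >= 0 {
		start, err = strconv.ParseInt(cr[len("bytes "):dash], 10, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("invalid Content-Range %q: %w", cr, err)
		}
	}
	return start, size, nil
}

func main() {
	start, size, err := parseContentRange("bytes 100-199/1000")
	fmt.Println(start, size, err) // 100 1000 <nil>
}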
- if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { - crc, checkCRC = parseCRC32c(res) - } - } - - remain := res.ContentLength - body := res.Body - // If the user requested zero bytes, explicitly close and remove the request - // body. - if params.length == 0 { - remain = 0 - body.Close() - body = emptyBody - } - var metaGen int64 - if res.Header.Get("X-Goog-Metageneration") != "" { - metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) - if err != nil { - return nil, err - } - } - - var lm time.Time - if res.Header.Get("Last-Modified") != "" { - lm, err = http.ParseTime(res.Header.Get("Last-Modified")) - if err != nil { - return nil, err - } - } - - attrs := ReaderObjectAttrs{ - Size: size, - ContentType: res.Header.Get("Content-Type"), - ContentEncoding: res.Header.Get("Content-Encoding"), - CacheControl: res.Header.Get("Cache-Control"), - LastModified: lm, - StartOffset: startOffset, - Generation: params.gen, - Metageneration: metaGen, - } - return &Reader{ - Attrs: attrs, - size: size, - remain: remain, - wantCRC: crc, - checkCRC: checkCRC, - reader: &httpReader{ - reopen: reopen, - body: body, - }, - }, nil -} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go deleted file mode 100644 index 4c01bff4f..000000000 --- a/vendor/cloud.google.com/go/storage/iam.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - - "cloud.google.com/go/iam" - "cloud.google.com/go/iam/apiv1/iampb" - "cloud.google.com/go/internal/trace" - raw "google.golang.org/api/storage/v1" - "google.golang.org/genproto/googleapis/type/expr" -) - -// IAM provides access to IAM access control for the bucket. -func (b *BucketHandle) IAM() *iam.Handle { - return iam.InternalNewHandleClient(&iamClient{ - userProject: b.userProject, - retry: b.retry, - client: b.c, - }, b.name) -} - -// iamClient implements the iam.client interface. -type iamClient struct { - userProject string - retry *retryConfig - client *Client -} - -func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { - return c.GetWithVersion(ctx, resource, 1) -} - -func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, c.retry, c.userProject) - return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...) -} - -func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") - defer func() { trace.EndSpan(ctx, err) }() - - isIdempotent := len(p.Etag) > 0 - o := makeStorageOpts(isIdempotent, c.retry, c.userProject) - return c.client.tc.SetIamPolicy(ctx, resource, p, o...) 
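At the public surface, the iamClient above is reached through BucketHandle.IAM(), and policy changes are read-modify-write. A hedged usage sketch: the bucket and member names are placeholders, and running it needs application default credentials.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/iam"
	"cloud.google.com/go/storage"
)

// grantObjectViewer reads the current bucket policy, adds a binding, and
// writes it back; error handling is minimal for brevity.
func grantObjectViewer(ctx context.Context, client *storage.Client, bucket, member string) error {
	handle := client.Bucket(bucket).IAM()
	policy, err := handle.Policy(ctx)
	if err != nil {
		return err
	}
	policy.Add(member, iam.RoleName("roles/storage.objectViewer"))
	return handle.SetPolicy(ctx, policy)
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // uses application default credentials
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	if err := grantObjectViewer(ctx, client, "my-bucket", "user:alice@example.com"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("policy updated")
}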
-} - -func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") - defer func() { trace.EndSpan(ctx, err) }() - - o := makeStorageOpts(true, c.retry, c.userProject) - return c.client.tc.TestIamPermissions(ctx, resource, perms, o...) -} - -func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { - return &raw.Policy{ - Bindings: iamToStorageBindings(ip.Bindings), - Etag: string(ip.Etag), - Version: int64(ip.Version), - } -} - -func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { - var rbs []*raw.PolicyBindings - for _, ib := range ibs { - rbs = append(rbs, &raw.PolicyBindings{ - Role: ib.Role, - Members: ib.Members, - Condition: iamToStorageCondition(ib.Condition), - }) - } - return rbs -} - -func iamToStorageCondition(exprpb *expr.Expr) *raw.Expr { - if exprpb == nil { - return nil - } - return &raw.Expr{ - Expression: exprpb.Expression, - Description: exprpb.Description, - Location: exprpb.Location, - Title: exprpb.Title, - } -} - -func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { - return &iampb.Policy{ - Bindings: iamFromStorageBindings(rp.Bindings), - Etag: []byte(rp.Etag), - } -} - -func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { - var ibs []*iampb.Binding - for _, rb := range rbs { - ibs = append(ibs, &iampb.Binding{ - Role: rb.Role, - Members: rb.Members, - Condition: iamFromStorageCondition(rb.Condition), - }) - } - return ibs -} - -func iamFromStorageCondition(rawexpr *raw.Expr) *expr.Expr { - if rawexpr == nil { - return nil - } - return &expr.Expr{ - Expression: rawexpr.Expression, - Description: rawexpr.Description, - Location: rawexpr.Location, - Title: rawexpr.Title, - } -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go deleted file mode 100644 index 415b2b585..000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package storage - -import ( - storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" - "google.golang.org/api/iterator" -) - -// BucketIterator manages a stream of *storagepb.Bucket. -type BucketIterator struct { - items []*storagepb.Bucket - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. 
- // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *BucketIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *BucketIterator) Next() (*storagepb.Bucket, error) { - var item *storagepb.Bucket - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *BucketIterator) bufLen() int { - return len(it.items) -} - -func (it *BucketIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. -type HmacKeyMetadataIterator struct { - items []*storagepb.HmacKeyMetadata - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { - var item *storagepb.HmacKeyMetadata - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *HmacKeyMetadataIterator) bufLen() int { - return len(it.items) -} - -func (it *HmacKeyMetadataIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig. -type NotificationConfigIterator struct { - items []*storagepb.NotificationConfig - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error) -} - -// PageInfo supports pagination. 
See the google.golang.org/api/iterator package for details. -func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) { - var item *storagepb.NotificationConfig - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *NotificationConfigIterator) bufLen() int { - return len(it.items) -} - -func (it *NotificationConfigIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// ObjectIterator manages a stream of *storagepb.Object. -type ObjectIterator struct { - items []*storagepb.Object - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *ObjectIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *ObjectIterator) Next() (*storagepb.Object, error) { - var item *storagepb.Object - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *ObjectIterator) bufLen() int { - return len(it.items) -} - -func (it *ObjectIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go deleted file mode 100644 index 5e2a8f0ad..000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -// Package storage is an auto-generated package for the -// Cloud Storage API. -// -// Stop. This folder is likely not what you are looking for. This folder -// contains protocol buffer definitions for an API only accessible to select -// customers. 
Customers not participating should not depend on this file. -// Please contact Google Cloud sales if you are interested. Unless told -// otherwise by a Google Cloud representative, do not use or otherwise rely -// on any of the contents of this folder. If you would like to use Cloud -// Storage, please consult our official documentation (at -// https://cloud.google.com/storage/docs/apis) for details on our XML and -// JSON APIs, or else consider one of our client libraries (at -// https://cloud.google.com/storage/docs/reference/libraries). This API -// defined in this folder is unreleased and may shut off, break, or fail at -// any time for any users who are not registered as a part of a private -// preview program. -// -// # General documentation -// -// For information that is relevant for all client libraries please reference -// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this -// page includes: -// -// - [Authentication and Authorization] -// - [Timeouts and Cancellation] -// - [Testing against Client Libraries] -// - [Debugging Client Libraries] -// - [Inspecting errors] -// -// # Example usage -// -// To get started with this package, create a client. -// -// ctx := context.Background() -// // This snippet has been automatically generated and should be regarded as a code template only. -// // It will require modifications to work: -// // - It may require correct/in-range values for request initialization. -// // - It may require specifying regional endpoints when creating the service client as shown in: -// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options -// c, err := storage.NewClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() -// -// The client will use your default application credentials. Clients should be reused instead of created as needed. -// The methods of Client are safe for concurrent use by multiple goroutines. -// The returned client must be Closed when it is done being used. -// -// # Using the Client -// -// The following is an example of making an API call with the newly created client. -// -// ctx := context.Background() -// // This snippet has been automatically generated and should be regarded as a code template only. -// // It will require modifications to work: -// // - It may require correct/in-range values for request initialization. -// // - It may require specifying regional endpoints when creating the service client as shown in: -// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options -// c, err := storage.NewClient(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// defer c.Close() -// stream, err := c.BidiWriteObject(ctx) -// if err != nil { -// // TODO: Handle error. -// } -// go func() { -// reqs := []*storagepb.BidiWriteObjectRequest{ -// // TODO: Create requests. -// } -// for _, req := range reqs { -// if err := stream.Send(req); err != nil { -// // TODO: Handle error. -// } -// } -// stream.CloseSend() -// }() -// for { -// resp, err := stream.Recv() -// if err == io.EOF { -// break -// } -// if err != nil { -// // TODO: handle error. -// } -// // TODO: Use resp. -// _ = resp -// } -// -// # Use of Context -// -// The ctx passed to NewClient is used for authentication requests and -// for creating the underlying connection, but is not used for subsequent calls. -// Individual methods on the client use the ctx given to them. -// -// To close the open connection, use the Close() method. 
-// -// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization -// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation -// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing -// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging -// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors -package storage // import "cloud.google.com/go/storage/internal/apiv2" - -import ( - "context" - - "google.golang.org/api/option" -) - -// For more information on implementing a client constructor hook, see -// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. -type clientHookParams struct{} -type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) - -var versionClient string - -func getVersionClient() string { - if versionClient == "" { - return "UNKNOWN" - } - return versionClient -} - -// DefaultAuthScopes reports the default set of authentication scopes to use with this package. -func DefaultAuthScopes() []string { - return []string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write", - } -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json deleted file mode 100644 index 56256bb2c..000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", - "language": "go", - "protoPackage": "google.storage.v2", - "libraryPackage": "cloud.google.com/go/storage/internal/apiv2", - "services": { - "Storage": { - "clients": { - "grpc": { - "libraryClient": "Client", - "rpcs": { - "BidiWriteObject": { - "methods": [ - "BidiWriteObject" - ] - }, - "CancelResumableWrite": { - "methods": [ - "CancelResumableWrite" - ] - }, - "ComposeObject": { - "methods": [ - "ComposeObject" - ] - }, - "CreateBucket": { - "methods": [ - "CreateBucket" - ] - }, - "CreateHmacKey": { - "methods": [ - "CreateHmacKey" - ] - }, - "CreateNotificationConfig": { - "methods": [ - "CreateNotificationConfig" - ] - }, - "DeleteBucket": { - "methods": [ - "DeleteBucket" - ] - }, - "DeleteHmacKey": { - "methods": [ - "DeleteHmacKey" - ] - }, - "DeleteNotificationConfig": { - "methods": [ - "DeleteNotificationConfig" - ] - }, - "DeleteObject": { - "methods": [ - "DeleteObject" - ] - }, - "GetBucket": { - "methods": [ - "GetBucket" - ] - }, - "GetHmacKey": { - "methods": [ - "GetHmacKey" - ] - }, - "GetIamPolicy": { - "methods": [ - "GetIamPolicy" - ] - }, - "GetNotificationConfig": { - "methods": [ - "GetNotificationConfig" - ] - }, - "GetObject": { - "methods": [ - "GetObject" - ] - }, - "GetServiceAccount": { - "methods": [ - "GetServiceAccount" - ] - }, - "ListBuckets": { - "methods": [ - "ListBuckets" - ] - }, - "ListHmacKeys": { - "methods": [ - "ListHmacKeys" - ] - }, - "ListNotificationConfigs": { - "methods": [ - "ListNotificationConfigs" - ] - }, - "ListObjects": { - "methods": [ - "ListObjects" - ] - }, - "LockBucketRetentionPolicy": { - "methods": [ - "LockBucketRetentionPolicy" - ] 
- }, - "QueryWriteStatus": { - "methods": [ - "QueryWriteStatus" - ] - }, - "ReadObject": { - "methods": [ - "ReadObject" - ] - }, - "RestoreObject": { - "methods": [ - "RestoreObject" - ] - }, - "RewriteObject": { - "methods": [ - "RewriteObject" - ] - }, - "SetIamPolicy": { - "methods": [ - "SetIamPolicy" - ] - }, - "StartResumableWrite": { - "methods": [ - "StartResumableWrite" - ] - }, - "TestIamPermissions": { - "methods": [ - "TestIamPermissions" - ] - }, - "UpdateBucket": { - "methods": [ - "UpdateBucket" - ] - }, - "UpdateHmacKey": { - "methods": [ - "UpdateHmacKey" - ] - }, - "UpdateObject": { - "methods": [ - "UpdateObject" - ] - }, - "WriteObject": { - "methods": [ - "WriteObject" - ] - } - } - } - } - } - } -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go deleted file mode 100644 index 47300d7a1..000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ /dev/null @@ -1,1917 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go_gapic. DO NOT EDIT. - -package storage - -import ( - "context" - "fmt" - "math" - "net/url" - "regexp" - "strings" - "time" - - iampb "cloud.google.com/go/iam/apiv1/iampb" - storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" - gax "github.com/googleapis/gax-go/v2" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - gtransport "google.golang.org/api/transport/grpc" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/protobuf/proto" -) - -var newClientHook clientHook - -// CallOptions contains the retry settings for each method of Client. 
-type CallOptions struct { - DeleteBucket []gax.CallOption - GetBucket []gax.CallOption - CreateBucket []gax.CallOption - ListBuckets []gax.CallOption - LockBucketRetentionPolicy []gax.CallOption - GetIamPolicy []gax.CallOption - SetIamPolicy []gax.CallOption - TestIamPermissions []gax.CallOption - UpdateBucket []gax.CallOption - DeleteNotificationConfig []gax.CallOption - GetNotificationConfig []gax.CallOption - CreateNotificationConfig []gax.CallOption - ListNotificationConfigs []gax.CallOption - ComposeObject []gax.CallOption - DeleteObject []gax.CallOption - RestoreObject []gax.CallOption - CancelResumableWrite []gax.CallOption - GetObject []gax.CallOption - ReadObject []gax.CallOption - UpdateObject []gax.CallOption - WriteObject []gax.CallOption - BidiWriteObject []gax.CallOption - ListObjects []gax.CallOption - RewriteObject []gax.CallOption - StartResumableWrite []gax.CallOption - QueryWriteStatus []gax.CallOption - GetServiceAccount []gax.CallOption - CreateHmacKey []gax.CallOption - DeleteHmacKey []gax.CallOption - GetHmacKey []gax.CallOption - ListHmacKeys []gax.CallOption - UpdateHmacKey []gax.CallOption -} - -func defaultGRPCClientOptions() []option.ClientOption { - return []option.ClientOption{ - internaloption.WithDefaultEndpoint("storage.googleapis.com:443"), - internaloption.WithDefaultEndpointTemplate("storage.UNIVERSE_DOMAIN:443"), - internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"), - internaloption.WithDefaultUniverseDomain("googleapis.com"), - internaloption.WithDefaultAudience("https://storage.googleapis.com/"), - internaloption.WithDefaultScopes(DefaultAuthScopes()...), - internaloption.EnableJwtWithScope(), - option.WithGRPCDialOption(grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt32))), - } -} - -func defaultCallOptions() *CallOptions { - return &CallOptions{ - DeleteBucket: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetBucket: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - CreateBucket: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ListBuckets: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - LockBucketRetentionPolicy: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetIamPolicy: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - 
gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - SetIamPolicy: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - TestIamPermissions: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - UpdateBucket: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - DeleteNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - CreateNotificationConfig: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ListNotificationConfigs: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ComposeObject: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - DeleteObject: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - RestoreObject: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - CancelResumableWrite: []gax.CallOption{ - 
gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetObject: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ReadObject: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - UpdateObject: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - WriteObject: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - BidiWriteObject: []gax.CallOption{ - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ListObjects: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - RewriteObject: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - StartResumableWrite: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - QueryWriteStatus: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetServiceAccount: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - CreateHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - 
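All of the call options above share one retry profile: exponential backoff starting at 1s, doubling per attempt, capped at 60s, and limited to DeadlineExceeded/Unavailable. A quick sketch of the nominal backoff cap per attempt; gax applies jitter below each cap, so the actual sleeps are shorter.

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial    = 1 * time.Second
		max        = 60 * time.Second
		multiplier = 2.0
	)
	cur := initial
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: backoff cap %v\n", attempt, cur)
		cur = time.Duration(float64(cur) * multiplier)
		if cur > max {
			cur = max
		}
	}
	// attempt 1: 1s, 2: 2s, 3: 4s, ..., 7: 60s (capped), 8: 60s
}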
codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - DeleteHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - GetHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - ListHmacKeys: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - UpdateHmacKey: []gax.CallOption{ - gax.WithTimeout(60000 * time.Millisecond), - gax.WithRetry(func() gax.Retryer { - return gax.OnCodes([]codes.Code{ - codes.DeadlineExceeded, - codes.Unavailable, - }, gax.Backoff{ - Initial: 1000 * time.Millisecond, - Max: 60000 * time.Millisecond, - Multiplier: 2.00, - }) - }), - }, - } -} - -// internalClient is an interface that defines the methods available from Cloud Storage API. -type internalClient interface { - Close() error - setGoogleClientInfo(...string) - Connection() *grpc.ClientConn - DeleteBucket(context.Context, *storagepb.DeleteBucketRequest, ...gax.CallOption) error - GetBucket(context.Context, *storagepb.GetBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - CreateBucket(context.Context, *storagepb.CreateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - ListBuckets(context.Context, *storagepb.ListBucketsRequest, ...gax.CallOption) *BucketIterator - LockBucketRetentionPolicy(context.Context, *storagepb.LockBucketRetentionPolicyRequest, ...gax.CallOption) (*storagepb.Bucket, error) - GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) - SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) - TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) - UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error - GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) - CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) - ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator - ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) - DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error - RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error) - CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, 
...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) - GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) - ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) - UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) - WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) - BidiWriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) - ListObjects(context.Context, *storagepb.ListObjectsRequest, ...gax.CallOption) *ObjectIterator - RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) - StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) - QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) - GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error) - CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) - DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error - GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) - ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator - UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) -} - -// Client is a client for interacting with Cloud Storage API. -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -// -// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through -// the abstractions of buckets and objects. For a description of these -// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). -// -// Resources are named as follows: -// -// Projects are referred to as they are defined by the Resource Manager API, -// using strings like projects/123456 or projects/my-string-id. -// -// Buckets are named using string names of the form: -// projects/{project}/buckets/{bucket} -// For globally unique buckets, _ may be substituted for the project. -// -// Objects are uniquely identified by their name along with the name of the -// bucket they belong to, as separate strings in this API. For example: -// -// ReadObjectRequest { -// bucket: ‘projects/_/buckets/my-bucket’ -// object: ‘my-object’ -// } -// Note that object names can contain / characters, which are treated as -// any other character (no special directory semantics). -type Client struct { - // The internal transport-dependent client. - internalClient internalClient - - // The call options for this service. - CallOptions *CallOptions -} - -// Wrapper methods routed to the internal client. - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. 
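The wrapper methods below are thin passthroughs to the transport-specific internal client. As a rough usage sketch (not part of the deleted file; the helper name and bucket name are hypothetical), note that this generated package sits under internal/ in cloud.google.com/go/storage, so applications normally reach it through the public storage API rather than importing it directly:

	// Hypothetical in-package illustration of the wrapper API. The bucket name
	// follows the projects/{project}/buckets/{bucket} form described above,
	// with "_" standing in for the project of a globally unique bucket.
	func exampleGetBucket(ctx context.Context) (*storagepb.Bucket, error) {
		c, err := NewClient(ctx)
		if err != nil {
			return nil, err
		}
		defer c.Close()
		return c.GetBucket(ctx, &storagepb.GetBucketRequest{
			Name: "projects/_/buckets/my-bucket",
		})
	}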
-func (c *Client) Close() error { - return c.internalClient.Close() -} - -// setGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *Client) setGoogleClientInfo(keyval ...string) { - c.internalClient.setGoogleClientInfo(keyval...) -} - -// Connection returns a connection to the API service. -// -// Deprecated: Connections are now pooled so this method does not always -// return the same resource. -func (c *Client) Connection() *grpc.ClientConn { - return c.internalClient.Connection() -} - -// DeleteBucket permanently deletes an empty bucket. -func (c *Client) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteBucket(ctx, req, opts...) -} - -// GetBucket returns metadata for the specified bucket. -func (c *Client) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - return c.internalClient.GetBucket(ctx, req, opts...) -} - -// CreateBucket creates a new bucket. -func (c *Client) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - return c.internalClient.CreateBucket(ctx, req, opts...) -} - -// ListBuckets retrieves a list of buckets for a given project. -func (c *Client) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator { - return c.internalClient.ListBuckets(ctx, req, opts...) -} - -// LockBucketRetentionPolicy locks retention policy on a bucket. -func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...) -} - -// GetIamPolicy gets the IAM policy for a specified bucket. -// The resource field in the request should be -// projects/_/buckets/{bucket}. -func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { - return c.internalClient.GetIamPolicy(ctx, req, opts...) -} - -// SetIamPolicy updates an IAM policy for the specified bucket. -// The resource field in the request should be -// projects/_/buckets/{bucket}. -func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { - return c.internalClient.SetIamPolicy(ctx, req, opts...) -} - -// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if -// any, are held by the caller. -// The resource field in the request should be -// projects/_/buckets/{bucket} for a bucket or -// projects/_/buckets/{bucket}/objects/{object} for an object. -func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { - return c.internalClient.TestIamPermissions(ctx, req, opts...) -} - -// UpdateBucket updates a bucket. Equivalent to JSON API’s storage.buckets.patch method. -func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - return c.internalClient.UpdateBucket(ctx, req, opts...) -} - -// DeleteNotificationConfig permanently deletes a NotificationConfig. 
-func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteNotificationConfig(ctx, req, opts...) -} - -// GetNotificationConfig view a NotificationConfig. -func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - return c.internalClient.GetNotificationConfig(ctx, req, opts...) -} - -// CreateNotificationConfig creates a NotificationConfig for a given bucket. -// These NotificationConfigs, when triggered, publish messages to the -// specified Pub/Sub topics. See -// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). -func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - return c.internalClient.CreateNotificationConfig(ctx, req, opts...) -} - -// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket. -func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { - return c.internalClient.ListNotificationConfigs(ctx, req, opts...) -} - -// ComposeObject concatenates a list of existing objects into a new object in the same -// bucket. -func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - return c.internalClient.ComposeObject(ctx, req, opts...) -} - -// DeleteObject deletes an object and its metadata. -// -// Deletions are normally permanent when versioning is disabled or whenever -// the generation parameter is used. However, if soft delete is enabled for -// the bucket, deleted objects can be restored using RestoreObject until the -// soft delete retention period has passed. -func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteObject(ctx, req, opts...) -} - -// RestoreObject restores a soft-deleted object. -func (c *Client) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - return c.internalClient.RestoreObject(ctx, req, opts...) -} - -// CancelResumableWrite cancels an in-progress resumable upload. -// -// Any attempts to write to the resumable upload after cancelling the upload -// will fail. -// -// The behavior for currently in progress write operations is not guaranteed - -// they could either complete before the cancellation or fail if the -// cancellation completes first. -func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { - return c.internalClient.CancelResumableWrite(ctx, req, opts...) -} - -// GetObject retrieves an object’s metadata. -func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - return c.internalClient.GetObject(ctx, req, opts...) -} - -// ReadObject reads an object’s data. 
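The DeleteObject and RestoreObject comments above describe soft delete. A minimal sketch of that round trip (not part of the deleted file; the helper, bucket, and object names are hypothetical, and it assumes the RestoreObjectRequest carries the soft-deleted object's generation):

	// Hypothetical sketch: delete an object, then bring it back with
	// RestoreObject while the bucket's soft-delete retention window is open.
	// The generation value would normally come from an earlier GetObject call.
	func exampleSoftDeleteRoundTrip(ctx context.Context, c *Client, gen int64) error {
		if err := c.DeleteObject(ctx, &storagepb.DeleteObjectRequest{
			Bucket: "projects/_/buckets/my-bucket",
			Object: "my-object",
		}); err != nil {
			return err
		}
		// Restoring requires identifying the soft-deleted generation.
		_, err := c.RestoreObject(ctx, &storagepb.RestoreObjectRequest{
			Bucket:     "projects/_/buckets/my-bucket",
			Object:     "my-object",
			Generation: gen,
		})
		return err
	}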
-func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { - return c.internalClient.ReadObject(ctx, req, opts...) -} - -// UpdateObject updates an object’s metadata. -// Equivalent to JSON API’s storage.objects.patch. -func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - return c.internalClient.UpdateObject(ctx, req, opts...) -} - -// WriteObject stores a new object and metadata. -// -// An object can be written either in a single message stream or in a -// resumable sequence of message streams. To write using a single stream, -// the client should include in the first message of the stream an -// WriteObjectSpec describing the destination bucket, object, and any -// preconditions. Additionally, the final message must set ‘finish_write’ to -// true, or else it is an error. -// -// For a resumable write, the client should instead call -// StartResumableWrite(), populating a WriteObjectSpec into that request. -// They should then attach the returned upload_id to the first message of -// each following call to WriteObject. If the stream is closed before -// finishing the upload (either explicitly by the client or due to a network -// error or an error response from the server), the client should do as -// follows: -// -// Check the result Status of the stream, to determine if writing can be -// resumed on this stream or must be restarted from scratch (by calling -// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED, -// INTERNAL, and UNAVAILABLE. For each case, the client should use binary -// exponential backoff before retrying. Additionally, writes can be -// resumed after RESOURCE_EXHAUSTED errors, but only after taking -// appropriate measures, which may include reducing aggregate send rate -// across clients and/or requesting a quota increase for your project. -// -// If the call to WriteObject returns ABORTED, that indicates -// concurrent attempts to update the resumable write, caused either by -// multiple racing clients or by a single client where the previous -// request was timed out on the client side but nonetheless reached the -// server. In this case the client should take steps to prevent further -// concurrent writes (e.g., increase the timeouts, stop using more than -// one process to perform the upload, etc.), and then should follow the -// steps below for resuming the upload. -// -// For resumable errors, the client should call QueryWriteStatus() and -// then continue writing from the returned persisted_size. This may be -// less than the amount of data the client previously sent. Note also that -// it is acceptable to send data starting at an offset earlier than the -// returned persisted_size; in this case, the service will skip data at -// offsets that were already persisted (without checking that it matches -// the previously written data), and write only the data starting from the -// persisted offset. Even though the data isn’t written, it may still -// incur a performance cost over resuming at the correct write offset. -// This behavior can make client-side handling simpler in some cases. -// -// Clients must only send data that is a multiple of 256 KiB per message, -// unless the object is being finished with finish_write set to true. 
-// -// The service will not view the object as complete until the client has -// sent a WriteObjectRequest with finish_write set to true. Sending any -// requests on a stream after sending a request with finish_write set to -// true will cause an error. The client should check the response it -// receives to determine how much data the service was able to commit and -// whether the service views the object as complete. -// -// Attempting to resume an already finalized object will result in an OK -// status, with a WriteObjectResponse containing the finalized object’s -// metadata. -// -// Alternatively, the BidiWriteObject operation may be used to write an -// object with controls over flushing and the ability to fetch the ability to -// determine the current persisted size. -func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { - return c.internalClient.WriteObject(ctx, opts...) -} - -// BidiWriteObject stores a new object and metadata. -// -// This is similar to the WriteObject call with the added support for -// manual flushing of persisted state, and the ability to determine current -// persisted size without closing the stream. -// -// The client may specify one or both of the state_lookup and flush fields -// in each BidiWriteObjectRequest. If flush is specified, the data written -// so far will be persisted to storage. If state_lookup is specified, the -// service will respond with a BidiWriteObjectResponse that contains the -// persisted size. If both flush and state_lookup are specified, the flush -// will always occur before a state_lookup, so that both may be set in the -// same request and the returned state will be the state of the object -// post-flush. When the stream is closed, a BidiWriteObjectResponse will -// always be sent to the client, regardless of the value of state_lookup. -func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { - return c.internalClient.BidiWriteObject(ctx, opts...) -} - -// ListObjects retrieves a list of objects matching the criteria. -func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { - return c.internalClient.ListObjects(ctx, req, opts...) -} - -// RewriteObject rewrites a source object to a destination object. Optionally overrides -// metadata. -func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { - return c.internalClient.RewriteObject(ctx, req, opts...) -} - -// StartResumableWrite starts a resumable write. How long the write operation remains valid, and -// what happens when the write operation becomes invalid, are -// service-dependent. -func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { - return c.internalClient.StartResumableWrite(ctx, req, opts...) -} - -// QueryWriteStatus determines the persisted_size for an object that is being written, which -// can then be used as the write_offset for the next Write() call. -// -// If the object does not exist (i.e., the object has been deleted, or the -// first Write() has not yet reached the service), this method returns the -// error NOT_FOUND. 
-// -// The client may call QueryWriteStatus() at any time to determine how -// much data has been processed for this object. This is useful if the -// client is buffering data and needs to know which data can be safely -// evicted. For any sequence of QueryWriteStatus() calls for a given -// object name, the sequence of returned persisted_size values will be -// non-decreasing. -func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { - return c.internalClient.QueryWriteStatus(ctx, req, opts...) -} - -// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account. -func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { - return c.internalClient.GetServiceAccount(ctx, req, opts...) -} - -// CreateHmacKey creates a new HMAC key for the given service account. -func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { - return c.internalClient.CreateHmacKey(ctx, req, opts...) -} - -// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state. -func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteHmacKey(ctx, req, opts...) -} - -// GetHmacKey gets an existing HMAC key metadata for the given id. -func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - return c.internalClient.GetHmacKey(ctx, req, opts...) -} - -// ListHmacKeys lists HMAC keys under a given project with the additional filters provided. -func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - return c.internalClient.ListHmacKeys(ctx, req, opts...) -} - -// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE. -func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - return c.internalClient.UpdateHmacKey(ctx, req, opts...) -} - -// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport. -// -// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. -type gRPCClient struct { - // Connection pool of gRPC connections to the service. - connPool gtransport.ConnPool - - // Points back to the CallOptions field of the containing Client - CallOptions **CallOptions - - // The gRPC API client. - client storagepb.StorageClient - - // The x-goog-* metadata to be sent with each request. - xGoogHeaders []string -} - -// NewClient creates a new storage client based on gRPC. -// The returned client must be Closed when it is done being used to clean up its underlying connections. -// -// API Overview and Naming SyntaxThe Cloud Storage gRPC API allows applications to read and write data through -// the abstractions of buckets and objects. For a description of these -// abstractions please see https://cloud.google.com/storage/docs (at https://cloud.google.com/storage/docs). 
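The WriteObject and QueryWriteStatus comments above describe the resumable-write bookkeeping. A minimal sketch of that flow (not part of the deleted file; the helper name is hypothetical, and it assumes the generated getters GetUploadId and GetPersistedSize for the upload_id and persisted_size proto fields):

	// Hypothetical sketch: start a resumable session, and after an interruption
	// ask the service how many bytes were persisted before resuming from that offset.
	func exampleResumableStatus(ctx context.Context, c *Client, bucket, object string) (int64, error) {
		start, err := c.StartResumableWrite(ctx, &storagepb.StartResumableWriteRequest{
			WriteObjectSpec: &storagepb.WriteObjectSpec{
				Resource: &storagepb.Object{Bucket: bucket, Name: object},
			},
		})
		if err != nil {
			return 0, err
		}
		// ... stream WriteObjectRequests carrying start.GetUploadId(), in 256 KiB
		// multiples, until a final message sets finish_write ...
		status, err := c.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
			UploadId: start.GetUploadId(),
		})
		if err != nil {
			return 0, err
		}
		// persisted_size is the offset from which writing may resume.
		return status.GetPersistedSize(), nil
	}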
-//
-// Resources are named as follows:
-//
-// Projects are referred to as they are defined by the Resource Manager API,
-// using strings like projects/123456 or projects/my-string-id.
-//
-// Buckets are named using string names of the form:
-// projects/{project}/buckets/{bucket}
-// For globally unique buckets, _ may be substituted for the project.
-//
-// Objects are uniquely identified by their name along with the name of the
-// bucket they belong to, as separate strings in this API. For example:
-//
-//	ReadObjectRequest {
-//	  bucket: 'projects/_/buckets/my-bucket'
-//	  object: 'my-object'
-//	}
-// Note that object names can contain / characters, which are treated as
-// any other character (no special directory semantics).
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
-	clientOpts := defaultGRPCClientOptions()
-	if newClientHook != nil {
-		hookOpts, err := newClientHook(ctx, clientHookParams{})
-		if err != nil {
-			return nil, err
-		}
-		clientOpts = append(clientOpts, hookOpts...)
-	}
-
-	connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	client := Client{CallOptions: defaultCallOptions()}
-
-	c := &gRPCClient{
-		connPool:    connPool,
-		client:      storagepb.NewStorageClient(connPool),
-		CallOptions: &client.CallOptions,
-	}
-	c.setGoogleClientInfo()
-
-	client.internalClient = c
-
-	return &client, nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *gRPCClient) Connection() *grpc.ClientConn {
-	return c.connPool.Conn()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
-	kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *gRPCClient) Close() error {
-	return c.connPool.Close()
-}
-
-func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error {
-	routingHeaders := ""
-	routingHeadersMap := make(map[string]string)
-	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
-		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
-	}
-	for headerName, headerValue := range routingHeadersMap {
-		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
-	}
-	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
-	hds := []string{"x-goog-request-params", routingHeaders}
-
-	hds = append(c.xGoogHeaders, hds...)
-	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
-	opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.DeleteBucket(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
-	routingHeaders := ""
-	routingHeadersMap := make(map[string]string)
-	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
-		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
-	}
-	for headerName, headerValue := range routingHeadersMap {
-		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
-	}
-	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
-	hds := []string{"x-goog-request-params", routingHeaders}
-
-	hds = append(c.xGoogHeaders, hds...)
-	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
-	opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...)
-	var resp *storagepb.Bucket
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetBucket(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
-	routingHeaders := ""
-	routingHeadersMap := make(map[string]string)
-	if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
-		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
-	}
-	if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetBucket().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetProject())[1])) > 0 {
-		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetProject())[1])
-	}
-	for headerName, headerValue := range routingHeadersMap {
-		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
-	}
-	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
-	hds := []string{"x-goog-request-params", routingHeaders}
-
-	hds = append(c.xGoogHeaders, hds...)
-	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
-	opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...)
-	var resp *storagepb.Bucket
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBucketsRequest, opts ...gax.CallOption) *BucketIterator {
-	routingHeaders := ""
-	routingHeadersMap := make(map[string]string)
-	if reg := regexp.MustCompile("(?P<project>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
-		routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
-	}
-	for headerName, headerValue := range routingHeadersMap {
-		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
-	}
-	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
-	hds := []string{"x-goog-request-params", routingHeaders}
-
-	hds = append(c.xGoogHeaders, hds...)
-	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
-	opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...)
-	it := &BucketIterator{}
-	req = proto.Clone(req).(*storagepb.ListBucketsRequest)
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Bucket, string, error) {
-		resp := &storagepb.ListBucketsResponse{}
-		if pageToken != "" {
-			req.PageToken = pageToken
-		}
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else if pageSize != 0 {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-
-		it.Response = resp
-		return resp.GetBuckets(), resp.GetNextPageToken(), nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	it.pageInfo.MaxSize = int(req.GetPageSize())
-	it.pageInfo.Token = req.GetPageToken()
-
-	return it
-}
-
-func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
-	routingHeaders := ""
-	routingHeadersMap := make(map[string]string)
-	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
-		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
-	}
-	for headerName, headerValue := range routingHeadersMap {
-		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
-	}
-	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
-	hds := []string{"x-goog-request-params", routingHeaders}
-
-	hds = append(c.xGoogHeaders, hds...)
-	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
-	opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...)
-	var resp *storagepb.Bucket
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
-	routingHeaders := ""
-	routingHeadersMap := make(map[string]string)
-	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
-		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
-	}
-	for headerName, headerValue := range routingHeadersMap {
-		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
-	}
-	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
-	hds := []string{"x-goog-request-params", routingHeaders}
-
-	hds = append(c.xGoogHeaders, hds...)
-	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
-	opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
-	var resp *iampb.Policy
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
-	routingHeaders := ""
-	routingHeadersMap := make(map[string]string)
-	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
-		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
-	}
-	for headerName, headerValue := range routingHeadersMap {
-		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
-	}
-	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
-	hds := []string{"x-goog-request-params", routingHeaders}
-
-	hds = append(c.xGoogHeaders, hds...)
-	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
-	opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
-	var resp *iampb.Policy
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
-	routingHeaders := ""
-	routingHeadersMap := make(map[string]string)
-	if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
-		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
-	}
-	if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
-		routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
-	}
-	for headerName, headerValue := range routingHeadersMap {
-		routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
-	}
-	routingHeaders = strings.TrimSuffix(routingHeaders, "&")
-	hds := []string{"x-goog-request-params", routingHeaders}
-
-	hds = append(c.xGoogHeaders, hds...)
-	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
-	opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
-	var resp *iampb.TestIamPermissionsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...) - var resp *storagepb.Bucket - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
- opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...) - var resp *storagepb.NotificationConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...) - var resp *storagepb.NotificationConfig - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...) - it := &NotificationConfigIterator{} - req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) { - resp := &storagepb.ListNotificationConfigsResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - -func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...) - var resp *storagepb.Object - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteObject(ctx, req, settings.GRPC...) - return err - }, opts...) 
- return err -} - -func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).RestoreObject[0:len((*c.CallOptions).RestoreObject):len((*c.CallOptions).RestoreObject)], opts...) - var resp *storagepb.Object - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...) - var resp *storagepb.CancelResumableWriteResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
- opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...) - var resp *storagepb.Object - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).ReadObject[0:len((*c.CallOptions).ReadObject):len((*c.CallOptions).ReadObject)], opts...) - var resp storagepb.Storage_ReadObjectClient - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ReadObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...) - var resp *storagepb.Object - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) - var resp storagepb.Storage_WriteObjectClient - opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.WriteObject(ctx, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) - var resp storagepb.Storage_BidiWriteObjectClient - opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...) - it := &ObjectIterator{} - req = proto.Clone(req).(*storagepb.ListObjectsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Object, string, error) { - resp := &storagepb.ListObjectsResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListObjects(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetObjects(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) 
- return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - -func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 { - routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1]) - } - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...) - var resp *storagepb.RewriteResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...) - var resp *storagepb.StartResumableWriteResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...) - return err - }, opts...) 
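// The iterator returned by ListObjects above is driven with the standard
// google.golang.org/api/iterator idiom: call Next until it reports
// iterator.Done. A short sketch; listAllObjects is an illustrative helper,
// not part of the generated client:
func listAllObjects(it *ObjectIterator) ([]*storagepb.Object, error) {
	var objs []*storagepb.Object
	for {
		obj, err := it.Next()
		if err == iterator.Done {
			return objs, nil // pagination exhausted
		}
		if err != nil {
			return nil, err
		}
		objs = append(objs, obj)
	}
}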
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...) - var resp *storagepb.QueryWriteStatusResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...) - var resp *storagepb.ServiceAccount - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
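// Every routing-header block above follows the same pattern: capture the
// routing value with a regexp, URL-escape it, and join the pieces into one
// "x-goog-request-params" value such as "bucket=projects%2F_%2Fbuckets%2Fb".
// A condensed sketch of that pattern using the file's existing imports;
// buildRequestParams is an illustrative helper, not part of the generated
// client:
func buildRequestParams(pattern, value string) string {
	reg := regexp.MustCompile(pattern)
	params := make(map[string]string)
	if m := reg.FindStringSubmatch(value); m != nil {
		for i, name := range reg.SubexpNames() {
			if i > 0 && name != "" && m[i] != "" {
				params[name] = url.QueryEscape(m[i]) // e.g. "/" becomes %2F
			}
		}
	}
	parts := make([]string, 0, len(params))
	for k, v := range params {
		parts = append(parts, fmt.Sprintf("%s=%s", k, v))
	}
	return strings.Join(parts, "&")
}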
- opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...) - var resp *storagepb.CreateHmacKeyResponse - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) - return err -} - -func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) - var resp *storagepb.HmacKeyMetadata - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) 
- if err != nil { - return nil, err - } - return resp, nil -} - -func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) - it := &HmacKeyMetadataIterator{} - req = proto.Clone(req).(*storagepb.ListHmacKeysRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) { - resp := &storagepb.ListHmacKeysResponse{} - if pageToken != "" { - req.PageToken = pageToken - } - if pageSize > math.MaxInt32 { - req.PageSize = math.MaxInt32 - } else if pageSize != 0 { - req.PageSize = int32(pageSize) - } - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...) - return err - }, opts...) - if err != nil { - return nil, "", err - } - - it.Response = resp - return resp.GetHmacKeys(), resp.GetNextPageToken(), nil - } - fetch := func(pageSize int, pageToken string) (string, error) { - items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) - if err != nil { - return "", err - } - it.items = append(it.items, items...) - return nextPageToken, nil - } - - it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) - it.pageInfo.MaxSize = int(req.GetPageSize()) - it.pageInfo.Token = req.GetPageToken() - - return it -} - -func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) { - routingHeaders := "" - routingHeadersMap := make(map[string]string) - if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 { - routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1]) - } - for headerName, headerValue := range routingHeadersMap { - routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) - } - routingHeaders = strings.TrimSuffix(routingHeaders, "&") - hds := []string{"x-goog-request-params", routingHeaders} - - hds = append(c.xGoogHeaders, hds...) - ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) - opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...) - var resp *storagepb.HmacKeyMetadata - err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { - var err error - resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...) - return err - }, opts...) 
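// The "append(defaults[0:len(defaults):len(defaults)], opts...)" construction
// used by every method above is a full slice expression: capping the copy's
// capacity at its length forces append to allocate a fresh backing array, so
// the caller's per-call options can never overwrite the shared per-method
// defaults. A minimal sketch of that behaviour with plain ints (illustrative
// only):
func appendWithoutClobbering() {
	defaults := make([]int, 2, 8) // shared defaults with spare capacity
	defaults[0], defaults[1] = 1, 2

	// cap == len on the sliced copy, so append must allocate.
	perCall := append(defaults[0:len(defaults):len(defaults)], 99)

	fmt.Println(defaults) // [1 2], unchanged
	fmt.Println(perCall)  // [1 2 99]
}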
- if err != nil { - return nil, err - } - return resp, nil -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go deleted file mode 100644 index d08b9b697..000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ /dev/null @@ -1,11659 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 -// source: google/storage/v2/storage.proto - -package storagepb - -import ( - context "context" - reflect "reflect" - sync "sync" - - iampb "cloud.google.com/go/iam/apiv1/iampb" - _ "google.golang.org/genproto/googleapis/api/annotations" - date "google.golang.org/genproto/googleapis/type/date" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - emptypb "google.golang.org/protobuf/types/known/emptypb" - fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// A collection of constant values meaningful to the Storage API. -type ServiceConstants_Values int32 - -const ( - // Unused. Proto3 requires first enum to be 0. - ServiceConstants_VALUES_UNSPECIFIED ServiceConstants_Values = 0 - // The maximum size chunk that can will be returned in a single - // ReadRequest. - // 2 MiB. - ServiceConstants_MAX_READ_CHUNK_BYTES ServiceConstants_Values = 2097152 - // The maximum size chunk that can be sent in a single WriteObjectRequest. - // 2 MiB. - ServiceConstants_MAX_WRITE_CHUNK_BYTES ServiceConstants_Values = 2097152 - // The maximum size of an object in MB - whether written in a single stream - // or composed from multiple other objects. - // 5 TiB. - ServiceConstants_MAX_OBJECT_SIZE_MB ServiceConstants_Values = 5242880 - // The maximum length field name that can be sent in a single - // custom metadata field. - // 1 KiB. - ServiceConstants_MAX_CUSTOM_METADATA_FIELD_NAME_BYTES ServiceConstants_Values = 1024 - // The maximum length field value that can be sent in a single - // custom_metadata field. - // 4 KiB. - ServiceConstants_MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES ServiceConstants_Values = 4096 - // The maximum total bytes that can be populated into all field names and - // values of the custom_metadata for one object. - // 8 KiB. 
- ServiceConstants_MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 8192 - // The maximum total bytes that can be populated into all bucket metadata - // fields. - // 20 KiB. - ServiceConstants_MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES ServiceConstants_Values = 20480 - // The maximum number of NotificationConfigs that can be registered - // for a given bucket. - ServiceConstants_MAX_NOTIFICATION_CONFIGS_PER_BUCKET ServiceConstants_Values = 100 - // The maximum number of LifecycleRules that can be registered for a given - // bucket. - ServiceConstants_MAX_LIFECYCLE_RULES_PER_BUCKET ServiceConstants_Values = 100 - // The maximum number of custom attributes per NotificationConfigs. - ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTES ServiceConstants_Values = 5 - // The maximum length of a custom attribute key included in - // NotificationConfig. - ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH ServiceConstants_Values = 256 - // The maximum length of a custom attribute value included in a - // NotificationConfig. - ServiceConstants_MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH ServiceConstants_Values = 1024 - // The maximum number of key/value entries per bucket label. - ServiceConstants_MAX_LABELS_ENTRIES_COUNT ServiceConstants_Values = 64 - // The maximum character length of the key or value in a bucket - // label map. - ServiceConstants_MAX_LABELS_KEY_VALUE_LENGTH ServiceConstants_Values = 63 - // The maximum byte size of the key or value in a bucket label - // map. - ServiceConstants_MAX_LABELS_KEY_VALUE_BYTES ServiceConstants_Values = 128 - // The maximum number of object IDs that can be included in a - // DeleteObjectsRequest. - ServiceConstants_MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST ServiceConstants_Values = 1000 - // The maximum number of days for which a token returned by the - // GetListObjectsSplitPoints RPC is valid. - ServiceConstants_SPLIT_TOKEN_MAX_VALID_DAYS ServiceConstants_Values = 14 -) - -// Enum value maps for ServiceConstants_Values. 
-var ( - ServiceConstants_Values_name = map[int32]string{ - 0: "VALUES_UNSPECIFIED", - 2097152: "MAX_READ_CHUNK_BYTES", - // Duplicate value: 2097152: "MAX_WRITE_CHUNK_BYTES", - 5242880: "MAX_OBJECT_SIZE_MB", - 1024: "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES", - 4096: "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES", - 8192: "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES", - 20480: "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES", - 100: "MAX_NOTIFICATION_CONFIGS_PER_BUCKET", - // Duplicate value: 100: "MAX_LIFECYCLE_RULES_PER_BUCKET", - 5: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES", - 256: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH", - // Duplicate value: 1024: "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH", - 64: "MAX_LABELS_ENTRIES_COUNT", - 63: "MAX_LABELS_KEY_VALUE_LENGTH", - 128: "MAX_LABELS_KEY_VALUE_BYTES", - 1000: "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST", - 14: "SPLIT_TOKEN_MAX_VALID_DAYS", - } - ServiceConstants_Values_value = map[string]int32{ - "VALUES_UNSPECIFIED": 0, - "MAX_READ_CHUNK_BYTES": 2097152, - "MAX_WRITE_CHUNK_BYTES": 2097152, - "MAX_OBJECT_SIZE_MB": 5242880, - "MAX_CUSTOM_METADATA_FIELD_NAME_BYTES": 1024, - "MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES": 4096, - "MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES": 8192, - "MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES": 20480, - "MAX_NOTIFICATION_CONFIGS_PER_BUCKET": 100, - "MAX_LIFECYCLE_RULES_PER_BUCKET": 100, - "MAX_NOTIFICATION_CUSTOM_ATTRIBUTES": 5, - "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH": 256, - "MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH": 1024, - "MAX_LABELS_ENTRIES_COUNT": 64, - "MAX_LABELS_KEY_VALUE_LENGTH": 63, - "MAX_LABELS_KEY_VALUE_BYTES": 128, - "MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST": 1000, - "SPLIT_TOKEN_MAX_VALID_DAYS": 14, - } -) - -func (x ServiceConstants_Values) Enum() *ServiceConstants_Values { - p := new(ServiceConstants_Values) - *p = x - return p -} - -func (x ServiceConstants_Values) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ServiceConstants_Values) Descriptor() protoreflect.EnumDescriptor { - return file_google_storage_v2_storage_proto_enumTypes[0].Descriptor() -} - -func (ServiceConstants_Values) Type() protoreflect.EnumType { - return &file_google_storage_v2_storage_proto_enumTypes[0] -} - -func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ServiceConstants_Values.Descriptor instead. -func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} -} - -// Request message for DeleteBucket. -type DeleteBucketRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a bucket to delete. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // If set, only deletes the bucket if its metageneration matches this value. - IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // If set, only deletes the bucket if its metageneration does not match this - // value. 
- IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` -} - -func (x *DeleteBucketRequest) Reset() { - *x = DeleteBucketRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteBucketRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteBucketRequest) ProtoMessage() {} - -func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteBucketRequest.ProtoReflect.Descriptor instead. -func (*DeleteBucketRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{0} -} - -func (x *DeleteBucketRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *DeleteBucketRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *DeleteBucketRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -// Request message for GetBucket. -type GetBucketRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a bucket. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // If set, and if the bucket's current metageneration does not match the - // specified value, the request will return an error. - IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // If set, and if the bucket's current metageneration matches the specified - // value, the request will return an error. - IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // Mask specifying which fields to read. - // A "*" field may be used to indicate all fields. - // If no mask is specified, will default to all fields. - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` -} - -func (x *GetBucketRequest) Reset() { - *x = GetBucketRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetBucketRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetBucketRequest) ProtoMessage() {} - -func (x *GetBucketRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetBucketRequest.ProtoReflect.Descriptor instead. 
-func (*GetBucketRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{1} -} - -func (x *GetBucketRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *GetBucketRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -// Request message for CreateBucket. -type CreateBucketRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project to which this bucket will belong. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Properties of the new bucket being inserted. - // The name of the bucket is specified in the `bucket_id` field. Populating - // `bucket.name` field will result in an error. - // The project of the bucket must be specified in the `bucket.project` field. - // This field must be in `projects/{projectIdentifier}` format, - // {projectIdentifier} can be the project ID or project number. The `parent` - // field must be either empty or `projects/_`. - Bucket *Bucket `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. The ID to use for this bucket, which will become the final - // component of the bucket's resource name. For example, the value `foo` might - // result in a bucket with the name `projects/123456/buckets/foo`. - BucketId string `protobuf:"bytes,3,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` - // Apply a predefined set of access controls to this bucket. - // Valid values are "authenticatedRead", "private", "projectPrivate", - // "publicRead", or "publicReadWrite". - PredefinedAcl string `protobuf:"bytes,6,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // Apply a predefined set of default object access controls to this bucket. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - PredefinedDefaultObjectAcl string `protobuf:"bytes,7,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` -} - -func (x *CreateBucketRequest) Reset() { - *x = CreateBucketRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateBucketRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateBucketRequest) ProtoMessage() {} - -func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateBucketRequest.ProtoReflect.Descriptor instead. 
-func (*CreateBucketRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{2} -} - -func (x *CreateBucketRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *CreateBucketRequest) GetBucket() *Bucket { - if x != nil { - return x.Bucket - } - return nil -} - -func (x *CreateBucketRequest) GetBucketId() string { - if x != nil { - return x.BucketId - } - return "" -} - -func (x *CreateBucketRequest) GetPredefinedAcl() string { - if x != nil { - return x.PredefinedAcl - } - return "" -} - -func (x *CreateBucketRequest) GetPredefinedDefaultObjectAcl() string { - if x != nil { - return x.PredefinedDefaultObjectAcl - } - return "" -} - -// Request message for ListBuckets. -type ListBucketsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project whose buckets we are listing. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Maximum number of buckets to return in a single response. The service will - // use this parameter or 1,000 items, whichever is smaller. If "acl" is - // present in the read_mask, the service will use this parameter of 200 items, - // whichever is smaller. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously-returned page token representing part of the larger set of - // results to view. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // Filter results to buckets whose names begin with this prefix. - Prefix string `protobuf:"bytes,4,opt,name=prefix,proto3" json:"prefix,omitempty"` - // Mask specifying which fields to read from each result. - // If no mask is specified, will default to all fields except items.owner, - // items.acl, and items.default_object_acl. - // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` -} - -func (x *ListBucketsRequest) Reset() { - *x = ListBucketsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListBucketsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBucketsRequest) ProtoMessage() {} - -func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBucketsRequest.ProtoReflect.Descriptor instead. 
-func (*ListBucketsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{3} -} - -func (x *ListBucketsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListBucketsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListBucketsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListBucketsRequest) GetPrefix() string { - if x != nil { - return x.Prefix - } - return "" -} - -func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -// The result of a call to Buckets.ListBuckets -type ListBucketsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"` - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListBucketsResponse) Reset() { - *x = ListBucketsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListBucketsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBucketsResponse) ProtoMessage() {} - -func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBucketsResponse.ProtoReflect.Descriptor instead. -func (*ListBucketsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{4} -} - -func (x *ListBucketsResponse) GetBuckets() []*Bucket { - if x != nil { - return x.Buckets - } - return nil -} - -func (x *ListBucketsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request message for LockBucketRetentionPolicyRequest. -type LockBucketRetentionPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a bucket. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. Makes the operation conditional on whether bucket's current - // metageneration matches the given value. Must be positive. 
- IfMetagenerationMatch int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3" json:"if_metageneration_match,omitempty"` -} - -func (x *LockBucketRetentionPolicyRequest) Reset() { - *x = LockBucketRetentionPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LockBucketRetentionPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LockBucketRetentionPolicyRequest) ProtoMessage() {} - -func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LockBucketRetentionPolicyRequest.ProtoReflect.Descriptor instead. -func (*LockBucketRetentionPolicyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{5} -} - -func (x *LockBucketRetentionPolicyRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *LockBucketRetentionPolicyRequest) GetIfMetagenerationMatch() int64 { - if x != nil { - return x.IfMetagenerationMatch - } - return 0 -} - -// Request for UpdateBucket method. -type UpdateBucketRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The bucket to update. - // The bucket's `name` field will be used to identify the bucket. - Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // If set, will only modify the bucket if its metageneration matches this - // value. - IfMetagenerationMatch *int64 `protobuf:"varint,2,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // If set, will only modify the bucket if its metageneration does not match - // this value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // Apply a predefined set of access controls to this bucket. - // Valid values are "authenticatedRead", "private", "projectPrivate", - // "publicRead", or "publicReadWrite". - PredefinedAcl string `protobuf:"bytes,8,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // Apply a predefined set of default object access controls to this bucket. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - PredefinedDefaultObjectAcl string `protobuf:"bytes,9,opt,name=predefined_default_object_acl,json=predefinedDefaultObjectAcl,proto3" json:"predefined_default_object_acl,omitempty"` - // Required. List of fields to be updated. - // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. Note: not recommended. If a new - // field is introduced at a later time, an older client updating with the `*` - // may accidentally reset the new field's value. - // - // Not specifying any fields is an error. 
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateBucketRequest) Reset() { - *x = UpdateBucketRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateBucketRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateBucketRequest) ProtoMessage() {} - -func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateBucketRequest.ProtoReflect.Descriptor instead. -func (*UpdateBucketRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{6} -} - -func (x *UpdateBucketRequest) GetBucket() *Bucket { - if x != nil { - return x.Bucket - } - return nil -} - -func (x *UpdateBucketRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *UpdateBucketRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *UpdateBucketRequest) GetPredefinedAcl() string { - if x != nil { - return x.PredefinedAcl - } - return "" -} - -func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string { - if x != nil { - return x.PredefinedDefaultObjectAcl - } - return "" -} - -func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Request message for DeleteNotificationConfig. -type DeleteNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The parent bucket of the NotificationConfig. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *DeleteNotificationConfigRequest) Reset() { - *x = DeleteNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteNotificationConfigRequest) ProtoMessage() {} - -func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} -} - -func (x *DeleteNotificationConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Request message for GetNotificationConfig. 
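// UpdateMask is a google.protobuf.FieldMask whose paths name the Bucket
// fields to modify; "*" replaces everything and is discouraged above. A short
// in-package sketch, assuming "labels" as the field being updated;
// labelsOnlyUpdate is an illustrative helper, not part of the generated file:
func labelsOnlyUpdate(b *Bucket) *UpdateBucketRequest {
	return &UpdateBucketRequest{
		Bucket:     b,
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"labels"}},
	}
}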
-type GetNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The parent bucket of the NotificationConfig. - // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetNotificationConfigRequest) Reset() { - *x = GetNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetNotificationConfigRequest) ProtoMessage() {} - -func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} -} - -func (x *GetNotificationConfigRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// Request message for CreateNotificationConfig. -type CreateNotificationConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The bucket to which this NotificationConfig belongs. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. Properties of the NotificationConfig to be inserted. - NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"` -} - -func (x *CreateNotificationConfigRequest) Reset() { - *x = CreateNotificationConfigRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateNotificationConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateNotificationConfigRequest) ProtoMessage() {} - -func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead. -func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} -} - -func (x *CreateNotificationConfigRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig { - if x != nil { - return x.NotificationConfig - } - return nil -} - -// Request message for ListNotifications. 
-type ListNotificationConfigsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of a Google Cloud Storage bucket. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // The maximum number of NotificationConfigs to return. The service may - // return fewer than this value. The default value is 100. Specifying a value - // above 100 will result in a page_size of 100. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A page token, received from a previous `ListNotificationConfigs` call. - // Provide this to retrieve the subsequent page. - // - // When paginating, all other parameters provided to `ListNotificationConfigs` - // must match the call that provided the page token. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListNotificationConfigsRequest) Reset() { - *x = ListNotificationConfigsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationConfigsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationConfigsRequest) ProtoMessage() {} - -func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead. -func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} -} - -func (x *ListNotificationConfigsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListNotificationConfigsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListNotificationConfigsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The result of a call to ListNotificationConfigs -type ListNotificationConfigsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"` - // A token, which can be sent as `page_token` to retrieve the next page. - // If this field is omitted, there are no subsequent pages. 
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListNotificationConfigsResponse) Reset() { - *x = ListNotificationConfigsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListNotificationConfigsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListNotificationConfigsResponse) ProtoMessage() {} - -func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead. -func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} -} - -func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig { - if x != nil { - return x.NotificationConfigs - } - return nil -} - -func (x *ListNotificationConfigsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request message for ComposeObject. -type ComposeObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Properties of the resulting object. - Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` - // The list of source objects that will be concatenated into a single object. - SourceObjects []*ComposeObjectRequest_SourceObject `protobuf:"bytes,2,rep,name=source_objects,json=sourceObjects,proto3" json:"source_objects,omitempty"` - // Apply a predefined set of access controls to the destination object. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - DestinationPredefinedAcl string `protobuf:"bytes,9,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Resource name of the Cloud KMS key, of the form - // `projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key`, - // that will be used to encrypt the object. Overrides the object - // metadata's `kms_key_name` value, if any. - KmsKey string `protobuf:"bytes,6,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` - // A set of parameters common to Storage API requests concerning an object. 
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,7,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // The checksums of the complete object. This will be validated against the - // combined checksums of the component objects. - ObjectChecksums *ObjectChecksums `protobuf:"bytes,10,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` -} - -func (x *ComposeObjectRequest) Reset() { - *x = ComposeObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ComposeObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ComposeObjectRequest) ProtoMessage() {} - -func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead. -func (*ComposeObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12} -} - -func (x *ComposeObjectRequest) GetDestination() *Object { - if x != nil { - return x.Destination - } - return nil -} - -func (x *ComposeObjectRequest) GetSourceObjects() []*ComposeObjectRequest_SourceObject { - if x != nil { - return x.SourceObjects - } - return nil -} - -func (x *ComposeObjectRequest) GetDestinationPredefinedAcl() string { - if x != nil { - return x.DestinationPredefinedAcl - } - return "" -} - -func (x *ComposeObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *ComposeObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *ComposeObjectRequest) GetKmsKey() string { - if x != nil { - return x.KmsKey - } - return "" -} - -func (x *ComposeObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *ComposeObjectRequest) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -// Message for deleting an object. -// `bucket` and `object` **must** be set. -type DeleteObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of the bucket in which the object resides. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. The name of the finalized object to delete. - // Note: If you want to delete an unfinalized resumable upload please use - // `CancelResumableWrite`. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // If present, permanently deletes a specific revision of this object (as - // opposed to the latest version, the default). - Generation int64 `protobuf:"varint,4,opt,name=generation,proto3" json:"generation,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. 
Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,5,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *DeleteObjectRequest) Reset() { - *x = DeleteObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteObjectRequest) ProtoMessage() {} - -func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead. 
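// The precondition fields above (if_generation_match and friends) are proto3
// optional fields, so they surface as pointers: nil means "no precondition",
// an explicit 0 has its own documented meaning, and the generated Get*
// accessors return 0 when the pointer is nil. A minimal in-package sketch;
// deleteIfNoLiveVersion is an illustrative helper, not part of the generated
// file:
func deleteIfNoLiveVersion(bucket, object string) *DeleteObjectRequest {
	gen := int64(0) // 0 means: succeed only if no live version exists
	return &DeleteObjectRequest{
		Bucket:            bucket,
		Object:            object,
		IfGenerationMatch: &gen, // leaving this nil would impose no precondition
	}
}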
-func (*DeleteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13} -} - -func (x *DeleteObjectRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *DeleteObjectRequest) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *DeleteObjectRequest) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *DeleteObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *DeleteObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *DeleteObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *DeleteObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -// Message for restoring an object. -// `bucket`, `object`, and `generation` **must** be set. -type RestoreObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of the bucket in which the object resides. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. The name of the object to restore. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // Required. The specific revision of the object to restore. - Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // If false or unset, the bucket's default object ACL will be used. - // If true, copy the source object's access controls. - // Return an error if bucket has UBLA enabled. 
- CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *RestoreObjectRequest) Reset() { - *x = RestoreObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RestoreObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RestoreObjectRequest) ProtoMessage() {} - -func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead. -func (*RestoreObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} -} - -func (x *RestoreObjectRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *RestoreObjectRequest) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *RestoreObjectRequest) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *RestoreObjectRequest) GetCopySourceAcl() bool { - if x != nil && x.CopySourceAcl != nil { - return *x.CopySourceAcl - } - return false -} - -func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -// Message for canceling an in-progress resumable upload. -// `upload_id` **must** be set. -type CancelResumableWriteRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The upload_id of the resumable upload to cancel. This should be - // copied from the `upload_id` field of `StartResumableWriteResponse`. 
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` -} - -func (x *CancelResumableWriteRequest) Reset() { - *x = CancelResumableWriteRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CancelResumableWriteRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CancelResumableWriteRequest) ProtoMessage() {} - -func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. -func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} -} - -func (x *CancelResumableWriteRequest) GetUploadId() string { - if x != nil { - return x.UploadId - } - return "" -} - -// Empty response message for canceling an in-progress resumable upload, will be -// extended as needed. -type CancelResumableWriteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *CancelResumableWriteResponse) Reset() { - *x = CancelResumableWriteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CancelResumableWriteResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CancelResumableWriteResponse) ProtoMessage() {} - -func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. -func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} -} - -// Request message for ReadObject. -type ReadObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the bucket containing the object to read. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. The name of the object to read. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // If present, selects a specific revision of this object (as opposed - // to the latest version, the default). - Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` - // The offset for the first byte to return in the read, relative to the start - // of the object. - // - // A negative `read_offset` value will be interpreted as the number of bytes - // back from the end of the object to be returned. 
For example, if an object's - // length is 15 bytes, a ReadObjectRequest with `read_offset` = -5 and - // `read_limit` = 3 would return bytes 10 through 12 of the object. Requesting - // a negative offset with magnitude larger than the size of the object will - // return the entire object. - ReadOffset int64 `protobuf:"varint,4,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"` - // The maximum number of `data` bytes the server is allowed to return in the - // sum of all `Object` messages. A `read_limit` of zero indicates that there - // is no limit, and a negative `read_limit` will cause an error. - // - // If the stream returns fewer bytes than allowed by the `read_limit` and no - // error occurred, the stream includes all data from the `read_offset` to the - // end of the resource. - ReadLimit int64 `protobuf:"varint,5,opt,name=read_limit,json=readLimit,proto3" json:"read_limit,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,6,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,8,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // Mask specifying which fields to read. - // The checksummed_data field and its children will always be present. - // If no mask is specified, will default to all fields except metadata.owner - // and metadata.acl. - // * may be used to mean "all fields". 
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` -} - -func (x *ReadObjectRequest) Reset() { - *x = ReadObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadObjectRequest) ProtoMessage() {} - -func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. -func (*ReadObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} -} - -func (x *ReadObjectRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *ReadObjectRequest) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *ReadObjectRequest) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *ReadObjectRequest) GetReadOffset() int64 { - if x != nil { - return x.ReadOffset - } - return 0 -} - -func (x *ReadObjectRequest) GetReadLimit() int64 { - if x != nil { - return x.ReadLimit - } - return 0 -} - -func (x *ReadObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *ReadObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *ReadObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *ReadObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -// Request message for GetObject. -type GetObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of the bucket in which the object resides. - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - // Required. Name of the object. - Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` - // If true, return the soft-deleted version of this object. - SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. 
Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // Mask specifying which fields to read. - // If no mask is specified, will default to all fields except metadata.acl and - // metadata.owner. - // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` -} - -func (x *GetObjectRequest) Reset() { - *x = GetObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetObjectRequest) ProtoMessage() {} - -func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. 
-func (*GetObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} -} - -func (x *GetObjectRequest) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *GetObjectRequest) GetObject() string { - if x != nil { - return x.Object - } - return "" -} - -func (x *GetObjectRequest) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *GetObjectRequest) GetSoftDeleted() bool { - if x != nil && x.SoftDeleted != nil { - return *x.SoftDeleted - } - return false -} - -func (x *GetObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *GetObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *GetObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *GetObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -// Response message for ReadObject. -type ReadObjectResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A portion of the data for the object. The service **may** leave `data` - // empty for any given `ReadResponse`. This enables the service to inform the - // client that the request is still live while it is running an operation to - // generate more data. - ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"` - // The checksums of the complete object. If the object is downloaded in full, - // the client should compute one of these checksums over the downloaded object - // and compare it against the value provided here. - ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` - // If read_offset and or read_limit was specified on the - // ReadObjectRequest, ContentRange will be populated on the first - // ReadObjectResponse message of the read stream. - ContentRange *ContentRange `protobuf:"bytes,3,opt,name=content_range,json=contentRange,proto3" json:"content_range,omitempty"` - // Metadata of the object whose media is being returned. - // Only populated in the first response in the stream. 
- Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (x *ReadObjectResponse) Reset() { - *x = ReadObjectResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadObjectResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadObjectResponse) ProtoMessage() {} - -func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. -func (*ReadObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} -} - -func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { - if x != nil { - return x.ChecksummedData - } - return nil -} - -func (x *ReadObjectResponse) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -func (x *ReadObjectResponse) GetContentRange() *ContentRange { - if x != nil { - return x.ContentRange - } - return nil -} - -func (x *ReadObjectResponse) GetMetadata() *Object { - if x != nil { - return x.Metadata - } - return nil -} - -// Describes an attempt to insert an object, possibly over multiple requests. -type WriteObjectSpec struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Destination object, including its name and its metadata. - Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // Apply a predefined set of access controls to this object. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // Makes the operation conditional on whether the object's current - // generation matches the given value. Setting to 0 makes the operation - // succeed only if there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live - // generation does not match the given value. If no live object exists, the - // precondition fails. Setting to 0 makes the operation succeed only if - // there is a live version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. 
- IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // The expected final object size being uploaded. - // If this value is set, closing the stream after writing fewer or more than - // `object_size` bytes will result in an OUT_OF_RANGE error. - // - // This situation is considered a client error, and if such an error occurs - // you must start the upload over from scratch, this time sending the correct - // number of bytes. - ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` -} - -func (x *WriteObjectSpec) Reset() { - *x = WriteObjectSpec{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WriteObjectSpec) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WriteObjectSpec) ProtoMessage() {} - -func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. -func (*WriteObjectSpec) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} -} - -func (x *WriteObjectSpec) GetResource() *Object { - if x != nil { - return x.Resource - } - return nil -} - -func (x *WriteObjectSpec) GetPredefinedAcl() string { - if x != nil { - return x.PredefinedAcl - } - return "" -} - -func (x *WriteObjectSpec) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *WriteObjectSpec) GetObjectSize() int64 { - if x != nil && x.ObjectSize != nil { - return *x.ObjectSize - } - return 0 -} - -// Request message for WriteObject. -type WriteObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The first message of each stream should set one of the following. - // - // Types that are assignable to FirstMessage: - // - // *WriteObjectRequest_UploadId - // *WriteObjectRequest_WriteObjectSpec - FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` - // Required. The offset from the beginning of the object at which the data - // should be written. - // - // In the first `WriteObjectRequest` of a `WriteObject()` action, it - // indicates the initial offset for the `Write()` call. The value **must** be - // equal to the `persisted_size` that a call to `QueryWriteStatus()` would - // return (0 if this is the first write to the object). 
- // - // On subsequent calls, this value **must** be no larger than the sum of the - // first `write_offset` and the sizes of all `data` chunks sent previously on - // this stream. - // - // An incorrect value will cause an error. - WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` - // A portion of the data for the object. - // - // Types that are assignable to Data: - // - // *WriteObjectRequest_ChecksummedData - Data isWriteObjectRequest_Data `protobuf_oneof:"data"` - // Checksums for the complete object. If the checksums computed by the service - // don't match the specified checksums the call will fail. May only be - // provided in the first or last request (either with first_message, or - // finish_write set). - ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` - // If `true`, this indicates that the write is complete. Sending any - // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` - // will cause an error. - // For a non-resumable write (where the upload_id was not set in the first - // message), it is an error not to set this field in the final message of the - // stream. - FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *WriteObjectRequest) Reset() { - *x = WriteObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WriteObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WriteObjectRequest) ProtoMessage() {} - -func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. 
-func (*WriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} -} - -func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { - if m != nil { - return m.FirstMessage - } - return nil -} - -func (x *WriteObjectRequest) GetUploadId() string { - if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok { - return x.UploadId - } - return "" -} - -func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { - if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok { - return x.WriteObjectSpec - } - return nil -} - -func (x *WriteObjectRequest) GetWriteOffset() int64 { - if x != nil { - return x.WriteOffset - } - return 0 -} - -func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data { - if m != nil { - return m.Data - } - return nil -} - -func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData { - if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok { - return x.ChecksummedData - } - return nil -} - -func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -func (x *WriteObjectRequest) GetFinishWrite() bool { - if x != nil { - return x.FinishWrite - } - return false -} - -func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -type isWriteObjectRequest_FirstMessage interface { - isWriteObjectRequest_FirstMessage() -} - -type WriteObjectRequest_UploadId struct { - // For resumable uploads. This should be the `upload_id` returned from a - // call to `StartResumableWriteResponse`. - UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` -} - -type WriteObjectRequest_WriteObjectSpec struct { - // For non-resumable uploads. Describes the overall upload, including the - // destination bucket and object name, preconditions, etc. - WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` -} - -func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {} - -func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {} - -type isWriteObjectRequest_Data interface { - isWriteObjectRequest_Data() -} - -type WriteObjectRequest_ChecksummedData struct { - // The data to insert. If a crc32c checksum is provided that doesn't match - // the checksum computed by the service, the request will fail. - ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` -} - -func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {} - -// Response message for WriteObject. -type WriteObjectResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The response will set one of the following. 
- // - // Types that are assignable to WriteStatus: - // - // *WriteObjectResponse_PersistedSize - // *WriteObjectResponse_Resource - WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` -} - -func (x *WriteObjectResponse) Reset() { - *x = WriteObjectResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WriteObjectResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WriteObjectResponse) ProtoMessage() {} - -func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. -func (*WriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} -} - -func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { - if m != nil { - return m.WriteStatus - } - return nil -} - -func (x *WriteObjectResponse) GetPersistedSize() int64 { - if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok { - return x.PersistedSize - } - return 0 -} - -func (x *WriteObjectResponse) GetResource() *Object { - if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok { - return x.Resource - } - return nil -} - -type isWriteObjectResponse_WriteStatus interface { - isWriteObjectResponse_WriteStatus() -} - -type WriteObjectResponse_PersistedSize struct { - // The total number of bytes that have been processed for the given object - // from all `WriteObject` calls. Only set if the upload has not finalized. - PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` -} - -type WriteObjectResponse_Resource struct { - // A resource containing the metadata for the uploaded object. Only set if - // the upload has finalized. - Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` -} - -func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} - -func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} - -// Request message for BidiWriteObject. -type BidiWriteObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The first message of each stream should set one of the following. - // - // Types that are assignable to FirstMessage: - // - // *BidiWriteObjectRequest_UploadId - // *BidiWriteObjectRequest_WriteObjectSpec - FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` - // Required. The offset from the beginning of the object at which the data - // should be written. - // - // In the first `WriteObjectRequest` of a `WriteObject()` action, it - // indicates the initial offset for the `Write()` call. The value **must** be - // equal to the `persisted_size` that a call to `QueryWriteStatus()` would - // return (0 if this is the first write to the object). - // - // On subsequent calls, this value **must** be no larger than the sum of the - // first `write_offset` and the sizes of all `data` chunks sent previously on - // this stream. - // - // An invalid value will cause an error. 
- WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` - // A portion of the data for the object. - // - // Types that are assignable to Data: - // - // *BidiWriteObjectRequest_ChecksummedData - Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` - // Checksums for the complete object. If the checksums computed by the service - // don't match the specified checksums the call will fail. May only be - // provided in the first or last request (either with first_message, or - // finish_write set). - ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` - // For each BidiWriteObjectRequest where state_lookup is `true` or the client - // closes the stream, the service will send a BidiWriteObjectResponse - // containing the current persisted size. The persisted size sent in responses - // covers all the bytes the server has persisted thus far and can be used to - // decide what data is safe for the client to drop. Note that the object's - // current size reported by the BidiWriteObjectResponse may lag behind the - // number of bytes written by the client. This field is ignored if - // `finish_write` is set to true. - StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"` - // Persists data written on the stream, up to and including the current - // message, to permanent storage. This option should be used sparingly as it - // may reduce performance. Ongoing writes will periodically be persisted on - // the server even when `flush` is not set. This field is ignored if - // `finish_write` is set to true since there's no need to checkpoint or flush - // if this message completes the write. - Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"` - // If `true`, this indicates that the write is complete. Sending any - // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` - // will cause an error. - // For a non-resumable write (where the upload_id was not set in the first - // message), it is an error not to set this field in the final message of the - // stream. - FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *BidiWriteObjectRequest) Reset() { - *x = BidiWriteObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BidiWriteObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BidiWriteObjectRequest) ProtoMessage() {} - -func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. 
-func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} -} - -func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { - if m != nil { - return m.FirstMessage - } - return nil -} - -func (x *BidiWriteObjectRequest) GetUploadId() string { - if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok { - return x.UploadId - } - return "" -} - -func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { - if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok { - return x.WriteObjectSpec - } - return nil -} - -func (x *BidiWriteObjectRequest) GetWriteOffset() int64 { - if x != nil { - return x.WriteOffset - } - return 0 -} - -func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data { - if m != nil { - return m.Data - } - return nil -} - -func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData { - if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok { - return x.ChecksummedData - } - return nil -} - -func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -func (x *BidiWriteObjectRequest) GetStateLookup() bool { - if x != nil { - return x.StateLookup - } - return false -} - -func (x *BidiWriteObjectRequest) GetFlush() bool { - if x != nil { - return x.Flush - } - return false -} - -func (x *BidiWriteObjectRequest) GetFinishWrite() bool { - if x != nil { - return x.FinishWrite - } - return false -} - -func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -type isBidiWriteObjectRequest_FirstMessage interface { - isBidiWriteObjectRequest_FirstMessage() -} - -type BidiWriteObjectRequest_UploadId struct { - // For resumable uploads. This should be the `upload_id` returned from a - // call to `StartResumableWriteResponse`. - UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` -} - -type BidiWriteObjectRequest_WriteObjectSpec struct { - // For non-resumable uploads. Describes the overall upload, including the - // destination bucket and object name, preconditions, etc. - WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` -} - -func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {} - -func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} - -type isBidiWriteObjectRequest_Data interface { - isBidiWriteObjectRequest_Data() -} - -type BidiWriteObjectRequest_ChecksummedData struct { - // The data to insert. If a crc32c checksum is provided that doesn't match - // the checksum computed by the service, the request will fail. - ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` -} - -func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {} - -// Response message for BidiWriteObject. -type BidiWriteObjectResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The response will set one of the following. 
- // - // Types that are assignable to WriteStatus: - // - // *BidiWriteObjectResponse_PersistedSize - // *BidiWriteObjectResponse_Resource - WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` -} - -func (x *BidiWriteObjectResponse) Reset() { - *x = BidiWriteObjectResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BidiWriteObjectResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BidiWriteObjectResponse) ProtoMessage() {} - -func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead. -func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} -} - -func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { - if m != nil { - return m.WriteStatus - } - return nil -} - -func (x *BidiWriteObjectResponse) GetPersistedSize() int64 { - if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok { - return x.PersistedSize - } - return 0 -} - -func (x *BidiWriteObjectResponse) GetResource() *Object { - if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok { - return x.Resource - } - return nil -} - -type isBidiWriteObjectResponse_WriteStatus interface { - isBidiWriteObjectResponse_WriteStatus() -} - -type BidiWriteObjectResponse_PersistedSize struct { - // The total number of bytes that have been processed for the given object - // from all `WriteObject` calls. Only set if the upload has not finalized. - PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` -} - -type BidiWriteObjectResponse_Resource struct { - // A resource containing the metadata for the uploaded object. Only set if - // the upload has finalized. - Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` -} - -func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {} - -func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {} - -// Request message for ListObjects. -type ListObjectsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Name of the bucket in which to look for objects. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Maximum number of `items` plus `prefixes` to return - // in a single page of responses. As duplicate `prefixes` are - // omitted, fewer total results may be returned than requested. The service - // will use this parameter or 1,000 items, whichever is smaller. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously-returned page token representing part of the larger set of - // results to view. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // If set, returns results in a directory-like mode. 
`items` will contain - // only objects whose names, aside from the `prefix`, do not - // contain `delimiter`. Objects whose names, aside from the - // `prefix`, contain `delimiter` will have their name, - // truncated after the `delimiter`, returned in - // `prefixes`. Duplicate `prefixes` are omitted. - Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` - // If true, objects that end in exactly one instance of `delimiter` - // will have their metadata included in `items` in addition to - // `prefixes`. - IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` - // Filter results to objects whose names begin with this prefix. - Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` - // If `true`, lists all versions of an object as distinct results. - // For more information, see - // [Object - // Versioning](https://cloud.google.com/storage/docs/object-versioning). - Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` - // Mask specifying which fields to read from each result. - // If no mask is specified, will default to all fields except items.acl and - // items.owner. - // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` - // Optional. Filter results to objects whose names are lexicographically equal - // to or after lexicographic_start. If lexicographic_end is also set, the - // objects listed have names between lexicographic_start (inclusive) and - // lexicographic_end (exclusive). - LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` - // Optional. Filter results to objects whose names are lexicographically - // before lexicographic_end. If lexicographic_start is also set, the objects - // listed have names between lexicographic_start (inclusive) and - // lexicographic_end (exclusive). - LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` - // Optional. If true, only list all soft-deleted versions of the object. - // Soft delete policy is required to set this option. - SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"` - // Optional. Filter results to objects and prefixes that match this glob - // pattern. See [List Objects Using - // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob) - // for the full syntax. 
- MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"` -} - -func (x *ListObjectsRequest) Reset() { - *x = ListObjectsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListObjectsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListObjectsRequest) ProtoMessage() {} - -func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. -func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} -} - -func (x *ListObjectsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListObjectsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListObjectsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListObjectsRequest) GetDelimiter() string { - if x != nil { - return x.Delimiter - } - return "" -} - -func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool { - if x != nil { - return x.IncludeTrailingDelimiter - } - return false -} - -func (x *ListObjectsRequest) GetPrefix() string { - if x != nil { - return x.Prefix - } - return "" -} - -func (x *ListObjectsRequest) GetVersions() bool { - if x != nil { - return x.Versions - } - return false -} - -func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.ReadMask - } - return nil -} - -func (x *ListObjectsRequest) GetLexicographicStart() string { - if x != nil { - return x.LexicographicStart - } - return "" -} - -func (x *ListObjectsRequest) GetLexicographicEnd() string { - if x != nil { - return x.LexicographicEnd - } - return "" -} - -func (x *ListObjectsRequest) GetSoftDeleted() bool { - if x != nil { - return x.SoftDeleted - } - return false -} - -func (x *ListObjectsRequest) GetMatchGlob() string { - if x != nil { - return x.MatchGlob - } - return "" -} - -// Request object for `QueryWriteStatus`. -type QueryWriteStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the resume token for the object whose write status is - // being requested. - UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` - // A set of parameters common to Storage API requests concerning an object. 
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *QueryWriteStatusRequest) Reset() { - *x = QueryWriteStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryWriteStatusRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryWriteStatusRequest) ProtoMessage() {} - -func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. -func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} -} - -func (x *QueryWriteStatusRequest) GetUploadId() string { - if x != nil { - return x.UploadId - } - return "" -} - -func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -// Response object for `QueryWriteStatus`. -type QueryWriteStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The response will set one of the following. - // - // Types that are assignable to WriteStatus: - // - // *QueryWriteStatusResponse_PersistedSize - // *QueryWriteStatusResponse_Resource - WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` -} - -func (x *QueryWriteStatusResponse) Reset() { - *x = QueryWriteStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryWriteStatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryWriteStatusResponse) ProtoMessage() {} - -func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. 
-func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} -} - -func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { - if m != nil { - return m.WriteStatus - } - return nil -} - -func (x *QueryWriteStatusResponse) GetPersistedSize() int64 { - if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok { - return x.PersistedSize - } - return 0 -} - -func (x *QueryWriteStatusResponse) GetResource() *Object { - if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok { - return x.Resource - } - return nil -} - -type isQueryWriteStatusResponse_WriteStatus interface { - isQueryWriteStatusResponse_WriteStatus() -} - -type QueryWriteStatusResponse_PersistedSize struct { - // The total number of bytes that have been processed for the given object - // from all `WriteObject` calls. This is the correct value for the - // 'write_offset' field to use when resuming the `WriteObject` operation. - // Only set if the upload has not finalized. - PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` -} - -type QueryWriteStatusResponse_Resource struct { - // A resource containing the metadata for the uploaded object. Only set if - // the upload has finalized. - Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` -} - -func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {} - -func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {} - -// Request message for RewriteObject. -// If the source object is encrypted using a Customer-Supplied Encryption Key -// the key information must be provided in the copy_source_encryption_algorithm, -// copy_source_encryption_key_bytes, and copy_source_encryption_key_sha256_bytes -// fields. If the destination object should be encrypted the keying information -// should be provided in the encryption_algorithm, encryption_key_bytes, and -// encryption_key_sha256_bytes fields of the -// common_object_request_params.customer_encryption field. -type RewriteObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Immutable. The name of the destination object. - // See the - // [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming). - // Example: `test.txt` - // The `name` field by itself does not uniquely identify a Cloud Storage - // object. A Cloud Storage object is uniquely identified by the tuple of - // (bucket, object, generation). - DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"` - // Required. Immutable. The name of the bucket containing the destination - // object. - DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"` - // The name of the Cloud KMS key that will be used to encrypt the destination - // object. The Cloud KMS key must be located in same location as the object. - // If the parameter is not specified, the request uses the destination - // bucket's default encryption key, if any, or else the Google-managed - // encryption key. 
- DestinationKmsKey string `protobuf:"bytes,27,opt,name=destination_kms_key,json=destinationKmsKey,proto3" json:"destination_kms_key,omitempty"` - // Properties of the destination, post-rewrite object. - // The `name`, `bucket` and `kms_key` fields must not be populated (these - // values are specified in the `destination_name`, `destination_bucket`, and - // `destination_kms_key` fields). - // If `destination` is present it will be used to construct the destination - // object's metadata; otherwise the destination object's metadata will be - // copied from the source object. - Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` - // Required. Name of the bucket in which to find the source object. - SourceBucket string `protobuf:"bytes,2,opt,name=source_bucket,json=sourceBucket,proto3" json:"source_bucket,omitempty"` - // Required. Name of the source object. - SourceObject string `protobuf:"bytes,3,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"` - // If present, selects a specific revision of the source object (as opposed to - // the latest version, the default). - SourceGeneration int64 `protobuf:"varint,4,opt,name=source_generation,json=sourceGeneration,proto3" json:"source_generation,omitempty"` - // Include this field (from the previous rewrite response) on each rewrite - // request after the first one, until the rewrite response 'done' flag is - // true. Calls that provide a rewriteToken can omit all other request fields, - // but if included those fields must match the values provided in the first - // rewrite request. - RewriteToken string `protobuf:"bytes,5,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` - // Apply a predefined set of access controls to the destination object. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". - DestinationPredefinedAcl string `protobuf:"bytes,28,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,7,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the destination object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the destination object's current - // metageneration does not match the given value. 
- IfMetagenerationNotMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // Makes the operation conditional on whether the source object's live - // generation matches the given value. - IfSourceGenerationMatch *int64 `protobuf:"varint,11,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"` - // Makes the operation conditional on whether the source object's live - // generation does not match the given value. - IfSourceGenerationNotMatch *int64 `protobuf:"varint,12,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"` - // Makes the operation conditional on whether the source object's current - // metageneration matches the given value. - IfSourceMetagenerationMatch *int64 `protobuf:"varint,13,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"` - // Makes the operation conditional on whether the source object's current - // metageneration does not match the given value. - IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,14,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"` - // The maximum number of bytes that will be rewritten per rewrite request. - // Most callers - // shouldn't need to specify this parameter - it is primarily in place to - // support testing. If specified the value must be an integral multiple of - // 1 MiB (1048576). Also, this only applies to requests where the source and - // destination span locations and/or storage classes. Finally, this value must - // not change across rewrite calls else you'll get an error that the - // `rewriteToken` is invalid. - MaxBytesRewrittenPerCall int64 `protobuf:"varint,15,opt,name=max_bytes_rewritten_per_call,json=maxBytesRewrittenPerCall,proto3" json:"max_bytes_rewritten_per_call,omitempty"` - // The algorithm used to encrypt the source object, if any. Used if the source - // object was encrypted with a Customer-Supplied Encryption Key. - CopySourceEncryptionAlgorithm string `protobuf:"bytes,16,opt,name=copy_source_encryption_algorithm,json=copySourceEncryptionAlgorithm,proto3" json:"copy_source_encryption_algorithm,omitempty"` - // The raw bytes (not base64-encoded) AES-256 encryption key used to encrypt - // the source object, if it was encrypted with a Customer-Supplied Encryption - // Key. - CopySourceEncryptionKeyBytes []byte `protobuf:"bytes,21,opt,name=copy_source_encryption_key_bytes,json=copySourceEncryptionKeyBytes,proto3" json:"copy_source_encryption_key_bytes,omitempty"` - // The raw bytes (not base64-encoded) SHA256 hash of the encryption key used - // to encrypt the source object, if it was encrypted with a Customer-Supplied - // Encryption Key. - CopySourceEncryptionKeySha256Bytes []byte `protobuf:"bytes,22,opt,name=copy_source_encryption_key_sha256_bytes,json=copySourceEncryptionKeySha256Bytes,proto3" json:"copy_source_encryption_key_sha256_bytes,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,19,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // The checksums of the complete object. 
This will be used to validate the - // destination object after rewriting. - ObjectChecksums *ObjectChecksums `protobuf:"bytes,29,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` -} - -func (x *RewriteObjectRequest) Reset() { - *x = RewriteObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RewriteObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RewriteObjectRequest) ProtoMessage() {} - -func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. -func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} -} - -func (x *RewriteObjectRequest) GetDestinationName() string { - if x != nil { - return x.DestinationName - } - return "" -} - -func (x *RewriteObjectRequest) GetDestinationBucket() string { - if x != nil { - return x.DestinationBucket - } - return "" -} - -func (x *RewriteObjectRequest) GetDestinationKmsKey() string { - if x != nil { - return x.DestinationKmsKey - } - return "" -} - -func (x *RewriteObjectRequest) GetDestination() *Object { - if x != nil { - return x.Destination - } - return nil -} - -func (x *RewriteObjectRequest) GetSourceBucket() string { - if x != nil { - return x.SourceBucket - } - return "" -} - -func (x *RewriteObjectRequest) GetSourceObject() string { - if x != nil { - return x.SourceObject - } - return "" -} - -func (x *RewriteObjectRequest) GetSourceGeneration() int64 { - if x != nil { - return x.SourceGeneration - } - return 0 -} - -func (x *RewriteObjectRequest) GetRewriteToken() string { - if x != nil { - return x.RewriteToken - } - return "" -} - -func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string { - if x != nil { - return x.DestinationPredefinedAcl - } - return "" -} - -func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 { - if x != nil && x.IfSourceGenerationMatch != nil { - return *x.IfSourceGenerationMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 { - if x != nil && x.IfSourceGenerationNotMatch != nil { - return *x.IfSourceGenerationNotMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 { - if x != nil && x.IfSourceMetagenerationMatch != nil { - return *x.IfSourceMetagenerationMatch - } - return 0 -} - -func (x 
*RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 { - if x != nil && x.IfSourceMetagenerationNotMatch != nil { - return *x.IfSourceMetagenerationNotMatch - } - return 0 -} - -func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 { - if x != nil { - return x.MaxBytesRewrittenPerCall - } - return 0 -} - -func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string { - if x != nil { - return x.CopySourceEncryptionAlgorithm - } - return "" -} - -func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte { - if x != nil { - return x.CopySourceEncryptionKeyBytes - } - return nil -} - -func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte { - if x != nil { - return x.CopySourceEncryptionKeySha256Bytes - } - return nil -} - -func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *RewriteObjectRequest) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -// A rewrite response. -type RewriteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The total bytes written so far, which can be used to provide a waiting user - // with a progress indicator. This property is always present in the response. - TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"` - // The total size of the object being copied in bytes. This property is always - // present in the response. - ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"` - // `true` if the copy is finished; otherwise, `false` if - // the copy is in progress. This property is always present in the response. - Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"` - // A token to use in subsequent requests to continue copying data. This token - // is present in the response only when there is more data to copy. - RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"` - // A resource containing the metadata for the copied-to object. This property - // is present in the response only when copying completes. - Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"` -} - -func (x *RewriteResponse) Reset() { - *x = RewriteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RewriteResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RewriteResponse) ProtoMessage() {} - -func (x *RewriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. 
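
// Illustrative sketch, not part of this diff: RewriteObjectRequest/RewriteResponse
// above implement the token-based copy loop (call RewriteObject repeatedly,
// passing rewrite_token back until done is true). The public
// cloud.google.com/go/storage client hides that loop behind Copier.Run; a minimal
// example with placeholder bucket and object names:
package sketch

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

func copyObject(ctx context.Context, client *storage.Client) error {
	src := client.Bucket("src-bucket").Object("backup.tar")
	dst := client.Bucket("dst-bucket").Object("backup-copy.tar")

	c := dst.CopierFrom(src)
	// ProgressFunc surfaces total_bytes_rewritten/object_size from each
	// intermediate RewriteResponse.
	c.ProgressFunc = func(copiedBytes, totalBytes uint64) {
		fmt.Printf("rewritten %d of %d bytes\n", copiedBytes, totalBytes)
	}
	attrs, err := c.Run(ctx) // issues rewrite calls until the copy is done
	if err != nil {
		return err
	}
	fmt.Println("copied object generation:", attrs.Generation)
	return nil
}
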
-func (*RewriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} -} - -func (x *RewriteResponse) GetTotalBytesRewritten() int64 { - if x != nil { - return x.TotalBytesRewritten - } - return 0 -} - -func (x *RewriteResponse) GetObjectSize() int64 { - if x != nil { - return x.ObjectSize - } - return 0 -} - -func (x *RewriteResponse) GetDone() bool { - if x != nil { - return x.Done - } - return false -} - -func (x *RewriteResponse) GetRewriteToken() string { - if x != nil { - return x.RewriteToken - } - return "" -} - -func (x *RewriteResponse) GetResource() *Object { - if x != nil { - return x.Resource - } - return nil -} - -// Request message StartResumableWrite. -type StartResumableWriteRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The destination bucket, object, and metadata, as well as any - // preconditions. - WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` - // The checksums of the complete object. This will be used to validate the - // uploaded object. For each upload, object_checksums can be provided with - // either StartResumableWriteRequest or the WriteObjectRequest with - // finish_write set to `true`. - ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` -} - -func (x *StartResumableWriteRequest) Reset() { - *x = StartResumableWriteRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartResumableWriteRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartResumableWriteRequest) ProtoMessage() {} - -func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. -func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} -} - -func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { - if x != nil { - return x.WriteObjectSpec - } - return nil -} - -func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -func (x *StartResumableWriteRequest) GetObjectChecksums() *ObjectChecksums { - if x != nil { - return x.ObjectChecksums - } - return nil -} - -// Response object for `StartResumableWrite`. -type StartResumableWriteResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The upload_id of the newly started resumable write operation. 
This - // value should be copied into the `WriteObjectRequest.upload_id` field. - UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"` -} - -func (x *StartResumableWriteResponse) Reset() { - *x = StartResumableWriteResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartResumableWriteResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartResumableWriteResponse) ProtoMessage() {} - -func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. -func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} -} - -func (x *StartResumableWriteResponse) GetUploadId() string { - if x != nil { - return x.UploadId - } - return "" -} - -// Request message for UpdateObject. -type UpdateObjectRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The object to update. - // The object's bucket and name fields are used to identify the object to - // update. If present, the object's generation field selects a specific - // revision of this object whose metadata should be updated. Otherwise, - // assumes the live version of the object. - Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` - // Makes the operation conditional on whether the object's live generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` - // Apply a predefined set of access controls to this object. - // Valid values are "authenticatedRead", "bucketOwnerFullControl", - // "bucketOwnerRead", "private", "projectPrivate", or "publicRead". 
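
// Illustrative sketch, not part of this diff: StartResumableWrite and the
// upload_id above back the resumable upload path. With the public
// cloud.google.com/go/storage client, a non-zero Writer.ChunkSize selects that
// path and the upload session is managed internally; names are placeholders:
package sketch

import (
	"context"
	"io"

	"cloud.google.com/go/storage"
)

func uploadResumable(ctx context.Context, client *storage.Client, r io.Reader) error {
	w := client.Bucket("backups").Object("dump.archive").NewWriter(ctx)
	w.ChunkSize = 16 << 20 // buffer and upload in 16 MiB chunks; 0 disables chunking
	if _, err := io.Copy(w, r); err != nil {
		w.Close()
		return err
	}
	return w.Close() // the object only becomes visible once Close returns nil
}
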
- PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"` - // Required. List of fields to be updated. - // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. Note: not recommended. If a new - // field is introduced at a later time, an older client updating with the `*` - // may accidentally reset the new field's value. - // - // Not specifying any fields is an error. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` -} - -func (x *UpdateObjectRequest) Reset() { - *x = UpdateObjectRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateObjectRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateObjectRequest) ProtoMessage() {} - -func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. -func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} -} - -func (x *UpdateObjectRequest) GetObject() *Object { - if x != nil { - return x.Object - } - return nil -} - -func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 { - if x != nil && x.IfGenerationNotMatch != nil { - return *x.IfGenerationNotMatch - } - return 0 -} - -func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 { - if x != nil && x.IfMetagenerationMatch != nil { - return *x.IfMetagenerationMatch - } - return 0 -} - -func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 { - if x != nil && x.IfMetagenerationNotMatch != nil { - return *x.IfMetagenerationNotMatch - } - return 0 -} - -func (x *UpdateObjectRequest) GetPredefinedAcl() string { - if x != nil { - return x.PredefinedAcl - } - return "" -} - -func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { - if x != nil { - return x.CommonObjectRequestParams - } - return nil -} - -// Request message for GetServiceAccount. -type GetServiceAccountRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. Project ID, in the format of "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. 
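
// Illustrative sketch, not part of this diff: UpdateObjectRequest above carries an
// update_mask plus if_metageneration_match style preconditions. The public client
// expresses the same thing with ObjectAttrsToUpdate and Conditions; placeholder
// names and content type:
package sketch

import (
	"context"

	"cloud.google.com/go/storage"
)

func setContentType(ctx context.Context, client *storage.Client) error {
	obj := client.Bucket("backups").Object("dump.archive")
	attrs, err := obj.Attrs(ctx)
	if err != nil {
		return err
	}
	// Only the fields set here end up in the update mask; the metageneration
	// precondition makes the update fail if the metadata changed since Attrs.
	_, err = obj.If(storage.Conditions{MetagenerationMatch: attrs.Metageneration}).
		Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "application/octet-stream"})
	return err
}
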
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *GetServiceAccountRequest) Reset() { - *x = GetServiceAccountRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetServiceAccountRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetServiceAccountRequest) ProtoMessage() {} - -func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. -func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} -} - -func (x *GetServiceAccountRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request message for CreateHmacKey. -type CreateHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project that the HMAC-owning service account lives in, in the - // format of "projects/{projectIdentifier}". {projectIdentifier} can be the - // project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // Required. The service account to create the HMAC for. - ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` -} - -func (x *CreateHmacKeyRequest) Reset() { - *x = CreateHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateHmacKeyRequest) ProtoMessage() {} - -func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} -} - -func (x *CreateHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -// Create hmac response. The only time the secret for an HMAC will be returned. -type CreateHmacKeyResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Key metadata. - Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` - // HMAC key secret material. - // In raw bytes format (not base64-encoded). 
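
// Illustrative sketch, not part of this diff: CreateHmacKeyResponse above is the
// only message that ever carries the HMAC secret. The public client mirrors that:
// HMACKey.Secret is populated only by CreateHMACKey. Project and service-account
// values are placeholders:
package sketch

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

func createHMAC(ctx context.Context, client *storage.Client) error {
	key, err := client.CreateHMACKey(ctx, "my-project", "backup-sa@my-project.iam.gserviceaccount.com")
	if err != nil {
		return err
	}
	// Persist the secret now; it cannot be fetched again later.
	fmt.Println("access id:", key.AccessID)
	fmt.Println("secret:", key.Secret)
	return nil
}
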
- SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"` -} - -func (x *CreateHmacKeyResponse) Reset() { - *x = CreateHmacKeyResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateHmacKeyResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateHmacKeyResponse) ProtoMessage() {} - -func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. -func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} -} - -func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte { - if x != nil { - return x.SecretKeyBytes - } - return nil -} - -// Request object to delete a given HMAC key. -type DeleteHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The identifying key for the HMAC to delete. - AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. The project that owns the HMAC key, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *DeleteHmacKeyRequest) Reset() { - *x = DeleteHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteHmacKeyRequest) ProtoMessage() {} - -func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} -} - -func (x *DeleteHmacKeyRequest) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *DeleteHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request object to get metadata on a given HMAC key. -type GetHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The identifying key for the HMAC to delete. - AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Required. 
The project the HMAC key lies in, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` -} - -func (x *GetHmacKeyRequest) Reset() { - *x = GetHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetHmacKeyRequest) ProtoMessage() {} - -func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} -} - -func (x *GetHmacKeyRequest) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *GetHmacKeyRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -// Request to fetch a list of HMAC keys under a given project. -type ListHmacKeysRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The project to list HMAC keys for, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` - // The maximum number of keys to return. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously returned token from ListHmacKeysResponse to get the next page. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // If set, filters to only return HMAC keys for specified service account. - ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // If set, return deleted keys that have not yet been wiped out. - ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"` -} - -func (x *ListHmacKeysRequest) Reset() { - *x = ListHmacKeysRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListHmacKeysRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListHmacKeysRequest) ProtoMessage() {} - -func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. 
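
// Illustrative sketch, not part of this diff: ListHmacKeysRequest/Response above
// are paginated through page_token/next_page_token. The public client wraps the
// paging in an iterator; "my-project" is a placeholder:
package sketch

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func listHMACKeys(ctx context.Context, client *storage.Client) error {
	it := client.ListHMACKeys(ctx, "my-project")
	for {
		key, err := it.Next()
		if err == iterator.Done {
			return nil // all pages consumed
		}
		if err != nil {
			return err
		}
		fmt.Println(key.AccessID, key.State)
	}
}
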
-func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} -} - -func (x *ListHmacKeysRequest) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *ListHmacKeysRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListHmacKeysRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListHmacKeysRequest) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool { - if x != nil { - return x.ShowDeletedKeys - } - return false -} - -// Hmac key list response with next page information. -type ListHmacKeysResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"` - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListHmacKeysResponse) Reset() { - *x = ListHmacKeysResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListHmacKeysResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListHmacKeysResponse) ProtoMessage() {} - -func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. -func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} -} - -func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { - if x != nil { - return x.HmacKeys - } - return nil -} - -func (x *ListHmacKeysResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Request object to update an HMAC key state. -// HmacKeyMetadata.state is required and the only writable field in -// UpdateHmacKey operation. Specifying fields other than state will result in an -// error. -type UpdateHmacKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The HMAC key to update. - // If present, the hmac_key's `id` field will be used to identify the key. - // Otherwise, the hmac_key's access_id and project fields will be used to - // identify the key. - HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"` - // Update mask for hmac_key. - // Not specifying any fields will mean only the `state` field is updated to - // the value specified in `hmac_key`. 
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` -} - -func (x *UpdateHmacKeyRequest) Reset() { - *x = UpdateHmacKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateHmacKeyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateHmacKeyRequest) ProtoMessage() {} - -func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. -func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} -} - -func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { - if x != nil { - return x.HmacKey - } - return nil -} - -func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { - if x != nil { - return x.UpdateMask - } - return nil -} - -// Parameters that can be passed to any object request. -type CommonObjectRequestParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Encryption algorithm used with the Customer-Supplied Encryption Keys - // feature. - EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` - // Encryption key used with the Customer-Supplied Encryption Keys feature. - // In raw bytes format (not base64-encoded). - EncryptionKeyBytes []byte `protobuf:"bytes,4,opt,name=encryption_key_bytes,json=encryptionKeyBytes,proto3" json:"encryption_key_bytes,omitempty"` - // SHA256 hash of encryption key used with the Customer-Supplied Encryption - // Keys feature. - EncryptionKeySha256Bytes []byte `protobuf:"bytes,5,opt,name=encryption_key_sha256_bytes,json=encryptionKeySha256Bytes,proto3" json:"encryption_key_sha256_bytes,omitempty"` -} - -func (x *CommonObjectRequestParams) Reset() { - *x = CommonObjectRequestParams{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CommonObjectRequestParams) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CommonObjectRequestParams) ProtoMessage() {} - -func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. 
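
// Illustrative sketch, not part of this diff: the CommonObjectRequestParams CSEK
// fields above (algorithm, key bytes, key SHA-256) are filled in by the public
// client when a key is supplied via ObjectHandle.Key. The 32-byte AES-256 key is
// assumed to be provided by the caller; names are placeholders:
package sketch

import (
	"context"
	"io"

	"cloud.google.com/go/storage"
)

func writeWithCSEK(ctx context.Context, client *storage.Client, aesKey []byte, r io.Reader) error {
	// aesKey must be exactly 32 bytes; the client derives the key SHA-256 and sets
	// the AES256 algorithm on every request that touches this object handle.
	w := client.Bucket("backups").Object("dump.archive").Key(aesKey).NewWriter(ctx)
	if _, err := io.Copy(w, r); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
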
-func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} -} - -func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { - if x != nil { - return x.EncryptionAlgorithm - } - return "" -} - -func (x *CommonObjectRequestParams) GetEncryptionKeyBytes() []byte { - if x != nil { - return x.EncryptionKeyBytes - } - return nil -} - -func (x *CommonObjectRequestParams) GetEncryptionKeySha256Bytes() []byte { - if x != nil { - return x.EncryptionKeySha256Bytes - } - return nil -} - -// Shared constants. -type ServiceConstants struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ServiceConstants) Reset() { - *x = ServiceConstants{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConstants) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConstants) ProtoMessage() {} - -func (x *ServiceConstants) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. -func (*ServiceConstants) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} -} - -// A bucket. -type Bucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. The name of the bucket. - // Format: `projects/{project}/buckets/{bucket}` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Output only. The user-chosen part of the bucket name. The `{bucket}` - // portion of the `name` field. For globally unique buckets, this is equal to - // the "bucket name" of other Cloud Storage APIs. Example: "pub". - BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` - // The etag of the bucket. - // If included in the metadata of an UpdateBucketRequest, the operation will - // only be performed if the etag matches that of the bucket. - Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` - // Immutable. The project which owns this bucket, in the format of - // "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` - // Output only. The metadata generation of this bucket. - Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` - // Immutable. The location of the bucket. Object data for objects in the - // bucket resides in physical storage within this region. Defaults to `US`. - // See the - // [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's - // guide] for the authoritative list. Attempting to update this field after - // the bucket is created will result in an error. - Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location,omitempty"` - // Output only. The location type of the bucket (region, dual-region, - // multi-region, etc). 
- LocationType string `protobuf:"bytes,6,opt,name=location_type,json=locationType,proto3" json:"location_type,omitempty"` - // The bucket's default storage class, used whenever no storageClass is - // specified for a newly-created object. This defines how objects in the - // bucket are stored and determines the SLA and the cost of storage. - // If this value is not specified when the bucket is created, it will default - // to `STANDARD`. For more information, see - // https://developers.google.com/storage/docs/storage-classes. - StorageClass string `protobuf:"bytes,7,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` - // The recovery point objective for cross-region replication of the bucket. - // Applicable only for dual- and multi-region buckets. "DEFAULT" uses default - // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region - // buckets only. If rpo is not specified when the bucket is created, it - // defaults to "DEFAULT". For more information, see - // https://cloud.google.com/storage/docs/availability-durability#turbo-replication. - Rpo string `protobuf:"bytes,27,opt,name=rpo,proto3" json:"rpo,omitempty"` - // Access controls on the bucket. - // If iam_config.uniform_bucket_level_access is enabled on this bucket, - // requests to set, read, or modify acl is an error. - Acl []*BucketAccessControl `protobuf:"bytes,8,rep,name=acl,proto3" json:"acl,omitempty"` - // Default access controls to apply to new objects when no ACL is provided. - // If iam_config.uniform_bucket_level_access is enabled on this bucket, - // requests to set, read, or modify acl is an error. - DefaultObjectAcl []*ObjectAccessControl `protobuf:"bytes,9,rep,name=default_object_acl,json=defaultObjectAcl,proto3" json:"default_object_acl,omitempty"` - // The bucket's lifecycle config. See - // [https://developers.google.com/storage/docs/lifecycle]Lifecycle Management] - // for more information. - Lifecycle *Bucket_Lifecycle `protobuf:"bytes,10,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"` - // Output only. The creation time of the bucket. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] - // (CORS) config. - Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` - // Output only. The modification time of the bucket. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - // The default value for event-based hold on newly created objects in this - // bucket. Event-based hold is a way to retain objects indefinitely until an - // event occurs, signified by the - // hold's release. After being released, such objects will be subject to - // bucket-level retention (if any). One sample use case of this flag is for - // banks to hold loan documents for at least 3 years after loan is paid in - // full. Here, bucket-level retention is 3 years and the event is loan being - // paid in full. In this example, these objects will be held intact for any - // number of years until the event has occurred (event-based hold on the - // object is released) and then 3 more years after that. That means retention - // duration of the objects begins from the moment event-based hold - // transitioned from true to false. 
Objects under event-based hold cannot be - // deleted, overwritten or archived until the hold is removed. - DefaultEventBasedHold bool `protobuf:"varint,14,opt,name=default_event_based_hold,json=defaultEventBasedHold,proto3" json:"default_event_based_hold,omitempty"` - // User-provided labels, in key/value pairs. - Labels map[string]string `protobuf:"bytes,15,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The bucket's website config, controlling how the service behaves - // when accessing bucket contents as a web site. See the - // [https://cloud.google.com/storage/docs/static-website][Static Website - // Examples] for more information. - Website *Bucket_Website `protobuf:"bytes,16,opt,name=website,proto3" json:"website,omitempty"` - // The bucket's versioning config. - Versioning *Bucket_Versioning `protobuf:"bytes,17,opt,name=versioning,proto3" json:"versioning,omitempty"` - // The bucket's logging config, which defines the destination bucket - // and name prefix (if any) for the current bucket's logs. - Logging *Bucket_Logging `protobuf:"bytes,18,opt,name=logging,proto3" json:"logging,omitempty"` - // Output only. The owner of the bucket. This is always the project team's - // owner group. - Owner *Owner `protobuf:"bytes,19,opt,name=owner,proto3" json:"owner,omitempty"` - // Encryption config for a bucket. - Encryption *Bucket_Encryption `protobuf:"bytes,20,opt,name=encryption,proto3" json:"encryption,omitempty"` - // The bucket's billing config. - Billing *Bucket_Billing `protobuf:"bytes,21,opt,name=billing,proto3" json:"billing,omitempty"` - // The bucket's retention policy. The retention policy enforces a minimum - // retention time for all objects contained in the bucket, based on their - // creation time. Any attempt to overwrite or delete objects younger than the - // retention period will result in a PERMISSION_DENIED error. An unlocked - // retention policy can be modified or removed from the bucket via a - // storage.buckets.update operation. A locked retention policy cannot be - // removed or shortened in duration for the lifetime of the bucket. - // Attempting to remove or decrease period of a locked retention policy will - // result in a PERMISSION_DENIED error. - RetentionPolicy *Bucket_RetentionPolicy `protobuf:"bytes,22,opt,name=retention_policy,json=retentionPolicy,proto3" json:"retention_policy,omitempty"` - // The bucket's IAM config. - IamConfig *Bucket_IamConfig `protobuf:"bytes,23,opt,name=iam_config,json=iamConfig,proto3" json:"iam_config,omitempty"` - // Reserved for future use. - SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"` - // Configuration that, if present, specifies the data placement for a - // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region]. - CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"` - // The bucket's Autoclass configuration. If there is no configuration, the - // Autoclass feature will be disabled and have no effect on the bucket. - Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` - // Optional. The bucket's soft delete policy. The soft delete policy prevents - // soft-deleted objects from being permanently deleted. 
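
// Illustrative sketch, not part of this diff: default_event_based_hold and
// retention_policy above are plain Bucket fields; the public client updates them
// through BucketAttrsToUpdate. Bucket name and retention period are placeholders:
package sketch

import (
	"context"
	"time"

	"cloud.google.com/go/storage"
)

func holdAndRetain(ctx context.Context, client *storage.Client) error {
	b := client.Bucket("backups")
	_, err := b.Update(ctx, storage.BucketAttrsToUpdate{
		DefaultEventBasedHold: true, // new objects start under an event-based hold
		RetentionPolicy: &storage.RetentionPolicy{
			RetentionPeriod: 90 * 24 * time.Hour, // objects younger than 90 days cannot be deleted
		},
	})
	return err
}
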
- SoftDeletePolicy *Bucket_SoftDeletePolicy `protobuf:"bytes,31,opt,name=soft_delete_policy,json=softDeletePolicy,proto3" json:"soft_delete_policy,omitempty"` -} - -func (x *Bucket) Reset() { - *x = Bucket{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket) ProtoMessage() {} - -func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. -func (*Bucket) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} -} - -func (x *Bucket) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Bucket) GetBucketId() string { - if x != nil { - return x.BucketId - } - return "" -} - -func (x *Bucket) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *Bucket) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *Bucket) GetMetageneration() int64 { - if x != nil { - return x.Metageneration - } - return 0 -} - -func (x *Bucket) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *Bucket) GetLocationType() string { - if x != nil { - return x.LocationType - } - return "" -} - -func (x *Bucket) GetStorageClass() string { - if x != nil { - return x.StorageClass - } - return "" -} - -func (x *Bucket) GetRpo() string { - if x != nil { - return x.Rpo - } - return "" -} - -func (x *Bucket) GetAcl() []*BucketAccessControl { - if x != nil { - return x.Acl - } - return nil -} - -func (x *Bucket) GetDefaultObjectAcl() []*ObjectAccessControl { - if x != nil { - return x.DefaultObjectAcl - } - return nil -} - -func (x *Bucket) GetLifecycle() *Bucket_Lifecycle { - if x != nil { - return x.Lifecycle - } - return nil -} - -func (x *Bucket) GetCreateTime() *timestamppb.Timestamp { - if x != nil { - return x.CreateTime - } - return nil -} - -func (x *Bucket) GetCors() []*Bucket_Cors { - if x != nil { - return x.Cors - } - return nil -} - -func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateTime - } - return nil -} - -func (x *Bucket) GetDefaultEventBasedHold() bool { - if x != nil { - return x.DefaultEventBasedHold - } - return false -} - -func (x *Bucket) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *Bucket) GetWebsite() *Bucket_Website { - if x != nil { - return x.Website - } - return nil -} - -func (x *Bucket) GetVersioning() *Bucket_Versioning { - if x != nil { - return x.Versioning - } - return nil -} - -func (x *Bucket) GetLogging() *Bucket_Logging { - if x != nil { - return x.Logging - } - return nil -} - -func (x *Bucket) GetOwner() *Owner { - if x != nil { - return x.Owner - } - return nil -} - -func (x *Bucket) GetEncryption() *Bucket_Encryption { - if x != nil { - return x.Encryption - } - return nil -} - -func (x *Bucket) GetBilling() *Bucket_Billing { - if x != nil { - return x.Billing - } - return nil -} - -func (x *Bucket) GetRetentionPolicy() *Bucket_RetentionPolicy { - if x != nil { - return 
x.RetentionPolicy - } - return nil -} - -func (x *Bucket) GetIamConfig() *Bucket_IamConfig { - if x != nil { - return x.IamConfig - } - return nil -} - -func (x *Bucket) GetSatisfiesPzs() bool { - if x != nil { - return x.SatisfiesPzs - } - return false -} - -func (x *Bucket) GetCustomPlacementConfig() *Bucket_CustomPlacementConfig { - if x != nil { - return x.CustomPlacementConfig - } - return nil -} - -func (x *Bucket) GetAutoclass() *Bucket_Autoclass { - if x != nil { - return x.Autoclass - } - return nil -} - -func (x *Bucket) GetSoftDeletePolicy() *Bucket_SoftDeletePolicy { - if x != nil { - return x.SoftDeletePolicy - } - return nil -} - -// An access-control entry. -type BucketAccessControl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The access permission for the entity. - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // The ID of the access-control entry. - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - // The entity holding the permission, in one of the following forms: - // * `user-{userid}` - // * `user-{email}` - // * `group-{groupid}` - // * `group-{email}` - // * `domain-{domain}` - // * `project-{team}-{projectnumber}` - // * `project-{team}-{projectid}` - // * `allUsers` - // * `allAuthenticatedUsers` - // Examples: - // * The user `liz@example.com` would be `user-liz@example.com`. - // * The group `example@googlegroups.com` would be - // `group-example@googlegroups.com` - // * All members of the Google Apps for Business domain `example.com` would be - // `domain-example.com` - // For project entities, `project-{team}-{projectnumber}` format will be - // returned on response. - Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` - // Output only. The alternative entity format, if exists. For project - // entities, `project-{team}-{projectid}` format will be returned on response. - EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` - // The ID for the entity, if any. - EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` - // The etag of the BucketAccessControl. - // If included in the metadata of an update or delete request message, the - // operation operation will only be performed if the etag matches that of the - // bucket's BucketAccessControl. - Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` - // The email address associated with the entity, if any. - Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` - // The domain associated with the entity, if any. - Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` - // The project team associated with the entity, if any. 
- ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` -} - -func (x *BucketAccessControl) Reset() { - *x = BucketAccessControl{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BucketAccessControl) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BucketAccessControl) ProtoMessage() {} - -func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. -func (*BucketAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} -} - -func (x *BucketAccessControl) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *BucketAccessControl) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *BucketAccessControl) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *BucketAccessControl) GetEntityAlt() string { - if x != nil { - return x.EntityAlt - } - return "" -} - -func (x *BucketAccessControl) GetEntityId() string { - if x != nil { - return x.EntityId - } - return "" -} - -func (x *BucketAccessControl) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *BucketAccessControl) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *BucketAccessControl) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *BucketAccessControl) GetProjectTeam() *ProjectTeam { - if x != nil { - return x.ProjectTeam - } - return nil -} - -// Message used to convey content being read or written, along with an optional -// checksum. -type ChecksummedData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. The data. - Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` - // If set, the CRC32C digest of the content field. - Crc32C *uint32 `protobuf:"fixed32,2,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` -} - -func (x *ChecksummedData) Reset() { - *x = ChecksummedData{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ChecksummedData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ChecksummedData) ProtoMessage() {} - -func (x *ChecksummedData) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. 
-func (*ChecksummedData) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} -} - -func (x *ChecksummedData) GetContent() []byte { - if x != nil { - return x.Content - } - return nil -} - -func (x *ChecksummedData) GetCrc32C() uint32 { - if x != nil && x.Crc32C != nil { - return *x.Crc32C - } - return 0 -} - -// Message used for storing full (not subrange) object checksums. -type ObjectChecksums struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // CRC32C digest of the object data. Computed by the Cloud Storage service for - // all written objects. - // If set in a WriteObjectRequest, service will validate that the stored - // object matches this checksum. - Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` - // 128 bit MD5 hash of the object data. - // For more information about using the MD5 hash, see - // [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and - // ETags: Best Practices]. - // Not all objects will provide an MD5 hash. For example, composite objects - // provide only crc32c hashes. - // This value is equivalent to running `cat object.txt | openssl md5 -binary` - Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"` -} - -func (x *ObjectChecksums) Reset() { - *x = ObjectChecksums{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObjectChecksums) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObjectChecksums) ProtoMessage() {} - -func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. -func (*ObjectChecksums) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} -} - -func (x *ObjectChecksums) GetCrc32C() uint32 { - if x != nil && x.Crc32C != nil { - return *x.Crc32C - } - return 0 -} - -func (x *ObjectChecksums) GetMd5Hash() []byte { - if x != nil { - return x.Md5Hash - } - return nil -} - -// Hmac Key Metadata, which includes all information other than the secret. -type HmacKeyMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. Resource name ID of the key in the format - // {projectIdentifier}/{accessId}. - // {projectIdentifier} can be the project ID or project number. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Immutable. Globally unique id for keys. - AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` - // Immutable. Identifies the project that owns the service account of the - // specified HMAC key, in the format "projects/{projectIdentifier}". - // {projectIdentifier} can be the project ID or project number. - Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` - // Output only. Email of the service account the key authenticates as. 
- ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` - // State of the key. One of ACTIVE, INACTIVE, or DELETED. - // Writable, can be updated by UpdateHmacKey operation. - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - // Output only. The creation time of the HMAC key. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // Output only. The last modification time of the HMAC key metadata. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - // The etag of the HMAC key. - Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` -} - -func (x *HmacKeyMetadata) Reset() { - *x = HmacKeyMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HmacKeyMetadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HmacKeyMetadata) ProtoMessage() {} - -func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. -func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} -} - -func (x *HmacKeyMetadata) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *HmacKeyMetadata) GetAccessId() string { - if x != nil { - return x.AccessId - } - return "" -} - -func (x *HmacKeyMetadata) GetProject() string { - if x != nil { - return x.Project - } - return "" -} - -func (x *HmacKeyMetadata) GetServiceAccountEmail() string { - if x != nil { - return x.ServiceAccountEmail - } - return "" -} - -func (x *HmacKeyMetadata) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { - if x != nil { - return x.CreateTime - } - return nil -} - -func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateTime - } - return nil -} - -func (x *HmacKeyMetadata) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -// A directive to publish Pub/Sub notifications upon changes to a bucket. -type NotificationConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The resource name of this NotificationConfig. - // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` - // The `{project}` portion may be `_` for globally unique buckets. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. The Pub/Sub topic to which this subscription publishes. Formatted - // as: - // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' - Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` - // The etag of the NotificationConfig. 
- // If included in the metadata of GetNotificationConfigRequest, the operation - // will only be performed if the etag matches that of the NotificationConfig. - Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` - // If present, only send notifications about listed event types. If - // empty, sent notifications for all event types. - EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` - // A list of additional attributes to attach to each Pub/Sub - // message published for this NotificationConfig. - CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If present, only apply this NotificationConfig to object names that - // begin with this prefix. - ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` - // Required. The desired content of the Payload. - PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` -} - -func (x *NotificationConfig) Reset() { - *x = NotificationConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NotificationConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NotificationConfig) ProtoMessage() {} - -func (x *NotificationConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead. -func (*NotificationConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} -} - -func (x *NotificationConfig) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NotificationConfig) GetTopic() string { - if x != nil { - return x.Topic - } - return "" -} - -func (x *NotificationConfig) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *NotificationConfig) GetEventTypes() []string { - if x != nil { - return x.EventTypes - } - return nil -} - -func (x *NotificationConfig) GetCustomAttributes() map[string]string { - if x != nil { - return x.CustomAttributes - } - return nil -} - -func (x *NotificationConfig) GetObjectNamePrefix() string { - if x != nil { - return x.ObjectNamePrefix - } - return "" -} - -func (x *NotificationConfig) GetPayloadFormat() string { - if x != nil { - return x.PayloadFormat - } - return "" -} - -// Describes the Customer-Supplied Encryption Key mechanism used to store an -// Object's data at rest. -type CustomerEncryption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The encryption algorithm. - EncryptionAlgorithm string `protobuf:"bytes,1,opt,name=encryption_algorithm,json=encryptionAlgorithm,proto3" json:"encryption_algorithm,omitempty"` - // SHA256 hash value of the encryption key. - // In raw bytes format (not base64-encoded). 
- KeySha256Bytes []byte `protobuf:"bytes,3,opt,name=key_sha256_bytes,json=keySha256Bytes,proto3" json:"key_sha256_bytes,omitempty"` -} - -func (x *CustomerEncryption) Reset() { - *x = CustomerEncryption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CustomerEncryption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CustomerEncryption) ProtoMessage() {} - -func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. -func (*CustomerEncryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} -} - -func (x *CustomerEncryption) GetEncryptionAlgorithm() string { - if x != nil { - return x.EncryptionAlgorithm - } - return "" -} - -func (x *CustomerEncryption) GetKeySha256Bytes() []byte { - if x != nil { - return x.KeySha256Bytes - } - return nil -} - -// An object. -type Object struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Immutable. The name of this object. Nearly any sequence of unicode - // characters is valid. See - // [Guidelines](https://cloud.google.com/storage/docs/objects#naming). - // Example: `test.txt` - // The `name` field by itself does not uniquely identify a Cloud Storage - // object. A Cloud Storage object is uniquely identified by the tuple of - // (bucket, object, generation). - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Immutable. The name of the bucket containing this object. - Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` - // The etag of the object. - // If included in the metadata of an update or delete request message, the - // operation will only be performed if the etag matches that of the live - // object. - Etag string `protobuf:"bytes,27,opt,name=etag,proto3" json:"etag,omitempty"` - // Immutable. The content generation of this object. Used for object - // versioning. - Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` - // Output only. The version of the metadata for this generation of this - // object. Used for preconditions and for detecting changes in metadata. A - // metageneration number is only meaningful in the context of a particular - // generation of a particular object. - Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` - // Storage class of the object. - StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` - // Output only. Content-Length of the object data in bytes, matching - // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. 
- Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` - // Content-Encoding of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] - ContentEncoding string `protobuf:"bytes,7,opt,name=content_encoding,json=contentEncoding,proto3" json:"content_encoding,omitempty"` - // Content-Disposition of the object data, matching - // [https://tools.ietf.org/html/rfc6266][RFC 6266]. - ContentDisposition string `protobuf:"bytes,8,opt,name=content_disposition,json=contentDisposition,proto3" json:"content_disposition,omitempty"` - // Cache-Control directive for the object data, matching - // [https://tools.ietf.org/html/rfc7234#section-5.2"][RFC 7234 §5.2]. - // If omitted, and the object is accessible to all anonymous users, the - // default will be `public, max-age=3600`. - CacheControl string `protobuf:"bytes,9,opt,name=cache_control,json=cacheControl,proto3" json:"cache_control,omitempty"` - // Access controls on the object. - // If iam_config.uniform_bucket_level_access is enabled on the parent - // bucket, requests to set, read, or modify acl is an error. - Acl []*ObjectAccessControl `protobuf:"bytes,10,rep,name=acl,proto3" json:"acl,omitempty"` - // Content-Language of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. - ContentLanguage string `protobuf:"bytes,11,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` - // Output only. If this object is noncurrent, this is the time when the object - // became noncurrent. - DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` - // Content-Type of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. - // If an object is stored without a Content-Type, it is served as - // `application/octet-stream`. - ContentType string `protobuf:"bytes,13,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` - // Output only. The creation time of the object. - CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - // Output only. Number of underlying components that make up this object. - // Components are accumulated by compose operations. - ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` - // Output only. Hashes for the data part of this object. This field is used - // for output only and will be silently ignored if provided in requests. - Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"` - // Output only. The modification time of the object metadata. - // Set initially to object creation time and then updated whenever any - // metadata of the object changes. This includes changes made by a requester, - // such as modifying custom metadata, as well as changes made by Cloud Storage - // on behalf of a requester, such as changing the storage class based on an - // Object Lifecycle Configuration. - UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - // Cloud KMS Key used to encrypt this object, if the object is encrypted by - // such a key. - KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` - // Output only. 
The time at which the object's storage class was last changed. - // When the object is initially created, it will be set to time_created. - UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` - // Whether an object is under temporary hold. While this flag is set to true, - // the object is protected against deletion and overwrites. A common use case - // of this flag is regulatory investigations where objects need to be retained - // while the investigation is ongoing. Note that unlike event-based hold, - // temporary hold does not impact retention expiration time of an object. - TemporaryHold bool `protobuf:"varint,20,opt,name=temporary_hold,json=temporaryHold,proto3" json:"temporary_hold,omitempty"` - // A server-determined value that specifies the earliest time that the - // object's retention period expires. - // Note 1: This field is not provided for objects with an active event-based - // hold, since retention expiration is unknown until the hold is removed. - // Note 2: This value can be provided even when temporary hold is set (so that - // the user can reason about policy without having to first unset the - // temporary hold). - RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` - // User-provided metadata, in key/value pairs. - Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Whether an object is under event-based hold. - // An event-based hold is a way to force the retention of an object until - // after some event occurs. Once the hold is released by explicitly setting - // this field to false, the object will become subject to any bucket-level - // retention policy, except that the retention duration will be calculated - // from the time the event based hold was lifted, rather than the time the - // object was created. - // - // In a WriteObject request, not setting this field implies that the value - // should be taken from the parent bucket's "default_event_based_hold" field. - // In a response, this field will always be set to true or false. - EventBasedHold *bool `protobuf:"varint,23,opt,name=event_based_hold,json=eventBasedHold,proto3,oneof" json:"event_based_hold,omitempty"` - // Output only. The owner of the object. This will always be the uploader of - // the object. - Owner *Owner `protobuf:"bytes,24,opt,name=owner,proto3" json:"owner,omitempty"` - // Metadata of Customer-Supplied Encryption Key, if the object is encrypted by - // such a key. - CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` - // A user-specified timestamp set on an object. 
- CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` -} - -func (x *Object) Reset() { - *x = Object{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Object) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Object) ProtoMessage() {} - -func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Object.ProtoReflect.Descriptor instead. -func (*Object) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} -} - -func (x *Object) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Object) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *Object) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *Object) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *Object) GetMetageneration() int64 { - if x != nil { - return x.Metageneration - } - return 0 -} - -func (x *Object) GetStorageClass() string { - if x != nil { - return x.StorageClass - } - return "" -} - -func (x *Object) GetSize() int64 { - if x != nil { - return x.Size - } - return 0 -} - -func (x *Object) GetContentEncoding() string { - if x != nil { - return x.ContentEncoding - } - return "" -} - -func (x *Object) GetContentDisposition() string { - if x != nil { - return x.ContentDisposition - } - return "" -} - -func (x *Object) GetCacheControl() string { - if x != nil { - return x.CacheControl - } - return "" -} - -func (x *Object) GetAcl() []*ObjectAccessControl { - if x != nil { - return x.Acl - } - return nil -} - -func (x *Object) GetContentLanguage() string { - if x != nil { - return x.ContentLanguage - } - return "" -} - -func (x *Object) GetDeleteTime() *timestamppb.Timestamp { - if x != nil { - return x.DeleteTime - } - return nil -} - -func (x *Object) GetContentType() string { - if x != nil { - return x.ContentType - } - return "" -} - -func (x *Object) GetCreateTime() *timestamppb.Timestamp { - if x != nil { - return x.CreateTime - } - return nil -} - -func (x *Object) GetComponentCount() int32 { - if x != nil { - return x.ComponentCount - } - return 0 -} - -func (x *Object) GetChecksums() *ObjectChecksums { - if x != nil { - return x.Checksums - } - return nil -} - -func (x *Object) GetUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateTime - } - return nil -} - -func (x *Object) GetKmsKey() string { - if x != nil { - return x.KmsKey - } - return "" -} - -func (x *Object) GetUpdateStorageClassTime() *timestamppb.Timestamp { - if x != nil { - return x.UpdateStorageClassTime - } - return nil -} - -func (x *Object) GetTemporaryHold() bool { - if x != nil { - return x.TemporaryHold - } - return false -} - -func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp { - if x != nil { - return x.RetentionExpireTime - } - return nil -} - -func (x *Object) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *Object) GetEventBasedHold() bool { - if x != nil && 
x.EventBasedHold != nil { - return *x.EventBasedHold - } - return false -} - -func (x *Object) GetOwner() *Owner { - if x != nil { - return x.Owner - } - return nil -} - -func (x *Object) GetCustomerEncryption() *CustomerEncryption { - if x != nil { - return x.CustomerEncryption - } - return nil -} - -func (x *Object) GetCustomTime() *timestamppb.Timestamp { - if x != nil { - return x.CustomTime - } - return nil -} - -// An access-control entry. -type ObjectAccessControl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The access permission for the entity. - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // The ID of the access-control entry. - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - // The entity holding the permission, in one of the following forms: - // * `user-{userid}` - // * `user-{email}` - // * `group-{groupid}` - // * `group-{email}` - // * `domain-{domain}` - // * `project-{team}-{projectnumber}` - // * `project-{team}-{projectid}` - // * `allUsers` - // * `allAuthenticatedUsers` - // Examples: - // * The user `liz@example.com` would be `user-liz@example.com`. - // * The group `example@googlegroups.com` would be - // `group-example@googlegroups.com`. - // * All members of the Google Apps for Business domain `example.com` would be - // `domain-example.com`. - // For project entities, `project-{team}-{projectnumber}` format will be - // returned on response. - Entity string `protobuf:"bytes,3,opt,name=entity,proto3" json:"entity,omitempty"` - // Output only. The alternative entity format, if exists. For project - // entities, `project-{team}-{projectid}` format will be returned on response. - EntityAlt string `protobuf:"bytes,9,opt,name=entity_alt,json=entityAlt,proto3" json:"entity_alt,omitempty"` - // The ID for the entity, if any. - EntityId string `protobuf:"bytes,4,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` - // The etag of the ObjectAccessControl. - // If included in the metadata of an update or delete request message, the - // operation will only be performed if the etag matches that of the live - // object's ObjectAccessControl. - Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` - // The email address associated with the entity, if any. - Email string `protobuf:"bytes,5,opt,name=email,proto3" json:"email,omitempty"` - // The domain associated with the entity, if any. - Domain string `protobuf:"bytes,6,opt,name=domain,proto3" json:"domain,omitempty"` - // The project team associated with the entity, if any. 
- ProjectTeam *ProjectTeam `protobuf:"bytes,7,opt,name=project_team,json=projectTeam,proto3" json:"project_team,omitempty"` -} - -func (x *ObjectAccessControl) Reset() { - *x = ObjectAccessControl{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObjectAccessControl) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObjectAccessControl) ProtoMessage() {} - -func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. -func (*ObjectAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} -} - -func (x *ObjectAccessControl) GetRole() string { - if x != nil { - return x.Role - } - return "" -} - -func (x *ObjectAccessControl) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *ObjectAccessControl) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *ObjectAccessControl) GetEntityAlt() string { - if x != nil { - return x.EntityAlt - } - return "" -} - -func (x *ObjectAccessControl) GetEntityId() string { - if x != nil { - return x.EntityId - } - return "" -} - -func (x *ObjectAccessControl) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -func (x *ObjectAccessControl) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *ObjectAccessControl) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ObjectAccessControl) GetProjectTeam() *ProjectTeam { - if x != nil { - return x.ProjectTeam - } - return nil -} - -// The result of a call to Objects.ListObjects -type ListObjectsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of items. - Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"` - // The list of prefixes of objects matching-but-not-listed up to and including - // the requested delimiter. - Prefixes []string `protobuf:"bytes,2,rep,name=prefixes,proto3" json:"prefixes,omitempty"` - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. 
- NextPageToken string `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListObjectsResponse) Reset() { - *x = ListObjectsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListObjectsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListObjectsResponse) ProtoMessage() {} - -func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. -func (*ListObjectsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} -} - -func (x *ListObjectsResponse) GetObjects() []*Object { - if x != nil { - return x.Objects - } - return nil -} - -func (x *ListObjectsResponse) GetPrefixes() []string { - if x != nil { - return x.Prefixes - } - return nil -} - -func (x *ListObjectsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// Represents the Viewers, Editors, or Owners of a given project. -type ProjectTeam struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The project number. - ProjectNumber string `protobuf:"bytes,1,opt,name=project_number,json=projectNumber,proto3" json:"project_number,omitempty"` - // The team. - Team string `protobuf:"bytes,2,opt,name=team,proto3" json:"team,omitempty"` -} - -func (x *ProjectTeam) Reset() { - *x = ProjectTeam{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProjectTeam) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProjectTeam) ProtoMessage() {} - -func (x *ProjectTeam) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. -func (*ProjectTeam) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} -} - -func (x *ProjectTeam) GetProjectNumber() string { - if x != nil { - return x.ProjectNumber - } - return "" -} - -func (x *ProjectTeam) GetTeam() string { - if x != nil { - return x.Team - } - return "" -} - -// A service account, owned by Cloud Storage, which may be used when taking -// action on behalf of a given project, for example to publish Pub/Sub -// notifications or to retrieve security keys. -type ServiceAccount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The ID of the notification. 
- EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"` -} - -func (x *ServiceAccount) Reset() { - *x = ServiceAccount{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceAccount) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceAccount) ProtoMessage() {} - -func (x *ServiceAccount) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. -func (*ServiceAccount) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54} -} - -func (x *ServiceAccount) GetEmailAddress() string { - if x != nil { - return x.EmailAddress - } - return "" -} - -// The owner of a specific resource. -type Owner struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The entity, in the form `user-`*userId*. - Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` - // The ID for the entity. - EntityId string `protobuf:"bytes,2,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` -} - -func (x *Owner) Reset() { - *x = Owner{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Owner) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Owner) ProtoMessage() {} - -func (x *Owner) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Owner.ProtoReflect.Descriptor instead. -func (*Owner) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55} -} - -func (x *Owner) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *Owner) GetEntityId() string { - if x != nil { - return x.EntityId - } - return "" -} - -// Specifies a requested range of bytes to download. -type ContentRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The starting offset of the object data. This value is inclusive. - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - // The ending offset of the object data. This value is exclusive. - End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` - // The complete length of the object data. 
- CompleteLength int64 `protobuf:"varint,3,opt,name=complete_length,json=completeLength,proto3" json:"complete_length,omitempty"` -} - -func (x *ContentRange) Reset() { - *x = ContentRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ContentRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ContentRange) ProtoMessage() {} - -func (x *ContentRange) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. -func (*ContentRange) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56} -} - -func (x *ContentRange) GetStart() int64 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *ContentRange) GetEnd() int64 { - if x != nil { - return x.End - } - return 0 -} - -func (x *ContentRange) GetCompleteLength() int64 { - if x != nil { - return x.CompleteLength - } - return 0 -} - -// Description of a source object for a composition request. -type ComposeObjectRequest_SourceObject struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The source object's name. All source objects must reside in the - // same bucket. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The generation of this object to use as the source. - Generation int64 `protobuf:"varint,2,opt,name=generation,proto3" json:"generation,omitempty"` - // Conditions that must be met for this operation to execute. - ObjectPreconditions *ComposeObjectRequest_SourceObject_ObjectPreconditions `protobuf:"bytes,3,opt,name=object_preconditions,json=objectPreconditions,proto3" json:"object_preconditions,omitempty"` -} - -func (x *ComposeObjectRequest_SourceObject) Reset() { - *x = ComposeObjectRequest_SourceObject{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ComposeObjectRequest_SourceObject) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} - -func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead. 
-func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0} -} - -func (x *ComposeObjectRequest_SourceObject) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ComposeObjectRequest_SourceObject) GetGeneration() int64 { - if x != nil { - return x.Generation - } - return 0 -} - -func (x *ComposeObjectRequest_SourceObject) GetObjectPreconditions() *ComposeObjectRequest_SourceObject_ObjectPreconditions { - if x != nil { - return x.ObjectPreconditions - } - return nil -} - -// Preconditions for a source object of a composition request. -type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Only perform the composition if the generation of the source object - // that would be used matches this value. If this value and a generation - // are both specified, they must be the same value or the call will fail. - IfGenerationMatch *int64 `protobuf:"varint,1,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` -} - -func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { - *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} - -func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead. -func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0} -} - -func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 { - if x != nil && x.IfGenerationMatch != nil { - return *x.IfGenerationMatch - } - return 0 -} - -// Billing properties of a bucket. -type Bucket_Billing struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // When set to true, Requester Pays is enabled for this bucket. 
- RequesterPays bool `protobuf:"varint,1,opt,name=requester_pays,json=requesterPays,proto3" json:"requester_pays,omitempty"` -} - -func (x *Bucket_Billing) Reset() { - *x = Bucket_Billing{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Billing) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Billing) ProtoMessage() {} - -func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. -func (*Bucket_Billing) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} -} - -func (x *Bucket_Billing) GetRequesterPays() bool { - if x != nil { - return x.RequesterPays - } - return false -} - -// Cross-Origin Response sharing (CORS) properties for a bucket. -// For more on Cloud Storage and CORS, see -// https://cloud.google.com/storage/docs/cross-origin. -// For more on CORS in general, see https://tools.ietf.org/html/rfc6454. -type Bucket_Cors struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of Origins eligible to receive CORS response headers. See - // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins. - // Note: "*" is permitted in the list of origins, and means "any Origin". - Origin []string `protobuf:"bytes,1,rep,name=origin,proto3" json:"origin,omitempty"` - // The list of HTTP methods on which to include CORS response headers, - // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of - // methods, and means "any method". - Method []string `protobuf:"bytes,2,rep,name=method,proto3" json:"method,omitempty"` - // The list of HTTP headers other than the - // [https://www.w3.org/TR/cors/#simple-response-header][simple response - // headers] to give permission for the user-agent to share across domains. - ResponseHeader []string `protobuf:"bytes,3,rep,name=response_header,json=responseHeader,proto3" json:"response_header,omitempty"` - // The value, in seconds, to return in the - // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age - // header] used in preflight responses. - MaxAgeSeconds int32 `protobuf:"varint,4,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"` -} - -func (x *Bucket_Cors) Reset() { - *x = Bucket_Cors{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Cors) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Cors) ProtoMessage() {} - -func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. 
-func (*Bucket_Cors) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} -} - -func (x *Bucket_Cors) GetOrigin() []string { - if x != nil { - return x.Origin - } - return nil -} - -func (x *Bucket_Cors) GetMethod() []string { - if x != nil { - return x.Method - } - return nil -} - -func (x *Bucket_Cors) GetResponseHeader() []string { - if x != nil { - return x.ResponseHeader - } - return nil -} - -func (x *Bucket_Cors) GetMaxAgeSeconds() int32 { - if x != nil { - return x.MaxAgeSeconds - } - return 0 -} - -// Encryption properties of a bucket. -type Bucket_Encryption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the Cloud KMS key that will be used to encrypt objects - // inserted into this bucket, if no encryption method is specified. - DefaultKmsKey string `protobuf:"bytes,1,opt,name=default_kms_key,json=defaultKmsKey,proto3" json:"default_kms_key,omitempty"` -} - -func (x *Bucket_Encryption) Reset() { - *x = Bucket_Encryption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Encryption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Encryption) ProtoMessage() {} - -func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. -func (*Bucket_Encryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} -} - -func (x *Bucket_Encryption) GetDefaultKmsKey() string { - if x != nil { - return x.DefaultKmsKey - } - return "" -} - -// Bucket restriction options. -type Bucket_IamConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Bucket restriction options currently enforced on the bucket. - UniformBucketLevelAccess *Bucket_IamConfig_UniformBucketLevelAccess `protobuf:"bytes,1,opt,name=uniform_bucket_level_access,json=uniformBucketLevelAccess,proto3" json:"uniform_bucket_level_access,omitempty"` - // Whether IAM will enforce public access prevention. Valid values are - // "enforced" or "inherited". 
- PublicAccessPrevention string `protobuf:"bytes,3,opt,name=public_access_prevention,json=publicAccessPrevention,proto3" json:"public_access_prevention,omitempty"` -} - -func (x *Bucket_IamConfig) Reset() { - *x = Bucket_IamConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_IamConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_IamConfig) ProtoMessage() {} - -func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. -func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} -} - -func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { - if x != nil { - return x.UniformBucketLevelAccess - } - return nil -} - -func (x *Bucket_IamConfig) GetPublicAccessPrevention() string { - if x != nil { - return x.PublicAccessPrevention - } - return "" -} - -// Lifecycle properties of a bucket. -// For more information, see https://cloud.google.com/storage/docs/lifecycle. -type Bucket_Lifecycle struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A lifecycle management rule, which is made of an action to take and the - // condition(s) under which the action will be taken. - Rule []*Bucket_Lifecycle_Rule `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty"` -} - -func (x *Bucket_Lifecycle) Reset() { - *x = Bucket_Lifecycle{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Lifecycle) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Lifecycle) ProtoMessage() {} - -func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. -func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} -} - -func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { - if x != nil { - return x.Rule - } - return nil -} - -// Logging-related properties of a bucket. -type Bucket_Logging struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The destination bucket where the current bucket's logs should be placed, - // using path format (like `projects/123456/buckets/foo`). - LogBucket string `protobuf:"bytes,1,opt,name=log_bucket,json=logBucket,proto3" json:"log_bucket,omitempty"` - // A prefix for log object names. 
- LogObjectPrefix string `protobuf:"bytes,2,opt,name=log_object_prefix,json=logObjectPrefix,proto3" json:"log_object_prefix,omitempty"` -} - -func (x *Bucket_Logging) Reset() { - *x = Bucket_Logging{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Logging) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Logging) ProtoMessage() {} - -func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. -func (*Bucket_Logging) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5} -} - -func (x *Bucket_Logging) GetLogBucket() string { - if x != nil { - return x.LogBucket - } - return "" -} - -func (x *Bucket_Logging) GetLogObjectPrefix() string { - if x != nil { - return x.LogObjectPrefix - } - return "" -} - -// Retention policy properties of a bucket. -type Bucket_RetentionPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Server-determined value that indicates the time from which policy was - // enforced and effective. - EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` - // Once locked, an object retention policy cannot be modified. - IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` - // The duration that objects need to be retained. Retention duration must be - // greater than zero and less than 100 years. Note that enforcement of - // retention periods less than a day is not guaranteed. Such periods should - // only be used for testing purposes. Any `nanos` value specified will be - // rounded down to the nearest second. - RetentionDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"` -} - -func (x *Bucket_RetentionPolicy) Reset() { - *x = Bucket_RetentionPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_RetentionPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_RetentionPolicy) ProtoMessage() {} - -func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. 
-func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6} -} - -func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { - if x != nil { - return x.EffectiveTime - } - return nil -} - -func (x *Bucket_RetentionPolicy) GetIsLocked() bool { - if x != nil { - return x.IsLocked - } - return false -} - -func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { - if x != nil { - return x.RetentionDuration - } - return nil -} - -// Soft delete policy properties of a bucket. -type Bucket_SoftDeletePolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The period of time that soft-deleted objects in the bucket must be - // retained and cannot be permanently deleted. The duration must be greater - // than or equal to 7 days and less than 1 year. - RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"` - // Time from which the policy was effective. This is service-provided. - EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"` -} - -func (x *Bucket_SoftDeletePolicy) Reset() { - *x = Bucket_SoftDeletePolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_SoftDeletePolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_SoftDeletePolicy) ProtoMessage() {} - -func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead. -func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7} -} - -func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration { - if x != nil { - return x.RetentionDuration - } - return nil -} - -func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp { - if x != nil { - return x.EffectiveTime - } - return nil -} - -// Properties of a bucket related to versioning. -// For more on Cloud Storage versioning, see -// https://cloud.google.com/storage/docs/object-versioning. -type Bucket_Versioning struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // While set to true, versioning is fully enabled for this bucket. 
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` -} - -func (x *Bucket_Versioning) Reset() { - *x = Bucket_Versioning{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Versioning) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Versioning) ProtoMessage() {} - -func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[67] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. -func (*Bucket_Versioning) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8} -} - -func (x *Bucket_Versioning) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -// Properties of a bucket related to accessing the contents as a static -// website. For more on hosting a static website via Cloud Storage, see -// https://cloud.google.com/storage/docs/hosting-static-website. -type Bucket_Website struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // If the requested object path is missing, the service will ensure the path - // has a trailing '/', append this suffix, and attempt to retrieve the - // resulting object. This allows the creation of `index.html` - // objects to represent directory pages. - MainPageSuffix string `protobuf:"bytes,1,opt,name=main_page_suffix,json=mainPageSuffix,proto3" json:"main_page_suffix,omitempty"` - // If the requested object path is missing, and any - // `mainPageSuffix` object is missing, if applicable, the service - // will return the named object from this bucket as the content for a - // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found] - // result. - NotFoundPage string `protobuf:"bytes,2,opt,name=not_found_page,json=notFoundPage,proto3" json:"not_found_page,omitempty"` -} - -func (x *Bucket_Website) Reset() { - *x = Bucket_Website{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Website) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Website) ProtoMessage() {} - -func (x *Bucket_Website) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. -func (*Bucket_Website) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9} -} - -func (x *Bucket_Website) GetMainPageSuffix() string { - if x != nil { - return x.MainPageSuffix - } - return "" -} - -func (x *Bucket_Website) GetNotFoundPage() string { - if x != nil { - return x.NotFoundPage - } - return "" -} - -// Configuration for Custom Dual Regions. It should specify precisely two -// eligible regions within the same Multiregion. 
More information on regions -// may be found [https://cloud.google.com/storage/docs/locations][here]. -type Bucket_CustomPlacementConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // List of locations to use for data placement. - DataLocations []string `protobuf:"bytes,1,rep,name=data_locations,json=dataLocations,proto3" json:"data_locations,omitempty"` -} - -func (x *Bucket_CustomPlacementConfig) Reset() { - *x = Bucket_CustomPlacementConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_CustomPlacementConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_CustomPlacementConfig) ProtoMessage() {} - -func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. -func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10} -} - -func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { - if x != nil { - return x.DataLocations - } - return nil -} - -// Configuration for a bucket's Autoclass feature. -type Bucket_Autoclass struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Enables Autoclass. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - // Output only. Latest instant at which the `enabled` field was set to true - // after being disabled/unconfigured or set to false after being enabled. If - // Autoclass is enabled when the bucket is created, the toggle_time is set - // to the bucket creation time. - ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` - // An object in an Autoclass bucket will eventually cool down to the - // terminal storage class if there is no access to the object. - // The only valid values are NEARLINE and ARCHIVE. - TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"` - // Output only. Latest instant at which the autoclass terminal storage class - // was updated. 
- TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"` -} - -func (x *Bucket_Autoclass) Reset() { - *x = Bucket_Autoclass{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Autoclass) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Autoclass) ProtoMessage() {} - -func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. -func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11} -} - -func (x *Bucket_Autoclass) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { - if x != nil { - return x.ToggleTime - } - return nil -} - -func (x *Bucket_Autoclass) GetTerminalStorageClass() string { - if x != nil && x.TerminalStorageClass != nil { - return *x.TerminalStorageClass - } - return "" -} - -func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp { - if x != nil { - return x.TerminalStorageClassUpdateTime - } - return nil -} - -// Settings for Uniform Bucket level access. -// See https://cloud.google.com/storage/docs/uniform-bucket-level-access. -type Bucket_IamConfig_UniformBucketLevelAccess struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // If set, access checks only use bucket-level IAM policies or above. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - // The deadline time for changing - // `iam_config.uniform_bucket_level_access.enabled` from `true` to - // `false`. Mutable until the specified deadline is reached, but not - // afterward. - LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` -} - -func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { - *x = Bucket_IamConfig_UniformBucketLevelAccess{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} - -func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[72] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. 
-func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0} -} - -func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp { - if x != nil { - return x.LockTime - } - return nil -} - -// A lifecycle Rule, combining an action to take on an object and a -// condition which will trigger that action. -type Bucket_Lifecycle_Rule struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The action to take. - Action *Bucket_Lifecycle_Rule_Action `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"` - // The condition(s) under which the action will be taken. - Condition *Bucket_Lifecycle_Rule_Condition `protobuf:"bytes,2,opt,name=condition,proto3" json:"condition,omitempty"` -} - -func (x *Bucket_Lifecycle_Rule) Reset() { - *x = Bucket_Lifecycle_Rule{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Lifecycle_Rule) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Lifecycle_Rule) ProtoMessage() {} - -func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. -func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0} -} - -func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { - if x != nil { - return x.Action - } - return nil -} - -func (x *Bucket_Lifecycle_Rule) GetCondition() *Bucket_Lifecycle_Rule_Condition { - if x != nil { - return x.Condition - } - return nil -} - -// An action to take on an object. -type Bucket_Lifecycle_Rule_Action struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Type of the action. Currently, only `Delete`, `SetStorageClass`, and - // `AbortIncompleteMultipartUpload` are supported. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Target storage class. Required iff the type of the action is - // SetStorageClass. 
- StorageClass string `protobuf:"bytes,2,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` -} - -func (x *Bucket_Lifecycle_Rule_Action) Reset() { - *x = Bucket_Lifecycle_Rule_Action{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Lifecycle_Rule_Action) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} - -func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. -func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0} -} - -func (x *Bucket_Lifecycle_Rule_Action) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Bucket_Lifecycle_Rule_Action) GetStorageClass() string { - if x != nil { - return x.StorageClass - } - return "" -} - -// A condition of an object which triggers some action. -type Bucket_Lifecycle_Rule_Condition struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Age of an object (in days). This condition is satisfied when an - // object reaches the specified age. - // A value of 0 indicates that all objects immediately match this - // condition. - AgeDays *int32 `protobuf:"varint,1,opt,name=age_days,json=ageDays,proto3,oneof" json:"age_days,omitempty"` - // This condition is satisfied when an object is created before midnight - // of the specified date in UTC. - CreatedBefore *date.Date `protobuf:"bytes,2,opt,name=created_before,json=createdBefore,proto3" json:"created_before,omitempty"` - // Relevant only for versioned objects. If the value is - // `true`, this condition matches live objects; if the value - // is `false`, it matches archived objects. - IsLive *bool `protobuf:"varint,3,opt,name=is_live,json=isLive,proto3,oneof" json:"is_live,omitempty"` - // Relevant only for versioned objects. If the value is N, this - // condition is satisfied when there are at least N versions (including - // the live version) newer than this version of the object. - NumNewerVersions *int32 `protobuf:"varint,4,opt,name=num_newer_versions,json=numNewerVersions,proto3,oneof" json:"num_newer_versions,omitempty"` - // Objects having any of the storage classes specified by this condition - // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`, - // `NEARLINE`, `COLDLINE`, `STANDARD`, and - // `DURABLE_REDUCED_AVAILABILITY`. - MatchesStorageClass []string `protobuf:"bytes,5,rep,name=matches_storage_class,json=matchesStorageClass,proto3" json:"matches_storage_class,omitempty"` - // Number of days that have elapsed since the custom timestamp set on an - // object. - // The value of the field must be a nonnegative integer. 
- DaysSinceCustomTime *int32 `protobuf:"varint,7,opt,name=days_since_custom_time,json=daysSinceCustomTime,proto3,oneof" json:"days_since_custom_time,omitempty"` - // An object matches this condition if the custom timestamp set on the - // object is before the specified date in UTC. - CustomTimeBefore *date.Date `protobuf:"bytes,8,opt,name=custom_time_before,json=customTimeBefore,proto3" json:"custom_time_before,omitempty"` - // This condition is relevant only for versioned objects. An object - // version satisfies this condition only if these many days have been - // passed since it became noncurrent. The value of the field must be a - // nonnegative integer. If it's zero, the object version will become - // eligible for Lifecycle action as soon as it becomes noncurrent. - DaysSinceNoncurrentTime *int32 `protobuf:"varint,9,opt,name=days_since_noncurrent_time,json=daysSinceNoncurrentTime,proto3,oneof" json:"days_since_noncurrent_time,omitempty"` - // This condition is relevant only for versioned objects. An object - // version satisfies this condition only if it became noncurrent before - // the specified date in UTC. - NoncurrentTimeBefore *date.Date `protobuf:"bytes,10,opt,name=noncurrent_time_before,json=noncurrentTimeBefore,proto3" json:"noncurrent_time_before,omitempty"` - // List of object name prefixes. If any prefix exactly matches the - // beginning of the object name, the condition evaluates to true. - MatchesPrefix []string `protobuf:"bytes,11,rep,name=matches_prefix,json=matchesPrefix,proto3" json:"matches_prefix,omitempty"` - // List of object name suffixes. If any suffix exactly matches the - // end of the object name, the condition evaluates to true. - MatchesSuffix []string `protobuf:"bytes,12,rep,name=matches_suffix,json=matchesSuffix,proto3" json:"matches_suffix,omitempty"` -} - -func (x *Bucket_Lifecycle_Rule_Condition) Reset() { - *x = Bucket_Lifecycle_Rule_Condition{} - if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket_Lifecycle_Rule_Condition) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} - -func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. 
-func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1} -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { - if x != nil && x.AgeDays != nil { - return *x.AgeDays - } - return 0 -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetCreatedBefore() *date.Date { - if x != nil { - return x.CreatedBefore - } - return nil -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetIsLive() bool { - if x != nil && x.IsLive != nil { - return *x.IsLive - } - return false -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetNumNewerVersions() int32 { - if x != nil && x.NumNewerVersions != nil { - return *x.NumNewerVersions - } - return 0 -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesStorageClass() []string { - if x != nil { - return x.MatchesStorageClass - } - return nil -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceCustomTime() int32 { - if x != nil && x.DaysSinceCustomTime != nil { - return *x.DaysSinceCustomTime - } - return 0 -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetCustomTimeBefore() *date.Date { - if x != nil { - return x.CustomTimeBefore - } - return nil -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetDaysSinceNoncurrentTime() int32 { - if x != nil && x.DaysSinceNoncurrentTime != nil { - return *x.DaysSinceNoncurrentTime - } - return 0 -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetNoncurrentTimeBefore() *date.Date { - if x != nil { - return x.NoncurrentTimeBefore - } - return nil -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesPrefix() []string { - if x != nil { - return x.MatchesPrefix - } - return nil -} - -func (x *Bucket_Lifecycle_Rule_Condition) GetMatchesSuffix() []string { - if x != nil { - return x.MatchesSuffix - } - return nil -} - -var File_google_storage_v2_storage_proto protoreflect.FileDescriptor - -var file_google_storage_v2_storage_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, - 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, - 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1b, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x22, 0x93, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, - 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, - 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, - 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, - 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, - 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, 0xf3, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 
0x72, 0x65, 0x66, - 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, - 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, - 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, - 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 
0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, - 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, - 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, - 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 
0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, - 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, - 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, - 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, - 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, - 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, - 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, - 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x2e, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, - 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04, - 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 
[deleted vendored generated code, continued: the remainder of this hunk removes the raw protobuf file-descriptor bytes for the google.storage.v2 API from the vendored cloud.google.com/go/storage client. The encoded descriptor covers RestoreObjectRequest, CancelResumableWriteRequest/Response, ReadObjectRequest/Response, GetObjectRequest, WriteObjectSpec, WriteObjectRequest/Response, BidiWriteObjectRequest/Response, ListObjectsRequest, QueryWriteStatusRequest/Response, RewriteObjectRequest, RewriteResponse, StartResumableWriteRequest/Response, UpdateObjectRequest, GetServiceAccountRequest, the HMAC key create/delete/get/list/update requests and responses, CommonObjectRequestParams, ServiceConstants, and the beginning of the Bucket message. No hand-written code is touched; the generated file is deleted along with the rest of the vendor/ tree.]
0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, - 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, - 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, - 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, - 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, - 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, - 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, - 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, - 0x5f, 0x70, 0x7a, 0x73, 0x18, 
0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, - 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, - 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, - 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, - 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, - 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, - 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, - 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, - 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, - 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, - 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, - 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, - 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, - 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, - 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, - 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 
0x61, 0x73, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, - 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, - 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, - 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, - 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, - 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, - 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, - 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, - 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, - 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, - 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, - 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, - 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x18, 
0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, - 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, - 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, - 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, - 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, - 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, - 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, - 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, - 0x15, 0x0a, 0x13, 0x5f, 0x72, 
0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, - 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, - 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, - 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, - 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, - 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, - 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, - 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, - 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, - 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 
0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, - 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, - 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, - 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, - 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, - 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, - 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, - 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 
0x07, 0x5f, 0x63, - 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, - 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, - 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, - 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, - 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, - 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, - 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, - 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, - 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, - 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, - 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, - 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, - 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, - 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, - 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, - 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 
0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, - 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, - 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, - 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, - 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, - 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, - 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, - 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 
0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, - 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, - 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, - 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, - 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, - 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x6f, 
0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, - 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, - 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa, 0x27, 0x0a, 0x07, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, - 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x8a, 0xd3, 0xe4, 0x93, 
0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, - 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, - 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 
0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, 0x41, 0x0f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xd7, 0x01, - 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x14, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, - 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0xda, - 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, - 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x1a, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, - 0x65, 
0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, - 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, - 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, - 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, - 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, - 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, - 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, - 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, - 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, - 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 
0x21, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, - 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, - 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x80, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, - 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, - 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0xda, - 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, - 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a, 0xa7, - 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2d, 0x70, 0x6c, 
0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, - 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, - 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, - 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, - 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, - 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, - 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_storage_v2_storage_proto_rawDescOnce sync.Once - file_google_storage_v2_storage_proto_rawDescData = file_google_storage_v2_storage_proto_rawDesc -) - -func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { - file_google_storage_v2_storage_proto_rawDescOnce.Do(func() { - file_google_storage_v2_storage_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_storage_v2_storage_proto_rawDescData) - }) - return file_google_storage_v2_storage_proto_rawDescData -} - -var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 78) -var file_google_storage_v2_storage_proto_goTypes = []interface{}{ - (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values - (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest - (*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest - (*CreateBucketRequest)(nil), // 3: 
google.storage.v2.CreateBucketRequest - (*ListBucketsRequest)(nil), // 4: google.storage.v2.ListBucketsRequest - (*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse - (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest - (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest - (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest - (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest - (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest - (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest - (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse - (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest - (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest - (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest - (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest - (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse - (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest - (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest - (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse - (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec - (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest - (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse - (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest - (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse - (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest - (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest - (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse - (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest - (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse - (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest - (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse - (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest - (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest - (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest - (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse - (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest - (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest - (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest - (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse - (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest - (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams - (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants - (*Bucket)(nil), // 44: google.storage.v2.Bucket - (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl - (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData - (*ObjectChecksums)(nil), // 47: 
google.storage.v2.ObjectChecksums - (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata - (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig - (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption - (*Object)(nil), // 51: google.storage.v2.Object - (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl - (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse - (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam - (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount - (*Owner)(nil), // 56: google.storage.v2.Owner - (*ContentRange)(nil), // 57: google.storage.v2.ContentRange - (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject - (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing - (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors - (*Bucket_Encryption)(nil), // 62: google.storage.v2.Bucket.Encryption - (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig - (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle - (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging - (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy - (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy - (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning - (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website - (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig - (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass - nil, // 72: google.storage.v2.Bucket.LabelsEntry - (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 73: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 77: google.storage.v2.NotificationConfig.CustomAttributesEntry - nil, // 78: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 79: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 80: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 81: google.protobuf.Duration - (*date.Date)(nil), // 82: google.type.Date - (*iampb.GetIamPolicyRequest)(nil), // 83: google.iam.v1.GetIamPolicyRequest - (*iampb.SetIamPolicyRequest)(nil), // 84: google.iam.v1.SetIamPolicyRequest - (*iampb.TestIamPermissionsRequest)(nil), // 85: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 86: google.protobuf.Empty - (*iampb.Policy)(nil), // 87: google.iam.v1.Policy - (*iampb.TestIamPermissionsResponse)(nil), // 88: google.iam.v1.TestIamPermissionsResponse -} -var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 79, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask - 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 79, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask - 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket - 44, // 4: 
google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 79, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask - 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig - 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig - 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object - 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject - 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 79, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 79, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange - 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object - 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object - 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object - 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object - 79, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask - 42, // 34: 
google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object - 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object - 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object - 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 79, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask - 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata - 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata - 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 79, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask - 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl - 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl - 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 80, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp - 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 80, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 72, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry - 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website - 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning - 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging - 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner - 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption - 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing - 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy - 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig - 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig - 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 67, // 67: google.storage.v2.Bucket.soft_delete_policy:type_name -> 
google.storage.v2.Bucket.SoftDeletePolicy - 54, // 68: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 80, // 69: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 80, // 70: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 77, // 71: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry - 52, // 72: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 80, // 73: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 80, // 74: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 47, // 75: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 80, // 76: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 80, // 77: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 80, // 78: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 78, // 79: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 56, // 80: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 50, // 81: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 80, // 82: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 54, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 51, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 59, // 85: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 73, // 86: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 74, // 87: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 80, // 88: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 81, // 89: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration - 81, // 90: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration - 80, // 91: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp - 80, // 92: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 80, // 93: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp - 80, // 94: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp - 75, // 95: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 76, // 96: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 82, // 97: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 82, // 98: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 82, // 99: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 100: 
google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 101: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 102: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 103: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 104: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 83, // 105: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 84, // 106: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 85, // 107: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 108: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 109: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest - 9, // 110: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest - 10, // 111: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest - 11, // 112: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest - 13, // 113: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 114: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 115: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest - 16, // 116: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest - 19, // 117: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 18, // 118: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 33, // 119: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 22, // 120: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 24, // 121: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest - 26, // 122: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 29, // 123: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 31, // 124: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 27, // 125: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 34, // 126: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest - 35, // 127: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 37, // 128: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 38, // 129: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 39, // 130: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 41, // 131: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 86, // 132: 
google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 44, // 133: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 44, // 134: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 135: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 44, // 136: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 87, // 137: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 87, // 138: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 88, // 139: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 44, // 140: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 86, // 141: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty - 49, // 142: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 49, // 143: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 12, // 144: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse - 51, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 86, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 51, // 147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object - 17, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 51, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 20, // 150: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 51, // 151: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 23, // 152: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 25, // 153: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse - 53, // 154: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 30, // 155: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 32, // 156: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 28, // 157: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 55, // 158: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 36, // 159: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 86, // 160: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty - 48, // 161: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 40, // 162: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 48, // 163: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 132, // [132:164] is the sub-list for method output_type - 100, // [100:132] is the sub-list for method input_type - 100, // [100:100] is the sub-list for extension type_name - 100, // [100:100] is the sub-list for extension extendee - 0, // [0:100] is the sub-list for 
field type_name -} - -func init() { file_google_storage_v2_storage_proto_init() } -func file_google_storage_v2_storage_proto_init() { - if File_google_storage_v2_storage_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteBucketRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBucketRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateBucketRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBucketsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBucketsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LockBucketRetentionPolicyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateBucketRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateNotificationConfigRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationConfigsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationConfigsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelResumableWriteRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelResumableWriteResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectSpec); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BidiWriteObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BidiWriteObjectResponse); i { - case 0: - return &v.state - 
case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateObjectRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetServiceAccountRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v 
:= v.(*GetHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateHmacKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CommonObjectRequestParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConstants); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BucketAccessControl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChecksummedData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectChecksums); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HmacKeyMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NotificationConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomerEncryption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*Object); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectAccessControl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProjectTeam); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceAccount); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Owner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ContentRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Billing); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Cors); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Encryption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*Bucket_Lifecycle); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Logging); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_RetentionPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_SoftDeletePolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Versioning); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Website); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_CustomPlacementConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Autoclass); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle_Rule); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle_Rule_Action); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []interface{}{} - 
file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ - (*WriteObjectRequest_UploadId)(nil), - (*WriteObjectRequest_WriteObjectSpec)(nil), - (*WriteObjectRequest_ChecksummedData)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{ - (*WriteObjectResponse_PersistedSize)(nil), - (*WriteObjectResponse_Resource)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{ - (*BidiWriteObjectRequest_UploadId)(nil), - (*BidiWriteObjectRequest_WriteObjectSpec)(nil), - (*BidiWriteObjectRequest_ChecksummedData)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ - (*BidiWriteObjectResponse_PersistedSize)(nil), - (*BidiWriteObjectResponse_Resource)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{ - (*QueryWriteStatusResponse_PersistedSize)(nil), - (*QueryWriteStatusResponse_Resource)(nil), - } - file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[75].OneofWrappers = []interface{}{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, - NumEnums: 1, - NumMessages: 78, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_google_storage_v2_storage_proto_goTypes, - DependencyIndexes: file_google_storage_v2_storage_proto_depIdxs, - EnumInfos: file_google_storage_v2_storage_proto_enumTypes, - MessageInfos: file_google_storage_v2_storage_proto_msgTypes, - }.Build() - File_google_storage_v2_storage_proto = out.File - file_google_storage_v2_storage_proto_rawDesc = nil - file_google_storage_v2_storage_proto_goTypes = nil - file_google_storage_v2_storage_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// StorageClient is the client API for Storage service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type StorageClient interface { - // Permanently deletes an empty bucket. - DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Returns metadata for the specified bucket. - GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) - // Creates a new bucket. - CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) - // Retrieves a list of buckets for a given project. - ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) - // Locks retention policy on a bucket. - LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) - // Gets the IAM policy for a specified bucket. - // The `resource` field in the request should be - // `projects/_/buckets/{bucket}`. - GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) - // Updates an IAM policy for the specified bucket. - // The `resource` field in the request should be - // `projects/_/buckets/{bucket}`. - SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) - // Tests a set of permissions on the given bucket or object to see which, if - // any, are held by the caller. - // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. - TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) - // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. - UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) - // Permanently deletes a NotificationConfig. - DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // View a NotificationConfig. - GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) - // Creates a NotificationConfig for a given bucket. - // These NotificationConfigs, when triggered, publish messages to the - // specified Pub/Sub topics. See - // https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) - // Retrieves a list of NotificationConfigs for a given bucket. - ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) - // Concatenates a list of existing objects into a new object in the same - // bucket. - ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) - // Deletes an object and its metadata. - // - // Deletions are normally permanent when versioning is disabled or whenever - // the generation parameter is used. However, if soft delete is enabled for - // the bucket, deleted objects can be restored using RestoreObject until the - // soft delete retention period has passed. 
- DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Restores a soft-deleted object. - RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) - // Cancels an in-progress resumable upload. - // - // Any attempts to write to the resumable upload after cancelling the upload - // will fail. - // - // The behavior for currently in progress write operations is not guaranteed - - // they could either complete before the cancellation or fail if the - // cancellation completes first. - CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) - // Retrieves an object's metadata. - GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) - // Reads an object's data. - ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) - // Updates an object's metadata. - // Equivalent to JSON API's storage.objects.patch. - UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) - // Stores a new object and metadata. - // - // An object can be written either in a single message stream or in a - // resumable sequence of message streams. To write using a single stream, - // the client should include in the first message of the stream an - // `WriteObjectSpec` describing the destination bucket, object, and any - // preconditions. Additionally, the final message must set 'finish_write' to - // true, or else it is an error. - // - // For a resumable write, the client should instead call - // `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. - // They should then attach the returned `upload_id` to the first message of - // each following call to `WriteObject`. If the stream is closed before - // finishing the upload (either explicitly by the client or due to a network - // error or an error response from the server), the client should do as - // follows: - // - Check the result Status of the stream, to determine if writing can be - // resumed on this stream or must be restarted from scratch (by calling - // `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED, - // INTERNAL, and UNAVAILABLE. For each case, the client should use binary - // exponential backoff before retrying. Additionally, writes can be - // resumed after RESOURCE_EXHAUSTED errors, but only after taking - // appropriate measures, which may include reducing aggregate send rate - // across clients and/or requesting a quota increase for your project. - // - If the call to `WriteObject` returns `ABORTED`, that indicates - // concurrent attempts to update the resumable write, caused either by - // multiple racing clients or by a single client where the previous - // request was timed out on the client side but nonetheless reached the - // server. In this case the client should take steps to prevent further - // concurrent writes (e.g., increase the timeouts, stop using more than - // one process to perform the upload, etc.), and then should follow the - // steps below for resuming the upload. - // - For resumable errors, the client should call `QueryWriteStatus()` and - // then continue writing from the returned `persisted_size`. This may be - // less than the amount of data the client previously sent. 
Note also that - // it is acceptable to send data starting at an offset earlier than the - // returned `persisted_size`; in this case, the service will skip data at - // offsets that were already persisted (without checking that it matches - // the previously written data), and write only the data starting from the - // persisted offset. Even though the data isn't written, it may still - // incur a performance cost over resuming at the correct write offset. - // This behavior can make client-side handling simpler in some cases. - // - Clients must only send data that is a multiple of 256 KiB per message, - // unless the object is being finished with `finish_write` set to `true`. - // - // The service will not view the object as complete until the client has - // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any - // requests on a stream after sending a request with `finish_write` set to - // `true` will cause an error. The client **should** check the response it - // receives to determine how much data the service was able to commit and - // whether the service views the object as complete. - // - // Attempting to resume an already finalized object will result in an OK - // status, with a WriteObjectResponse containing the finalized object's - // metadata. - // - // Alternatively, the BidiWriteObject operation may be used to write an - // object with controls over flushing and the ability to fetch the ability to - // determine the current persisted size. - WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) - // Stores a new object and metadata. - // - // This is similar to the WriteObject call with the added support for - // manual flushing of persisted state, and the ability to determine current - // persisted size without closing the stream. - // - // The client may specify one or both of the `state_lookup` and `flush` fields - // in each BidiWriteObjectRequest. If `flush` is specified, the data written - // so far will be persisted to storage. If `state_lookup` is specified, the - // service will respond with a BidiWriteObjectResponse that contains the - // persisted size. If both `flush` and `state_lookup` are specified, the flush - // will always occur before a `state_lookup`, so that both may be set in the - // same request and the returned state will be the state of the object - // post-flush. When the stream is closed, a BidiWriteObjectResponse will - // always be sent to the client, regardless of the value of `state_lookup`. - BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) - // Retrieves a list of objects matching the criteria. - ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) - // Rewrites a source object to a destination object. Optionally overrides - // metadata. - RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) - // Starts a resumable write. How long the write operation remains valid, and - // what happens when the write operation becomes invalid, are - // service-dependent. - StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) - // Determines the `persisted_size` for an object that is being written, which - // can then be used as the `write_offset` for the next `Write()` call. 
- // - // If the object does not exist (i.e., the object has been deleted, or the - // first `Write()` has not yet reached the service), this method returns the - // error `NOT_FOUND`. - // - // The client **may** call `QueryWriteStatus()` at any time to determine how - // much data has been processed for this object. This is useful if the - // client is buffering data and needs to know which data can be safely - // evicted. For any sequence of `QueryWriteStatus()` calls for a given - // object name, the sequence of returned `persisted_size` values will be - // non-decreasing. - QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) - // Retrieves the name of a project's Google Cloud Storage service account. - GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) - // Creates a new HMAC key for the given service account. - CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) - // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Gets an existing HMAC key metadata for the given id. - GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) - // Lists HMAC keys under a given project with the additional filters provided. - ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) - // Updates a given HMAC key state between ACTIVE and INACTIVE. - UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) -} - -type storageClient struct { - cc grpc.ClientConnInterface -} - -func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { - return &storageClient{cc} -} - -func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { - out := new(Bucket) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetBucket", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) CreateBucket(ctx context.Context, in *CreateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { - out := new(Bucket) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateBucket", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) { - out := new(ListBucketsResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) { - out := new(Bucket) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { - out := new(iampb.Policy) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { - out := new(iampb.Policy) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) { - out := new(iampb.TestIamPermissionsResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) { - out := new(Bucket) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { - out := new(NotificationConfig) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { - out := new(NotificationConfig) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) { - out := new(ListNotificationConfigsResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) { - out := new(Object) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ComposeObject", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) { - out := new(Object) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RestoreObject", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) { - out := new(CancelResumableWriteResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) { - out := new(Object) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetObject", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error) { - stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[0], "/google.storage.v2.Storage/ReadObject", opts...) - if err != nil { - return nil, err - } - x := &storageReadObjectClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Storage_ReadObjectClient interface { - Recv() (*ReadObjectResponse, error) - grpc.ClientStream -} - -type storageReadObjectClient struct { - grpc.ClientStream -} - -func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) { - m := new(ReadObjectResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) { - out := new(Object) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) { - stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...) - if err != nil { - return nil, err - } - x := &storageWriteObjectClient{stream} - return x, nil -} - -type Storage_WriteObjectClient interface { - Send(*WriteObjectRequest) error - CloseAndRecv() (*WriteObjectResponse, error) - grpc.ClientStream -} - -type storageWriteObjectClient struct { - grpc.ClientStream -} - -func (x *storageWriteObjectClient) Send(m *WriteObjectRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(WriteObjectResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) { - stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...) 
- if err != nil { - return nil, err - } - x := &storageBidiWriteObjectClient{stream} - return x, nil -} - -type Storage_BidiWriteObjectClient interface { - Send(*BidiWriteObjectRequest) error - Recv() (*BidiWriteObjectResponse, error) - grpc.ClientStream -} - -type storageBidiWriteObjectClient struct { - grpc.ClientStream -} - -func (x *storageBidiWriteObjectClient) Send(m *BidiWriteObjectRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *storageBidiWriteObjectClient) Recv() (*BidiWriteObjectResponse, error) { - m := new(BidiWriteObjectResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storageClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) { - out := new(ListObjectsResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListObjects", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error) { - out := new(RewriteResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RewriteObject", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error) { - out := new(StartResumableWriteResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/StartResumableWrite", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error) { - out := new(QueryWriteStatusResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/QueryWriteStatus", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) { - out := new(ServiceAccount) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) { - out := new(CreateHmacKeyResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { - out := new(HmacKeyMetadata) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) { - out := new(ListHmacKeysResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) { - out := new(HmacKeyMetadata) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// StorageServer is the server API for Storage service. -type StorageServer interface { - // Permanently deletes an empty bucket. - DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) - // Returns metadata for the specified bucket. - GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) - // Creates a new bucket. - CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) - // Retrieves a list of buckets for a given project. - ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) - // Locks retention policy on a bucket. - LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) - // Gets the IAM policy for a specified bucket. - // The `resource` field in the request should be - // `projects/_/buckets/{bucket}`. - GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) - // Updates an IAM policy for the specified bucket. - // The `resource` field in the request should be - // `projects/_/buckets/{bucket}`. - SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) - // Tests a set of permissions on the given bucket or object to see which, if - // any, are held by the caller. - // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. - TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) - // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. - UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) - // Permanently deletes a NotificationConfig. - DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) - // View a NotificationConfig. - GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) - // Creates a NotificationConfig for a given bucket. - // These NotificationConfigs, when triggered, publish messages to the - // specified Pub/Sub topics. See - // https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) - // Retrieves a list of NotificationConfigs for a given bucket. - ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) - // Concatenates a list of existing objects into a new object in the same - // bucket. - ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) - // Deletes an object and its metadata. - // - // Deletions are normally permanent when versioning is disabled or whenever - // the generation parameter is used. 
However, if soft delete is enabled for - // the bucket, deleted objects can be restored using RestoreObject until the - // soft delete retention period has passed. - DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) - // Restores a soft-deleted object. - RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) - // Cancels an in-progress resumable upload. - // - // Any attempts to write to the resumable upload after cancelling the upload - // will fail. - // - // The behavior for currently in progress write operations is not guaranteed - - // they could either complete before the cancellation or fail if the - // cancellation completes first. - CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) - // Retrieves an object's metadata. - GetObject(context.Context, *GetObjectRequest) (*Object, error) - // Reads an object's data. - ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error - // Updates an object's metadata. - // Equivalent to JSON API's storage.objects.patch. - UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) - // Stores a new object and metadata. - // - // An object can be written either in a single message stream or in a - // resumable sequence of message streams. To write using a single stream, - // the client should include in the first message of the stream an - // `WriteObjectSpec` describing the destination bucket, object, and any - // preconditions. Additionally, the final message must set 'finish_write' to - // true, or else it is an error. - // - // For a resumable write, the client should instead call - // `StartResumableWrite()`, populating a `WriteObjectSpec` into that request. - // They should then attach the returned `upload_id` to the first message of - // each following call to `WriteObject`. If the stream is closed before - // finishing the upload (either explicitly by the client or due to a network - // error or an error response from the server), the client should do as - // follows: - // - Check the result Status of the stream, to determine if writing can be - // resumed on this stream or must be restarted from scratch (by calling - // `StartResumableWrite()`). The resumable errors are DEADLINE_EXCEEDED, - // INTERNAL, and UNAVAILABLE. For each case, the client should use binary - // exponential backoff before retrying. Additionally, writes can be - // resumed after RESOURCE_EXHAUSTED errors, but only after taking - // appropriate measures, which may include reducing aggregate send rate - // across clients and/or requesting a quota increase for your project. - // - If the call to `WriteObject` returns `ABORTED`, that indicates - // concurrent attempts to update the resumable write, caused either by - // multiple racing clients or by a single client where the previous - // request was timed out on the client side but nonetheless reached the - // server. In this case the client should take steps to prevent further - // concurrent writes (e.g., increase the timeouts, stop using more than - // one process to perform the upload, etc.), and then should follow the - // steps below for resuming the upload. - // - For resumable errors, the client should call `QueryWriteStatus()` and - // then continue writing from the returned `persisted_size`. This may be - // less than the amount of data the client previously sent. 
Note also that - // it is acceptable to send data starting at an offset earlier than the - // returned `persisted_size`; in this case, the service will skip data at - // offsets that were already persisted (without checking that it matches - // the previously written data), and write only the data starting from the - // persisted offset. Even though the data isn't written, it may still - // incur a performance cost over resuming at the correct write offset. - // This behavior can make client-side handling simpler in some cases. - // - Clients must only send data that is a multiple of 256 KiB per message, - // unless the object is being finished with `finish_write` set to `true`. - // - // The service will not view the object as complete until the client has - // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any - // requests on a stream after sending a request with `finish_write` set to - // `true` will cause an error. The client **should** check the response it - // receives to determine how much data the service was able to commit and - // whether the service views the object as complete. - // - // Attempting to resume an already finalized object will result in an OK - // status, with a WriteObjectResponse containing the finalized object's - // metadata. - // - // Alternatively, the BidiWriteObject operation may be used to write an - // object with controls over flushing and the ability to fetch the ability to - // determine the current persisted size. - WriteObject(Storage_WriteObjectServer) error - // Stores a new object and metadata. - // - // This is similar to the WriteObject call with the added support for - // manual flushing of persisted state, and the ability to determine current - // persisted size without closing the stream. - // - // The client may specify one or both of the `state_lookup` and `flush` fields - // in each BidiWriteObjectRequest. If `flush` is specified, the data written - // so far will be persisted to storage. If `state_lookup` is specified, the - // service will respond with a BidiWriteObjectResponse that contains the - // persisted size. If both `flush` and `state_lookup` are specified, the flush - // will always occur before a `state_lookup`, so that both may be set in the - // same request and the returned state will be the state of the object - // post-flush. When the stream is closed, a BidiWriteObjectResponse will - // always be sent to the client, regardless of the value of `state_lookup`. - BidiWriteObject(Storage_BidiWriteObjectServer) error - // Retrieves a list of objects matching the criteria. - ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) - // Rewrites a source object to a destination object. Optionally overrides - // metadata. - RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) - // Starts a resumable write. How long the write operation remains valid, and - // what happens when the write operation becomes invalid, are - // service-dependent. - StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) - // Determines the `persisted_size` for an object that is being written, which - // can then be used as the `write_offset` for the next `Write()` call. - // - // If the object does not exist (i.e., the object has been deleted, or the - // first `Write()` has not yet reached the service), this method returns the - // error `NOT_FOUND`. 
- // - // The client **may** call `QueryWriteStatus()` at any time to determine how - // much data has been processed for this object. This is useful if the - // client is buffering data and needs to know which data can be safely - // evicted. For any sequence of `QueryWriteStatus()` calls for a given - // object name, the sequence of returned `persisted_size` values will be - // non-decreasing. - QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) - // Retrieves the name of a project's Google Cloud Storage service account. - GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) - // Creates a new HMAC key for the given service account. - CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) - // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) - // Gets an existing HMAC key metadata for the given id. - GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) - // Lists HMAC keys under a given project with the additional filters provided. - ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) - // Updates a given HMAC key state between ACTIVE and INACTIVE. - UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) -} - -// UnimplementedStorageServer can be embedded to have forward compatible implementations. -type UnimplementedStorageServer struct { -} - -func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") -} -func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented") -} -func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented") -} -func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") -} -func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") -} -func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") -} -func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") -} -func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") -} -func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") -} -func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, 
*DeleteNotificationConfigRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented") -} -func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented") -} -func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") -} -func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") -} -func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented") -} -func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") -} -func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented") -} -func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error { - return status.Errorf(codes.Unimplemented, "method ReadObject not implemented") -} -func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented") -} -func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { - return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") -} -func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error { - return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented") -} -func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") -} -func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented") -} -func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented") -} -func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not 
implemented") -} -func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented") -} -func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") -} -func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") -} -func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented") -} -func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented") -} -func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented") -} - -func RegisterStorageServer(s *grpc.Server, srv StorageServer) { - s.RegisterService(&_Storage_serviceDesc, srv) -} - -func _Storage_DeleteBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteBucketRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteBucket(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteBucket", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteBucket(ctx, req.(*DeleteBucketRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetBucketRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetBucket(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetBucket", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetBucket(ctx, req.(*GetBucketRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateBucketRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateBucket(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateBucket", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateBucket(ctx, req.(*CreateBucketRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListBucketsRequest) - if 
err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListBuckets(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListBuckets", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListBuckets(ctx, req.(*ListBucketsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_LockBucketRetentionPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LockBucketRetentionPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).LockBucketRetentionPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/LockBucketRetentionPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).LockBucketRetentionPolicy(ctx, req.(*LockBucketRetentionPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(iampb.GetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(iampb.SetIamPolicyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).SetIamPolicy(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/SetIamPolicy", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(iampb.TestIamPermissionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).TestIamPermissions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/TestIamPermissions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateBucketRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).UpdateBucket(ctx, in) - } - info 
:= &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/UpdateBucket", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).UpdateBucket(ctx, req.(*UpdateBucketRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNotificationConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateNotificationConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNotificationConfigsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListNotificationConfigs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ComposeObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ComposeObject(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ComposeObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ComposeObject(ctx, req.(*ComposeObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteObject(ctx, req.(*DeleteObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_RestoreObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RestoreObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).RestoreObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/RestoreObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).RestoreObject(ctx, req.(*RestoreObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CancelResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelResumableWriteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CancelResumableWrite(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CancelResumableWrite", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CancelResumableWrite(ctx, req.(*CancelResumableWriteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetObject(ctx, req.(*GetObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ReadObject_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ReadObjectRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(StorageServer).ReadObject(m, &storageReadObjectServer{stream}) -} - -type Storage_ReadObjectServer interface { - Send(*ReadObjectResponse) error - grpc.ServerStream -} - -type storageReadObjectServer struct { - grpc.ServerStream -} - -func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Storage_UpdateObject_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).UpdateObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/UpdateObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).UpdateObject(ctx, req.(*UpdateObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_WriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StorageServer).WriteObject(&storageWriteObjectServer{stream}) -} - -type Storage_WriteObjectServer interface { - SendAndClose(*WriteObjectResponse) error - Recv() (*WriteObjectRequest, error) - grpc.ServerStream -} - -type storageWriteObjectServer struct { - grpc.ServerStream -} - -func (x *storageWriteObjectServer) SendAndClose(m *WriteObjectResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storageWriteObjectServer) Recv() (*WriteObjectRequest, error) { - m := new(WriteObjectRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Storage_BidiWriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StorageServer).BidiWriteObject(&storageBidiWriteObjectServer{stream}) -} - -type Storage_BidiWriteObjectServer interface { - Send(*BidiWriteObjectResponse) error - Recv() (*BidiWriteObjectRequest, error) - grpc.ServerStream -} - -type storageBidiWriteObjectServer struct { - grpc.ServerStream -} - -func (x *storageBidiWriteObjectServer) Send(m *BidiWriteObjectResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storageBidiWriteObjectServer) Recv() (*BidiWriteObjectRequest, error) { - m := new(BidiWriteObjectRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Storage_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListObjectsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListObjects(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListObjects", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListObjects(ctx, req.(*ListObjectsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_RewriteObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RewriteObjectRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).RewriteObject(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/RewriteObject", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).RewriteObject(ctx, req.(*RewriteObjectRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_StartResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(StartResumableWriteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).StartResumableWrite(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/StartResumableWrite", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).StartResumableWrite(ctx, req.(*StartResumableWriteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryWriteStatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).QueryWriteStatus(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/QueryWriteStatus", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).QueryWriteStatus(ctx, req.(*QueryWriteStatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServiceAccountRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetServiceAccount(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetServiceAccount", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).CreateHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).DeleteHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).GetHmacKey(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/GetHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListHmacKeysRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).ListHmacKeys(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/ListHmacKeys", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateHmacKeyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StorageServer).UpdateHmacKey(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.storage.v2.Storage/UpdateHmacKey", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Storage_serviceDesc = grpc.ServiceDesc{ - ServiceName: "google.storage.v2.Storage", - HandlerType: (*StorageServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "DeleteBucket", - Handler: _Storage_DeleteBucket_Handler, - }, - { - MethodName: "GetBucket", - Handler: _Storage_GetBucket_Handler, - }, - { - MethodName: "CreateBucket", - Handler: _Storage_CreateBucket_Handler, - }, - { - MethodName: "ListBuckets", - Handler: _Storage_ListBuckets_Handler, - }, - { - MethodName: "LockBucketRetentionPolicy", - Handler: _Storage_LockBucketRetentionPolicy_Handler, - }, - { - MethodName: "GetIamPolicy", - Handler: _Storage_GetIamPolicy_Handler, - }, - { - MethodName: "SetIamPolicy", - Handler: _Storage_SetIamPolicy_Handler, - }, - { - MethodName: "TestIamPermissions", - Handler: _Storage_TestIamPermissions_Handler, - }, - { - MethodName: "UpdateBucket", - Handler: _Storage_UpdateBucket_Handler, - }, - { - MethodName: "DeleteNotificationConfig", - Handler: _Storage_DeleteNotificationConfig_Handler, - }, - { - MethodName: "GetNotificationConfig", - Handler: _Storage_GetNotificationConfig_Handler, - }, - { - MethodName: "CreateNotificationConfig", - Handler: _Storage_CreateNotificationConfig_Handler, - }, - { - MethodName: "ListNotificationConfigs", - Handler: _Storage_ListNotificationConfigs_Handler, - }, - { - MethodName: "ComposeObject", - Handler: _Storage_ComposeObject_Handler, - }, - { - MethodName: "DeleteObject", - Handler: _Storage_DeleteObject_Handler, - }, - { - MethodName: "RestoreObject", - Handler: _Storage_RestoreObject_Handler, - }, - { - MethodName: "CancelResumableWrite", - Handler: _Storage_CancelResumableWrite_Handler, - }, - { - MethodName: "GetObject", - Handler: _Storage_GetObject_Handler, - }, - { - MethodName: "UpdateObject", - Handler: _Storage_UpdateObject_Handler, - }, - { - MethodName: "ListObjects", - Handler: _Storage_ListObjects_Handler, - }, - { - MethodName: 
"RewriteObject", - Handler: _Storage_RewriteObject_Handler, - }, - { - MethodName: "StartResumableWrite", - Handler: _Storage_StartResumableWrite_Handler, - }, - { - MethodName: "QueryWriteStatus", - Handler: _Storage_QueryWriteStatus_Handler, - }, - { - MethodName: "GetServiceAccount", - Handler: _Storage_GetServiceAccount_Handler, - }, - { - MethodName: "CreateHmacKey", - Handler: _Storage_CreateHmacKey_Handler, - }, - { - MethodName: "DeleteHmacKey", - Handler: _Storage_DeleteHmacKey_Handler, - }, - { - MethodName: "GetHmacKey", - Handler: _Storage_GetHmacKey_Handler, - }, - { - MethodName: "ListHmacKeys", - Handler: _Storage_ListHmacKeys_Handler, - }, - { - MethodName: "UpdateHmacKey", - Handler: _Storage_UpdateHmacKey_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ReadObject", - Handler: _Storage_ReadObject_Handler, - ServerStreams: true, - }, - { - StreamName: "WriteObject", - Handler: _Storage_WriteObject_Handler, - ClientStreams: true, - }, - { - StreamName: "BidiWriteObject", - Handler: _Storage_BidiWriteObject_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "google/storage/v2/storage.proto", -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/version.go b/vendor/cloud.google.com/go/storage/internal/apiv2/version.go deleted file mode 100644 index 15920f3f6..000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/version.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by gapicgen. DO NOT EDIT. - -package storage - -import "cloud.google.com/go/storage/internal" - -func init() { - versionClient = internal.Version -} diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go deleted file mode 100644 index 8d99cbc09..000000000 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -// Version is the current tagged release of the library. 
-const Version = "1.38.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go deleted file mode 100644 index 1b52eb5d2..000000000 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/url" - "strings" - - "cloud.google.com/go/internal" - "cloud.google.com/go/internal/version" - sinternal "cloud.google.com/go/storage/internal" - "github.com/google/uuid" - gax "github.com/googleapis/gax-go/v2" - "github.com/googleapis/gax-go/v2/callctx" - "google.golang.org/api/googleapi" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var defaultRetry *retryConfig = &retryConfig{} -var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version) - -const ( - xGoogHeaderKey = "x-goog-api-client" - idempotencyHeaderKey = "x-goog-gcs-idempotency-token" -) - -// run determines whether a retry is necessary based on the config and -// idempotency information. It then calls the function with or without retries -// as appropriate, using the configured settings. -func run(ctx context.Context, call func(ctx context.Context) error, retry *retryConfig, isIdempotent bool) error { - attempts := 1 - invocationID := uuid.New().String() - - if retry == nil { - retry = defaultRetry - } - if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever { - ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) - return call(ctxWithHeaders) - } - bo := gax.Backoff{} - if retry.backoff != nil { - bo.Multiplier = retry.backoff.Multiplier - bo.Initial = retry.backoff.Initial - bo.Max = retry.backoff.Max - } - var errorFunc func(err error) bool = ShouldRetry - if retry.shouldRetry != nil { - errorFunc = retry.shouldRetry - } - - return internal.Retry(ctx, bo, func() (stop bool, err error) { - ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) - err = call(ctxWithHeaders) - if retry.maxAttempts != nil && attempts >= *retry.maxAttempts { - return true, err - } - attempts++ - return !errorFunc(err), err - }) -} - -// Sets invocation ID headers on the context which will be propagated as -// headers in the call to the service (for both gRPC and HTTP). -func setInvocationHeaders(ctx context.Context, invocationID string, attempts int) context.Context { - invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) - xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") - - ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) - ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID) - return ctx -} - -// ShouldRetry returns true if an error is retryable, based on best practice -// guidance from GCS. 
See -// https://cloud.google.com/storage/docs/retry-strategy#go for more information -// on what errors are considered retryable. -// -// If you would like to customize retryable errors, use the WithErrorFunc to -// supply a RetryOption to your library calls. For example, to retry additional -// errors, you can write a custom func that wraps ShouldRetry and also specifies -// additional errors that should return true. -func ShouldRetry(err error) bool { - if err == nil { - return false - } - if errors.Is(err, io.ErrUnexpectedEOF) { - return true - } - - switch e := err.(type) { - case *net.OpError: - if strings.Contains(e.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string - return true - } - case *googleapi.Error: - // Retry on 408, 429, and 5xx, according to - // https://cloud.google.com/storage/docs/exponential-backoff. - return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600) - case *url.Error: - // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). - // Unfortunately the error type is unexported, so we resort to string - // matching. - retriable := []string{"connection refused", "connection reset"} - for _, s := range retriable { - if strings.Contains(e.Error(), s) { - return true - } - } - case interface{ Temporary() bool }: - if e.Temporary() { - return true - } - } - // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC. - if st, ok := status.FromError(err); ok { - if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal { - return true - } - } - // Unwrap is only supported in go1.13.x+ - if e, ok := err.(interface{ Unwrap() error }); ok { - return ShouldRetry(e.Unwrap()) - } - return false -} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go deleted file mode 100644 index 56f3e3daa..000000000 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - "regexp" - - "cloud.google.com/go/internal/trace" - "cloud.google.com/go/storage/internal/apiv2/storagepb" - raw "google.golang.org/api/storage/v1" -) - -// A Notification describes how to send Cloud PubSub messages when certain -// events occur in a bucket. -type Notification struct { - //The ID of the notification. - ID string - - // The ID of the topic to which this subscription publishes. - TopicID string - - // The ID of the project to which the topic belongs. - TopicProjectID string - - // Only send notifications about listed event types. If empty, send notifications - // for all event types. - // See https://cloud.google.com/storage/docs/pubsub-notifications#events. - EventTypes []string - - // If present, only apply this notification configuration to object names that - // begin with this prefix. 
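As the ShouldRetry documentation above suggests, callers usually extend the default predicate rather than replace it, and attach it through a retry option so the run helper above consults it on every attempt. A minimal sketch, assuming the exported storage.ShouldRetry, storage.WithErrorFunc, storage.WithPolicy and ObjectHandle.Retryer from this same package; the sentinel error, bucket and object names are placeholders:

package main

import (
	"context"
	"errors"
	"log"

	"cloud.google.com/go/storage"
)

// errFlaky is a hypothetical sentinel error from caller code that should
// also be treated as retryable.
var errFlaky = errors.New("flaky dependency")

// shouldRetry wraps the library default and adds one extra case.
func shouldRetry(err error) bool {
	return errors.Is(err, errFlaky) || storage.ShouldRetry(err)
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// The predicate travels in the handle's retry configuration and is
	// consulted on each attempt of every call made through this handle.
	obj := client.Bucket("my-bucket").Object("my-object").
		Retryer(storage.WithErrorFunc(shouldRetry), storage.WithPolicy(storage.RetryAlways))
	_ = obj
}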
- ObjectNamePrefix string - - // An optional list of additional attributes to attach to each Cloud PubSub - // message published for this notification subscription. - CustomAttributes map[string]string - - // The contents of the message payload. - // See https://cloud.google.com/storage/docs/pubsub-notifications#payload. - PayloadFormat string -} - -// Values for Notification.PayloadFormat. -const ( - // Send no payload with notification messages. - NoPayload = "NONE" - - // Send object metadata as JSON with notification messages. - JSONPayload = "JSON_API_V1" -) - -// Values for Notification.EventTypes. -const ( - // Event that occurs when an object is successfully created. - ObjectFinalizeEvent = "OBJECT_FINALIZE" - - // Event that occurs when the metadata of an existing object changes. - ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE" - - // Event that occurs when an object is permanently deleted. - ObjectDeleteEvent = "OBJECT_DELETE" - - // Event that occurs when the live version of an object becomes an - // archived version. - ObjectArchiveEvent = "OBJECT_ARCHIVE" -) - -func toNotification(rn *raw.Notification) *Notification { - n := &Notification{ - ID: rn.Id, - EventTypes: rn.EventTypes, - ObjectNamePrefix: rn.ObjectNamePrefix, - CustomAttributes: rn.CustomAttributes, - PayloadFormat: rn.PayloadFormat, - } - n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic) - return n -} - -func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification { - n := &Notification{ - ID: pbn.GetName(), - EventTypes: pbn.GetEventTypes(), - ObjectNamePrefix: pbn.GetObjectNamePrefix(), - CustomAttributes: pbn.GetCustomAttributes(), - PayloadFormat: pbn.GetPayloadFormat(), - } - n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic) - return n -} - -func toProtoNotification(n *Notification) *storagepb.NotificationConfig { - return &storagepb.NotificationConfig{ - Name: n.ID, - Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", - n.TopicProjectID, n.TopicID), - EventTypes: n.EventTypes, - ObjectNamePrefix: n.ObjectNamePrefix, - CustomAttributes: n.CustomAttributes, - PayloadFormat: n.PayloadFormat, - } -} - -var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)") - -// parseNotificationTopic extracts the project and topic IDs from from the full -// resource name returned by the service. If the name is malformed, it returns -// "?" for both IDs. -func parseNotificationTopic(nt string) (projectID, topicID string) { - matches := topicRE.FindStringSubmatch(nt) - if matches == nil { - return "?", "?" - } - return matches[1], matches[2] -} - -func toRawNotification(n *Notification) *raw.Notification { - return &raw.Notification{ - Id: n.ID, - Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", - n.TopicProjectID, n.TopicID), - EventTypes: n.EventTypes, - ObjectNamePrefix: n.ObjectNamePrefix, - CustomAttributes: n.CustomAttributes, - PayloadFormat: string(n.PayloadFormat), - } -} - -// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID -// and PayloadFormat, and must not set its ID. The other fields are all optional. The -// returned Notification's ID can be used to refer to it. 
-func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") - defer func() { trace.EndSpan(ctx, err) }() - - if n.ID != "" { - return nil, errors.New("storage: AddNotification: ID must not be set") - } - if n.TopicProjectID == "" { - return nil, errors.New("storage: AddNotification: missing TopicProjectID") - } - if n.TopicID == "" { - return nil, errors.New("storage: AddNotification: missing TopicID") - } - - opts := makeStorageOpts(false, b.retry, b.userProject) - ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...) - return ret, err -} - -// Notifications returns all the Notifications configured for this bucket, as a map -// indexed by notification ID. -func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") - defer func() { trace.EndSpan(ctx, err) }() - - opts := makeStorageOpts(true, b.retry, b.userProject) - n, err = b.c.tc.ListNotifications(ctx, b.name, opts...) - return n, err -} - -func notificationsToMap(rns []*raw.Notification) map[string]*Notification { - m := map[string]*Notification{} - for _, rn := range rns { - m[rn.Id] = toNotification(rn) - } - return m -} - -func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification { - m := map[string]*Notification{} - for _, n := range ns { - m[n.Name] = toNotificationFromProto(n) - } - return m -} - -// DeleteNotification deletes the notification with the given ID. -func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") - defer func() { trace.EndSpan(ctx, err) }() - - opts := makeStorageOpts(true, b.retry, b.userProject) - return b.c.tc.DeleteNotification(ctx, b.name, id, opts...) -} diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go deleted file mode 100644 index e72ceb78f..000000000 --- a/vendor/cloud.google.com/go/storage/option.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" -) - -// storageConfig contains the Storage client option configuration that can be -// set through storageClientOptions. -type storageConfig struct { - useJSONforReads bool - readAPIWasSet bool -} - -// newStorageConfig generates a new storageConfig with all the given -// storageClientOptions applied. -func newStorageConfig(opts ...option.ClientOption) storageConfig { - var conf storageConfig - for _, opt := range opts { - if storageOpt, ok := opt.(storageClientOption); ok { - storageOpt.ApplyStorageOpt(&conf) - } - } - return conf -} - -// A storageClientOption is an option for a Google Storage client. 
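For reference, the notification surface removed above is driven through a BucketHandle: AddNotification requires TopicProjectID, TopicID and PayloadFormat and leaves ID to the service. A minimal sketch with placeholder bucket, project and topic names:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bucket := client.Bucket("my-bucket")

	// Create a notification that fires only when objects are finalized.
	n, err := bucket.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-project",
		TopicID:        "my-topic",
		PayloadFormat:  storage.JSONPayload,
		EventTypes:     []string{storage.ObjectFinalizeEvent},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created notification", n.ID)

	// Existing notifications come back as a map keyed by notification ID.
	all, err := bucket.Notifications(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for id := range all {
		fmt.Println("configured notification", id)
	}
}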
-type storageClientOption interface { - option.ClientOption - ApplyStorageOpt(*storageConfig) -} - -// WithJSONReads is an option that may be passed to a Storage Client on creation. -// It sets the client to use the JSON API for object reads. Currently, the -// default API used for reads is XML. -// Setting this option is required to use the GenerationNotMatch condition. -// -// Note that when this option is set, reads will return a zero date for -// [ReaderObjectAttrs].LastModified and may return a different value for -// [ReaderObjectAttrs].CacheControl. -func WithJSONReads() option.ClientOption { - return &withReadAPI{useJSON: true} -} - -// WithXMLReads is an option that may be passed to a Storage Client on creation. -// It sets the client to use the XML API for object reads. -// -// This is the current default. -func WithXMLReads() option.ClientOption { - return &withReadAPI{useJSON: false} -} - -type withReadAPI struct { - internaloption.EmbeddableAdapter - useJSON bool -} - -func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) { - c.useJSONforReads = w.useJSON - c.readAPIWasSet = true -} diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go deleted file mode 100644 index 6bc73fb7a..000000000 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" - "strings" - "time" -) - -// PostPolicyV4Options are used to construct a signed post policy. -// Please see https://cloud.google.com/storage/docs/xml-api/post-object -// for reference about the fields. -type PostPolicyV4Options struct { - // GoogleAccessID represents the authorizer of the signed post policy generation. - // It is typically the Google service account client email address from - // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". - // Required. - GoogleAccessID string - - // PrivateKey is the Google service account private key. It is obtainable - // from the Google Developers Console. - // At https://console.developers.google.com/project//apiui/credential, - // create a service account client ID or reuse one of your existing service account - // credentials. Click on the "Generate new P12 key" to generate and download - // a new private key. Once you download the P12 file, use the following command - // to convert it into a PEM file. - // - // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes - // - // Provide the contents of the PEM file as a byte slice. - // Exactly one of PrivateKey or SignBytes must be non-nil. - PrivateKey []byte - - // SignBytes is a function for implementing custom signing. - // - // Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined, - // SignBytes will be ignored. 
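The read-API options above are plain client options applied at construction time. A minimal sketch; whether JSON reads are appropriate depends on the caller (for example, the GenerationNotMatch read condition mentioned above needs them):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// Switch object downloads from the default XML API to the JSON API.
	client, err := storage.NewClient(ctx, storage.WithJSONReads())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}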
- // This SignBytes function expects the bytes it receives to be hashed, while - // SignRawBytes accepts the raw bytes without hashing, allowing more flexibility. - // Add the following to the top of your signing function to hash the bytes - // to use SignRawBytes instead: - // shaSum := sha256.Sum256(bytes) - // bytes = shaSum[:] - // - SignBytes func(hashBytes []byte) (signature []byte, err error) - - // SignRawBytes is a function for implementing custom signing. For example, if - // your application is running on Google App Engine, you can use - // appengine's internal signing function: - // ctx := appengine.NewContext(request) - // acc, _ := appengine.ServiceAccount(ctx) - // &PostPolicyV4Options{ - // GoogleAccessID: acc, - // SignRawBytes: func(b []byte) ([]byte, error) { - // _, signedBytes, err := appengine.SignBytes(ctx, b) - // return signedBytes, err - // }, - // // etc. - // }) - // - // SignRawBytes is equivalent to the SignBytes field on SignedURLOptions; - // that is, you may use the same signing function for the two. - // - // Exactly one of PrivateKey or SignRawBytes must be non-nil. - SignRawBytes func(bytes []byte) (signature []byte, err error) - - // Expires is the expiration time on the signed post policy. - // It must be a time in the future. - // Required. - Expires time.Time - - // Style provides options for the type of URL to use. Options are - // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See - // https://cloud.google.com/storage/docs/request-endpoints for details. - // Optional. - Style URLStyle - - // Insecure when set indicates that the generated URL's scheme - // will use "http" instead of "https" (default). - // Optional. - Insecure bool - - // Fields specifies the attributes of a PostPolicyV4 request. - // When Fields is non-nil, its attributes must match those that will - // passed into field Conditions. - // Optional. - Fields *PolicyV4Fields - - // The conditions that the uploaded file will be expected to conform to. - // When used, the failure of an upload to satisfy a condition will result in - // a 4XX status code, back with the message describing the problem. - // Optional. - Conditions []PostPolicyV4Condition - - // Hostname sets the host of the signed post policy. This field overrides - // any endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. - // Only compatible with PathStyle URLStyle. - // Optional. - Hostname string - - shouldHashSignBytes bool -} - -func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options { - return &PostPolicyV4Options{ - GoogleAccessID: opts.GoogleAccessID, - PrivateKey: opts.PrivateKey, - SignBytes: opts.SignBytes, - SignRawBytes: opts.SignRawBytes, - Expires: opts.Expires, - Style: opts.Style, - Insecure: opts.Insecure, - Fields: opts.Fields, - Conditions: opts.Conditions, - shouldHashSignBytes: opts.shouldHashSignBytes, - Hostname: opts.Hostname, - } -} - -// PolicyV4Fields describes the attributes for a PostPolicyV4 request. -type PolicyV4Fields struct { - // ACL specifies the access control permissions for the object. - // Optional. - ACL string - // CacheControl specifies the caching directives for the object. - // Optional. - CacheControl string - // ContentType specifies the media type of the object. - // Optional. - ContentType string - // ContentDisposition specifies how the file will be served back to requesters. - // Optional. - ContentDisposition string - // ContentEncoding specifies the decompressive transcoding that the object. 
- // This field is complementary to ContentType in that the file could be - // compressed but ContentType specifies the file's original media type. - // Optional. - ContentEncoding string - // Metadata specifies custom metadata for the object. - // If any key doesn't begin with "x-goog-meta-", an error will be returned. - // Optional. - Metadata map[string]string - // StatusCodeOnSuccess when set, specifies the status code that Cloud Storage - // will serve back on successful upload of the object. - // Optional. - StatusCodeOnSuccess int - // RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage - // will serve back on successful upload of the object. - // Optional. - RedirectToURLOnSuccess string -} - -// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request. -type PostPolicyV4 struct { - // URL is the generated URL that the file upload will be made to. - URL string - // Fields specifies the generated key-values that the file uploader - // must include in their multipart upload form. - Fields map[string]string -} - -// PostPolicyV4Condition describes the constraints that the subsequent -// object upload's multipart form fields will be expected to conform to. -type PostPolicyV4Condition interface { - isEmpty() bool - json.Marshaler -} - -type startsWith struct { - key, value string -} - -func (sw *startsWith) MarshalJSON() ([]byte, error) { - return json.Marshal([]string{"starts-with", sw.key, sw.value}) -} -func (sw *startsWith) isEmpty() bool { - return sw.value == "" -} - -// ConditionStartsWith checks that an attributes starts with value. -// An empty value will cause this condition to be ignored. -func ConditionStartsWith(key, value string) PostPolicyV4Condition { - return &startsWith{key, value} -} - -type contentLengthRangeCondition struct { - start, end uint64 -} - -func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{"content-length-range", clr.start, clr.end}) -} -func (clr *contentLengthRangeCondition) isEmpty() bool { - return clr.start == 0 && clr.end == 0 -} - -type singleValueCondition struct { - name, value string -} - -func (svc *singleValueCondition) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]string{svc.name: svc.value}) -} -func (svc *singleValueCondition) isEmpty() bool { - return svc.value == "" -} - -// ConditionContentLengthRange constraints the limits that the -// multipart upload's range header will be expected to be within. -func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition { - return &contentLengthRangeCondition{start, end} -} - -func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition { - return &singleValueCondition{"success_action_redirect", redirectURL} -} - -func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { - svc := &singleValueCondition{name: "success_action_status"} - if statusCode > 0 { - svc.value = fmt.Sprintf("%d", statusCode) - } - return svc -} - -// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. -// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. -// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4 -// method which uses the Client's credentials to handle authentication. 
-func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { - if bucket == "" { - return nil, errors.New("storage: bucket must be non-empty") - } - if object == "" { - return nil, errors.New("storage: object must be non-empty") - } - now := utcNow() - if err := validatePostPolicyV4Options(opts, now); err != nil { - return nil, err - } - - var signingFn func(hashedBytes []byte) ([]byte, error) - switch { - case opts.SignRawBytes != nil: - signingFn = opts.SignRawBytes - case opts.shouldHashSignBytes: - signingFn = opts.SignBytes - case len(opts.PrivateKey) != 0: - parsedRSAPrivKey, err := parseKey(opts.PrivateKey) - if err != nil { - return nil, err - } - signingFn = func(b []byte) ([]byte, error) { - sum := sha256.Sum256(b) - return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:]) - } - - default: - return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") - } - - var descFields PolicyV4Fields - if opts.Fields != nil { - descFields = *opts.Fields - } - - if err := validateMetadata(descFields.Metadata); err != nil { - return nil, err - } - - // Build the policy. - conds := make([]PostPolicyV4Condition, len(opts.Conditions)) - copy(conds, opts.Conditions) - conds = append(conds, - // These are ordered lexicographically. Technically the order doesn't matter - // for creating the policy, but we use this order to match the - // cross-language conformance tests for this feature. - &singleValueCondition{"acl", descFields.ACL}, - &singleValueCondition{"cache-control", descFields.CacheControl}, - &singleValueCondition{"content-disposition", descFields.ContentDisposition}, - &singleValueCondition{"content-encoding", descFields.ContentEncoding}, - &singleValueCondition{"content-type", descFields.ContentType}, - conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), - conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), - ) - - YYYYMMDD := now.Format(yearMonthDay) - policyFields := map[string]string{ - "key": object, - "x-goog-date": now.Format(iso8601), - "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", - "x-goog-algorithm": "GOOG4-RSA-SHA256", - "acl": descFields.ACL, - "cache-control": descFields.CacheControl, - "content-disposition": descFields.ContentDisposition, - "content-encoding": descFields.ContentEncoding, - "content-type": descFields.ContentType, - "success_action_redirect": descFields.RedirectToURLOnSuccess, - } - for key, value := range descFields.Metadata { - conds = append(conds, &singleValueCondition{key, value}) - policyFields[key] = value - } - - // Following from the order expected by the conformance test cases, - // hence manually inserting these fields in a specific order. 
- conds = append(conds, - &singleValueCondition{"bucket", bucket}, - &singleValueCondition{"key", object}, - &singleValueCondition{"x-goog-date", now.Format(iso8601)}, - &singleValueCondition{ - name: "x-goog-credential", - value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", - }, - &singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"}, - ) - - nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions)) - for _, cond := range conds { - if cond == nil || !cond.isEmpty() { - nonEmptyConds = append(nonEmptyConds, cond) - } - } - condsAsJSON, err := json.Marshal(map[string]interface{}{ - "conditions": nonEmptyConds, - "expiration": opts.Expires.Format(time.RFC3339), - }) - if err != nil { - return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %w", err) - } - - b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) - var signature []byte - var signErr error - - if opts.shouldHashSignBytes { - // SignBytes expects hashed bytes as input instead of raw bytes, so we hash them - shaSum := sha256.Sum256([]byte(b64Policy)) - signature, signErr = signingFn(shaSum[:]) - } else { - signature, signErr = signingFn([]byte(b64Policy)) - } - if signErr != nil { - return nil, signErr - } - - policyFields["policy"] = b64Policy - policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature) - - // Construct the URL. - scheme := "https" - if opts.Insecure { - scheme = "http" - } - path := opts.Style.path(bucket, "") + "/" - u := &url.URL{ - Path: path, - RawPath: pathEncodeV4(path), - Host: opts.Style.host(opts.Hostname, bucket), - Scheme: scheme, - } - - if descFields.StatusCodeOnSuccess > 0 { - policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess) - } - - // Clear out fields with blanks values. - for key, value := range policyFields { - if value == "" { - delete(policyFields, key) - } - } - pp4 := &PostPolicyV4{ - Fields: policyFields, - URL: u.String(), - } - return pp4, nil -} - -// validatePostPolicyV4Options checks that: -// * GoogleAccessID is set -// * either PrivateKey or SignRawBytes/SignBytes is set, but not both -// * the deadline set in Expires is not in the past -// * if Style is not set, it'll use PathStyle -// * sets shouldHashSignBytes to true if opts.SignBytes should be used -func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { - if opts == nil || opts.GoogleAccessID == "" { - return errors.New("storage: missing required GoogleAccessID") - } - if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank { - return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set") - } - if opts.Expires.Before(now) { - return errors.New("storage: expecting Expires to be in the future") - } - if opts.Style == nil { - opts.Style = PathStyle() - } - if opts.SignRawBytes == nil && opts.SignBytes != nil { - opts.shouldHashSignBytes = true - } - return nil -} - -// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-", -// otherwise it will return an error. 
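A minimal sketch of how the signed post policy above is normally produced and consumed: BucketHandle.GenerateSignedPostPolicyV4 lets the client's own credentials do the signing (they must be able to sign, for example via a service account key or signBlob access), and an unauthenticated uploader then posts a multipart form built from the returned fields. Bucket, object, expiry and content type below are placeholders:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"log"
	"mime/multipart"
	"net/http"
	"strings"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	pv4, err := client.Bucket("my-bucket").GenerateSignedPostPolicyV4("uploads/hello.txt",
		&storage.PostPolicyV4Options{
			Expires: time.Now().Add(15 * time.Minute),
			Fields:  &storage.PolicyV4Fields{ContentType: "text/plain"},
		})
	if err != nil {
		log.Fatal(err)
	}

	// Build the multipart form from the generated fields; the file part
	// must come last.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	for k, v := range pv4.Fields {
		if err := w.WriteField(k, v); err != nil {
			log.Fatal(err)
		}
	}
	fw, err := w.CreateFormFile("file", "hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(fw, strings.NewReader("hello world")); err != nil {
		log.Fatal(err)
	}
	w.Close()

	resp, err := http.Post(pv4.URL, w.FormDataContentType(), &buf)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("upload status:", resp.Status)
}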
-func validateMetadata(hdrs map[string]string) (err error) { - if len(hdrs) == 0 { - return nil - } - - badKeys := make([]string, 0, len(hdrs)) - for key := range hdrs { - if !strings.HasPrefix(key, "x-goog-meta-") { - badKeys = append(badKeys, key) - } - } - if len(badKeys) != 0 { - err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", ")) - } - return -} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go deleted file mode 100644 index 4673a68d0..000000000 --- a/vendor/cloud.google.com/go/storage/reader.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "fmt" - "hash/crc32" - "io" - "io/ioutil" - "net/http" - "strings" - "time" - - "cloud.google.com/go/internal/trace" -) - -var crc32cTable = crc32.MakeTable(crc32.Castagnoli) - -// ReaderObjectAttrs are attributes about the object being read. These are populated -// during the New call. This struct only holds a subset of object attributes: to -// get the full set of attributes, use ObjectHandle.Attrs. -// -// Each field is read-only. -type ReaderObjectAttrs struct { - // Size is the length of the object's content. - Size int64 - - // StartOffset is the byte offset within the object - // from which reading begins. - // This value is only non-zero for range requests. - StartOffset int64 - - // ContentType is the MIME type of the object's content. - ContentType string - - // ContentEncoding is the encoding of the object's content. - ContentEncoding string - - // CacheControl specifies whether and for how long browser and Internet - // caches are allowed to cache your objects. - CacheControl string - - // LastModified is the time that the object was last modified. - LastModified time.Time - - // Generation is the generation number of the object's content. - Generation int64 - - // Metageneration is the version of the metadata for this object at - // this generation. This field is used for preconditions and for - // detecting changes in metadata. A metageneration number is only - // meaningful in the context of a particular generation of a - // particular object. - Metageneration int64 -} - -// NewReader creates a new Reader to read the contents of the -// object. -// ErrObjectNotExist will be returned if the object is not found. -// -// The caller must call Close on the returned Reader when done reading. -func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { - return o.NewRangeReader(ctx, 0, -1) -} - -// NewRangeReader reads part of an object, reading at most length bytes -// starting at the given offset. If length is negative, the object is read -// until the end. If offset is negative, the object is read abs(offset) bytes -// from the end, and length must also be negative to indicate all remaining -// bytes will be read. 
-// -// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies -// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding -// that file will be served back whole, regardless of the requested range as -// Google Cloud Storage dictates. -func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { - // This span covers the life of the reader. It is closed via the context - // in Reader.Close. - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Reader") - - if err := o.validate(); err != nil { - return nil, err - } - if offset < 0 && length >= 0 { - return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset) - } - if o.conds != nil { - if err := o.conds.validate("NewRangeReader"); err != nil { - return nil, err - } - } - - opts := makeStorageOpts(true, o.retry, o.userProject) - - params := &newRangeReaderParams{ - bucket: o.bucket, - object: o.object, - gen: o.gen, - offset: offset, - length: length, - encryptionKey: o.encryptionKey, - conds: o.conds, - readCompressed: o.readCompressed, - } - - r, err = o.c.tc.NewRangeReader(ctx, params, opts...) - - // Pass the context so that the span can be closed in Reader.Close, or close the - // span now if there is an error. - if err == nil { - r.ctx = ctx - } else { - trace.EndSpan(ctx, err) - } - - return r, err -} - -// decompressiveTranscoding returns true if the request was served decompressed -// and different than its original storage form. This happens when the "Content-Encoding" -// header is "gzip". -// See: -// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip -// - https://github.com/googleapis/google-cloud-go/issues/1800 -func decompressiveTranscoding(res *http.Response) bool { - // Decompressive Transcoding. - return res.Header.Get("Content-Encoding") == "gzip" || - res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" -} - -func uncompressedByServer(res *http.Response) bool { - // If the data is stored as gzip but is not encoded as gzip, then it - // was uncompressed by the server. - return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" && - res.Header.Get("Content-Encoding") != "gzip" -} - -// parseCRC32c parses the crc32c hash from the X-Goog-Hash header. -// It can parse headers in the form [crc32c=xxx md5=xxx] (XML responses) or the -// form [crc32c=xxx,md5=xxx] (JSON responses). The md5 hash is ignored. -func parseCRC32c(res *http.Response) (uint32, bool) { - const prefix = "crc32c=" - for _, spec := range res.Header["X-Goog-Hash"] { - values := strings.Split(spec, ",") - - for _, v := range values { - if strings.HasPrefix(v, prefix) { - c, err := decodeUint32(v[len(prefix):]) - if err == nil { - return c, true - } - } - } - - } - return 0, false -} - -// setConditionsHeaders sets precondition request headers for downloads -// using the XML API. It assumes that the conditions have been validated. -func setConditionsHeaders(headers http.Header, conds *Conditions) error { - if conds == nil { - return nil - } - if conds.MetagenerationMatch != 0 { - headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch)) - } - switch { - case conds.GenerationMatch != 0: - headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch)) - case conds.DoesNotExist: - headers.Set("x-goog-if-generation-match", "0") - } - return nil -} - -var emptyBody = ioutil.NopCloser(strings.NewReader("")) - -// Reader reads a Cloud Storage object. 
-// It implements io.Reader. -// -// Typically, a Reader computes the CRC of the downloaded content and compares it to -// the stored CRC, returning an error from Read if there is a mismatch. This integrity check -// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. -type Reader struct { - Attrs ReaderObjectAttrs - seen, remain, size int64 - checkCRC bool // should we check the CRC? - wantCRC uint32 // the CRC32c value the server sent in the header - gotCRC uint32 // running crc - - reader io.ReadCloser - ctx context.Context -} - -// Close closes the Reader. It must be called when done reading. -func (r *Reader) Close() error { - err := r.reader.Close() - trace.EndSpan(r.ctx, err) - return err -} - -func (r *Reader) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if r.remain != -1 { - r.remain -= int64(n) - } - if r.checkCRC { - r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) - // Check CRC here. It would be natural to check it in Close, but - // everybody defers Close on the assumption that it doesn't return - // anything worth looking at. - if err == io.EOF { - if r.gotCRC != r.wantCRC { - return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", - r.gotCRC, r.wantCRC) - } - } - } - return n, err -} - -// Size returns the size of the object in bytes. -// The returned value is always the same and is not affected by -// calls to Read or Close. -// -// Deprecated: use Reader.Attrs.Size. -func (r *Reader) Size() int64 { - return r.Attrs.Size -} - -// Remain returns the number of bytes left to read, or -1 if unknown. -func (r *Reader) Remain() int64 { - return r.remain -} - -// ContentType returns the content type of the object. -// -// Deprecated: use Reader.Attrs.ContentType. -func (r *Reader) ContentType() string { - return r.Attrs.ContentType -} - -// ContentEncoding returns the content encoding of the object. -// -// Deprecated: use Reader.Attrs.ContentEncoding. -func (r *Reader) ContentEncoding() string { - return r.Attrs.ContentEncoding -} - -// CacheControl returns the cache control of the object. -// -// Deprecated: use Reader.Attrs.CacheControl. -func (r *Reader) CacheControl() string { - return r.Attrs.CacheControl -} - -// LastModified returns the value of the Last-Modified header. -// -// Deprecated: use Reader.Attrs.LastModified. -func (r *Reader) LastModified() (time.Time, error) { - return r.Attrs.LastModified, nil -} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go deleted file mode 100644 index 49e8cb3db..000000000 --- a/vendor/cloud.google.com/go/storage/storage.go +++ /dev/null @@ -1,2378 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
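Note: the deleted reader.go above documents the range-read contract (a negative length reads to EOF, Attrs is populated when the reader is opened, the CRC is verified at EOF). A minimal, illustrative sketch of that API, assuming placeholder bucket/object names and default credentials; it is not code from this repository:

// Illustrative only: range read using the NewRangeReader semantics documented
// in the removed reader.go. "my-bucket" and "backup/object.bin" are placeholders.
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Read at most 1 MiB starting at byte 0; a negative length would read to EOF.
	r, err := client.Bucket("my-bucket").Object("backup/object.bin").NewRangeReader(ctx, 0, 1<<20)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	fmt.Println("object size:", r.Attrs.Size, "remaining in range:", r.Remain())
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}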
- -package storage - -import ( - "bytes" - "context" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" - - "cloud.google.com/go/internal/optional" - "cloud.google.com/go/internal/trace" - "cloud.google.com/go/storage/internal" - "cloud.google.com/go/storage/internal/apiv2/storagepb" - "github.com/googleapis/gax-go/v2" - "golang.org/x/oauth2/google" - "google.golang.org/api/googleapi" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - raw "google.golang.org/api/storage/v1" - "google.golang.org/api/transport" - htransport "google.golang.org/api/transport/http" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/known/fieldmaskpb" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// Methods which can be used in signed URLs. -var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true} - -var ( - // ErrBucketNotExist indicates that the bucket does not exist. - ErrBucketNotExist = errors.New("storage: bucket doesn't exist") - // ErrObjectNotExist indicates that the object does not exist. - ErrObjectNotExist = errors.New("storage: object doesn't exist") - // errMethodNotSupported indicates that the method called is not currently supported by the client. - // TODO: Export this error when launching the transport-agnostic client. - errMethodNotSupported = errors.New("storage: method is not currently supported") - // errMethodNotValid indicates that given HTTP method is not valid. - errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) -) - -var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version) - -const ( - // ScopeFullControl grants permissions to manage your - // data and permissions in Google Cloud Storage. - ScopeFullControl = raw.DevstorageFullControlScope - - // ScopeReadOnly grants permissions to - // view your data in Google Cloud Storage. - ScopeReadOnly = raw.DevstorageReadOnlyScope - - // ScopeReadWrite grants permissions to manage your - // data in Google Cloud Storage. - ScopeReadWrite = raw.DevstorageReadWriteScope - - // aes256Algorithm is the AES256 encryption algorithm used with the - // Customer-Supplied Encryption Keys feature. - aes256Algorithm = "AES256" - - // defaultGen indicates the latest object generation by default, - // using a negative value. - defaultGen = int64(-1) -) - -// TODO: remove this once header with invocation ID is applied to all methods. -func setClientHeader(headers http.Header) { - headers.Set("x-goog-api-client", xGoogDefaultHeader) -} - -// Client is a client for interacting with Google Cloud Storage. -// -// Clients should be reused instead of created as needed. -// The methods of Client are safe for concurrent use by multiple goroutines. -type Client struct { - hc *http.Client - raw *raw.Service - // Scheme describes the scheme under the current host. - scheme string - // xmlHost is the default host used for XML requests. - xmlHost string - // May be nil. - creds *google.Credentials - retry *retryConfig - - // tc is the transport-agnostic client implemented with either gRPC or HTTP. - tc storageClient - // useGRPC flags whether the client uses gRPC. 
This is needed while the - // integration piece is only partially complete. - // TODO: remove before merging to main. - useGRPC bool -} - -// NewClient creates a new Google Cloud Storage client using the HTTP transport. -// The default scope is ScopeFullControl. To use a different scope, like -// ScopeReadOnly, use option.WithScopes. -// -// Clients should be reused instead of created as needed. The methods of Client -// are safe for concurrent use by multiple goroutines. -// -// You may configure the client by passing in options from the [google.golang.org/api/option] -// package. You may also use options defined in this package, such as [WithJSONReads]. -func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - var creds *google.Credentials - - // In general, it is recommended to use raw.NewService instead of htransport.NewClient - // since raw.NewService configures the correct default endpoints when initializing the - // internal http client. However, in our case, "NewRangeReader" in reader.go needs to - // access the http client directly to make requests, so we create the client manually - // here so it can be re-used by both reader.go and raw.NewService. This means we need to - // manually configure the default endpoint options on the http client. Furthermore, we - // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. - if host := os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { - // Prepend default options to avoid overriding options passed by the user. - opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, opts...) - - opts = append(opts, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"), - internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"), - internaloption.WithDefaultUniverseDomain("googleapis.com"), - ) - - // Don't error out here. The user may have passed in their own HTTP - // client which does not auth with ADC or other common conventions. - c, err := transport.Creds(ctx, opts...) - if err == nil { - creds = c - opts = append(opts, internaloption.WithCredentials(creds)) - } - } else { - var hostURL *url.URL - - if strings.Contains(host, "://") { - h, err := url.Parse(host) - if err != nil { - return nil, err - } - hostURL = h - } else { - // Add scheme for user if not supplied in STORAGE_EMULATOR_HOST - // URL is only parsed correctly if it has a scheme, so we build it ourselves - hostURL = &url.URL{Scheme: "http", Host: host} - } - - hostURL.Path = "storage/v1/" - endpoint := hostURL.String() - - // Append the emulator host as default endpoint for the user - opts = append([]option.ClientOption{ - option.WithoutAuthentication(), - internaloption.SkipDialSettingsValidation(), - internaloption.WithDefaultEndpoint(endpoint), - internaloption.WithDefaultMTLSEndpoint(endpoint), - }, opts...) - } - - // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. - hc, ep, err := htransport.NewClient(ctx, opts...) - if err != nil { - return nil, fmt.Errorf("dialing: %w", err) - } - // RawService should be created with the chosen endpoint to take account of user override. 
- rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) - if err != nil { - return nil, fmt.Errorf("storage client: %w", err) - } - // Update xmlHost and scheme with the chosen endpoint. - u, err := url.Parse(ep) - if err != nil { - return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) - } - - tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...)) - if err != nil { - return nil, fmt.Errorf("storage: %w", err) - } - - return &Client{ - hc: hc, - raw: rawService, - scheme: u.Scheme, - xmlHost: u.Host, - creds: creds, - tc: tc, - }, nil -} - -// NewGRPCClient creates a new Storage client using the gRPC transport and API. -// Client methods which have not been implemented in gRPC will return an error. -// In particular, methods for Cloud Pub/Sub notifications are not supported. -// Using a non-default universe domain is also not supported with the Storage -// gRPC client. -// -// The storage gRPC API is still in preview and not yet publicly available. -// If you would like to use the API, please first contact your GCP account rep to -// request access. The API may be subject to breaking changes. -// -// Clients should be reused instead of created as needed. The methods of Client -// are safe for concurrent use by multiple goroutines. -// -// You may configure the client by passing in options from the [google.golang.org/api/option] -// package. -func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - opts = append(defaultGRPCOptions(), opts...) - tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) - if err != nil { - return nil, err - } - - return &Client{tc: tc, useGRPC: true}, nil -} - -// Close closes the Client. -// -// Close need not be called at program exit. -func (c *Client) Close() error { - // Set fields to nil so that subsequent uses will panic. - c.hc = nil - c.raw = nil - c.creds = nil - if c.tc != nil { - return c.tc.Close() - } - return nil -} - -// SigningScheme determines the API version to use when signing URLs. -type SigningScheme int - -const ( - // SigningSchemeDefault is presently V2 and will change to V4 in the future. - SigningSchemeDefault SigningScheme = iota - - // SigningSchemeV2 uses the V2 scheme to sign URLs. - SigningSchemeV2 - - // SigningSchemeV4 uses the V4 scheme to sign URLs. - SigningSchemeV4 -) - -// URLStyle determines the style to use for the signed URL. PathStyle is the -// default. All non-default options work with V4 scheme only. See -// https://cloud.google.com/storage/docs/request-endpoints for details. -type URLStyle interface { - // host should return the host portion of the signed URL, not including - // the scheme (e.g. storage.googleapis.com). - host(hostname, bucket string) string - - // path should return the path portion of the signed URL, which may include - // both the bucket and object name or only the object name depending on the - // style. - path(bucket, object string) string -} - -type pathStyle struct{} - -type virtualHostedStyle struct{} - -type bucketBoundHostname struct { - hostname string -} - -func (s pathStyle) host(hostname, bucket string) string { - if hostname != "" { - return stripScheme(hostname) - } - - if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { - return stripScheme(host) - } - - return "storage.googleapis.com" -} - -func (s virtualHostedStyle) host(hostname, bucket string) string { - if hostname != "" { - return bucket + "." 
+ stripScheme(hostname) - } - - if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { - return bucket + "." + stripScheme(host) - } - - return bucket + ".storage.googleapis.com" -} - -func (s bucketBoundHostname) host(_, bucket string) string { - return s.hostname -} - -func (s pathStyle) path(bucket, object string) string { - p := bucket - if object != "" { - p += "/" + object - } - return p -} - -func (s virtualHostedStyle) path(bucket, object string) string { - return object -} - -func (s bucketBoundHostname) path(bucket, object string) string { - return object -} - -// PathStyle is the default style, and will generate a URL of the form -// "//". By default, is -// storage.googleapis.com, but setting an endpoint on the storage Client or -// through STORAGE_EMULATOR_HOST overrides this. Setting Hostname on -// SignedURLOptions or PostPolicyV4Options overrides everything else. -func PathStyle() URLStyle { - return pathStyle{} -} - -// VirtualHostedStyle generates a URL relative to the bucket's virtual -// hostname, e.g. ".storage.googleapis.com/". -func VirtualHostedStyle() URLStyle { - return virtualHostedStyle{} -} - -// BucketBoundHostname generates a URL with a custom hostname tied to a -// specific GCS bucket. The desired hostname should be passed in using the -// hostname argument. Generated urls will be of the form -// "/". See -// https://cloud.google.com/storage/docs/request-endpoints#cname and -// https://cloud.google.com/load-balancing/docs/https/adding-backend-buckets-to-load-balancers -// for details. Note that for CNAMEs, only HTTP is supported, so Insecure must -// be set to true. -func BucketBoundHostname(hostname string) URLStyle { - return bucketBoundHostname{hostname: hostname} -} - -// Strips the scheme from a host if it contains it -func stripScheme(host string) string { - if strings.Contains(host, "://") { - host = strings.SplitN(host, "://", 2)[1] - } - return host -} - -// SignedURLOptions allows you to restrict the access to the signed URL. -type SignedURLOptions struct { - // GoogleAccessID represents the authorizer of the signed URL generation. - // It is typically the Google service account client email address from - // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". - // Required. - GoogleAccessID string - - // PrivateKey is the Google service account private key. It is obtainable - // from the Google Developers Console. - // At https://console.developers.google.com/project//apiui/credential, - // create a service account client ID or reuse one of your existing service account - // credentials. Click on the "Generate new P12 key" to generate and download - // a new private key. Once you download the P12 file, use the following command - // to convert it into a PEM file. - // - // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes - // - // Provide the contents of the PEM file as a byte slice. - // Exactly one of PrivateKey or SignBytes must be non-nil. - PrivateKey []byte - - // SignBytes is a function for implementing custom signing. For example, if - // your application is running on Google App Engine, you can use - // appengine's internal signing function: - // ctx := appengine.NewContext(request) - // acc, _ := appengine.ServiceAccount(ctx) - // url, err := SignedURL("bucket", "object", &SignedURLOptions{ - // GoogleAccessID: acc, - // SignBytes: func(b []byte) ([]byte, error) { - // _, signedBytes, err := appengine.SignBytes(ctx, b) - // return signedBytes, err - // }, - // // etc. 
- // }) - // - // Exactly one of PrivateKey or SignBytes must be non-nil. - SignBytes func([]byte) ([]byte, error) - - // Method is the HTTP method to be used with the signed URL. - // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. - // Required. - Method string - - // Expires is the expiration time on the signed URL. It must be - // a datetime in the future. For SigningSchemeV4, the expiration may be no - // more than seven days in the future. - // Required. - Expires time.Time - - // ContentType is the content type header the client must provide - // to use the generated signed URL. - // Optional. - ContentType string - - // Headers is a list of extension headers the client must provide - // in order to use the generated signed URL. Each must be a string of the - // form "key:values", with multiple values separated by a semicolon. - // Optional. - Headers []string - - // QueryParameters is a map of additional query parameters. When - // SigningScheme is V4, this is used in computing the signature, and the - // client must use the same query parameters when using the generated signed - // URL. - // Optional. - QueryParameters url.Values - - // MD5 is the base64 encoded MD5 checksum of the file. - // If provided, the client should provide the exact value on the request - // header in order to use the signed URL. - // Optional. - MD5 string - - // Style provides options for the type of URL to use. Options are - // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See - // https://cloud.google.com/storage/docs/request-endpoints for details. - // Only supported for V4 signing. - // Optional. - Style URLStyle - - // Insecure determines whether the signed URL should use HTTPS (default) or - // HTTP. - // Only supported for V4 signing. - // Optional. - Insecure bool - - // Scheme determines the version of URL signing to use. Default is - // SigningSchemeV2. - Scheme SigningScheme - - // Hostname sets the host of the signed URL. This field overrides any - // endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. - // Only compatible with PathStyle and VirtualHostedStyle URLStyles. - // Optional. - Hostname string -} - -func (opts *SignedURLOptions) clone() *SignedURLOptions { - return &SignedURLOptions{ - GoogleAccessID: opts.GoogleAccessID, - SignBytes: opts.SignBytes, - PrivateKey: opts.PrivateKey, - Method: opts.Method, - Expires: opts.Expires, - ContentType: opts.ContentType, - Headers: opts.Headers, - QueryParameters: opts.QueryParameters, - MD5: opts.MD5, - Style: opts.Style, - Insecure: opts.Insecure, - Scheme: opts.Scheme, - Hostname: opts.Hostname, - } -} - -var ( - tabRegex = regexp.MustCompile(`[\t]+`) - // I was tempted to call this spacex. :) - spaceRegex = regexp.MustCompile(` +`) - - canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`) - excludedCanonicalHeaders = map[string]bool{ - "x-goog-encryption-key": true, - "x-goog-encryption-key-sha256": true, - } -) - -// v2SanitizeHeaders applies the specifications for canonical extension headers at -// https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers -func v2SanitizeHeaders(hdrs []string) []string { - headerMap := map[string][]string{} - for _, hdr := range hdrs { - // No leading or trailing whitespaces. - sanitizedHeader := strings.TrimSpace(hdr) - - var header, value string - // Only keep canonical headers, discard any others. 
- headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader) - if len(headerMatches) == 0 { - continue - } - header = headerMatches[1] - value = headerMatches[2] - - header = strings.ToLower(strings.TrimSpace(header)) - value = strings.TrimSpace(value) - - if excludedCanonicalHeaders[header] { - // Do not keep any deliberately excluded canonical headers when signing. - continue - } - - if len(value) > 0 { - // Remove duplicate headers by appending the values of duplicates - // in their order of appearance. - headerMap[header] = append(headerMap[header], value) - } - } - - var sanitizedHeaders []string - for header, values := range headerMap { - // There should be no spaces around the colon separating the header name - // from the header value or around the values themselves. The values - // should be separated by commas. - // - // NOTE: The semantics for headers without a value are not clear. - // However from specifications these should be edge-cases anyway and we - // should assume that there will be no canonical headers using empty - // values. Any such headers are discarded at the regexp stage above. - sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) - } - sort.Strings(sanitizedHeaders) - return sanitizedHeaders -} - -// v4SanitizeHeaders applies the specifications for canonical extension headers -// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers. -// -// V4 does a couple things differently from V2: -// - Headers get sorted by key, instead of by key:value. We do this in -// signedURLV4. -// - There's no canonical regexp: we simply split headers on :. -// - We don't exclude canonical headers. -// - We replace leading and trailing spaces in header values, like v2, but also -// all intermediate space duplicates get stripped. That is, there's only ever -// a single consecutive space. -func v4SanitizeHeaders(hdrs []string) []string { - headerMap := map[string][]string{} - for _, hdr := range hdrs { - // No leading or trailing whitespaces. - sanitizedHeader := strings.TrimSpace(hdr) - - var key, value string - headerMatches := strings.SplitN(sanitizedHeader, ":", 2) - if len(headerMatches) < 2 { - continue - } - - key = headerMatches[0] - value = headerMatches[1] - - key = strings.ToLower(strings.TrimSpace(key)) - value = strings.TrimSpace(value) - value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" "))) - value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t"))) - - if len(value) > 0 { - // Remove duplicate headers by appending the values of duplicates - // in their order of appearance. - headerMap[key] = append(headerMap[key], value) - } - } - - var sanitizedHeaders []string - for header, values := range headerMap { - // There should be no spaces around the colon separating the header name - // from the header value or around the values themselves. The values - // should be separated by commas. - // - // NOTE: The semantics for headers without a value are not clear. - // However from specifications these should be edge-cases anyway and we - // should assume that there will be no canonical headers using empty - // values. Any such headers are discarded at the regexp stage above. - sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) - } - return sanitizedHeaders -} - -// SignedURL returns a URL for the specified object. 
Signed URLs allow anyone -// access to a restricted resource for a limited time without needing a -// Google account or signing in. For more information about signed URLs, see -// https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication -// If initializing a Storage Client, instead use the Bucket.SignedURL method -// which uses the Client's credentials to handle authentication. -func SignedURL(bucket, object string, opts *SignedURLOptions) (string, error) { - now := utcNow() - if err := validateOptions(opts, now); err != nil { - return "", err - } - - switch opts.Scheme { - case SigningSchemeV2: - opts.Headers = v2SanitizeHeaders(opts.Headers) - return signedURLV2(bucket, object, opts) - case SigningSchemeV4: - opts.Headers = v4SanitizeHeaders(opts.Headers) - return signedURLV4(bucket, object, opts, now) - default: // SigningSchemeDefault - opts.Headers = v2SanitizeHeaders(opts.Headers) - return signedURLV2(bucket, object, opts) - } -} - -func validateOptions(opts *SignedURLOptions, now time.Time) error { - if opts == nil { - return errors.New("storage: missing required SignedURLOptions") - } - if opts.GoogleAccessID == "" { - return errors.New("storage: missing required GoogleAccessID") - } - if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { - return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") - } - opts.Method = strings.ToUpper(opts.Method) - if _, ok := signedURLMethods[opts.Method]; !ok { - return errMethodNotValid - } - if opts.Expires.IsZero() { - return errors.New("storage: missing required expires option") - } - if opts.MD5 != "" { - md5, err := base64.StdEncoding.DecodeString(opts.MD5) - if err != nil || len(md5) != 16 { - return errors.New("storage: invalid MD5 checksum") - } - } - if opts.Style == nil { - opts.Style = PathStyle() - } - if _, ok := opts.Style.(pathStyle); !ok && opts.Scheme == SigningSchemeV2 { - return errors.New("storage: only path-style URLs are permitted with SigningSchemeV2") - } - if opts.Scheme == SigningSchemeV4 { - cutoff := now.Add(604801 * time.Second) // 7 days + 1 second - if !opts.Expires.Before(cutoff) { - return errors.New("storage: expires must be within seven days from now") - } - } - return nil -} - -const ( - iso8601 = "20060102T150405Z" - yearMonthDay = "20060102" -) - -// utcNow returns the current time in UTC and is a variable to allow for -// reassignment in tests to provide deterministic signed URL values. -var utcNow = func() time.Time { - return time.Now().UTC() -} - -// extractHeaderNames takes in a series of key:value headers and returns the -// header names only. -func extractHeaderNames(kvs []string) []string { - var res []string - for _, header := range kvs { - nameValue := strings.SplitN(header, ":", 2) - res = append(res, nameValue[0]) - } - return res -} - -// pathEncodeV4 creates an encoded string that matches the v4 signature spec. -// Following the spec precisely is necessary in order to ensure that the URL -// and signing string are correctly formed, and Go's url.PathEncode and -// url.QueryEncode don't generate an exact match without some additional logic. 
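Note: SignedURL and validateOptions above define the package-level V4 signing entry point and its seven-day expiry cap. An illustrative sketch of that call, assuming a placeholder service-account email and a PEM key read from a hypothetical path:

// Illustrative only: V4 signed URL generation per the SignedURL documentation
// above. The access ID, key path, and object names are placeholders.
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	key, err := os.ReadFile("service-account.pem") // hypothetical key file
	if err != nil {
		log.Fatal(err)
	}
	url, err := storage.SignedURL("my-bucket", "backup/object.bin", &storage.SignedURLOptions{
		GoogleAccessID: "sa@project.iam.gserviceaccount.com",
		PrivateKey:     key,
		Method:         "GET",
		Expires:        time.Now().Add(24 * time.Hour), // must stay under the 7-day V4 limit
		Scheme:         storage.SigningSchemeV4,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(url)
}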
-func pathEncodeV4(path string) string { - segments := strings.Split(path, "/") - var encodedSegments []string - for _, s := range segments { - encodedSegments = append(encodedSegments, url.QueryEscape(s)) - } - encodedStr := strings.Join(encodedSegments, "/") - encodedStr = strings.Replace(encodedStr, "+", "%20", -1) - return encodedStr -} - -// signedURLV4 creates a signed URL using the sigV4 algorithm. -func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) { - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%s\n", opts.Method) - - u := &url.URL{Path: opts.Style.path(bucket, name)} - u.RawPath = pathEncodeV4(u.Path) - - // Note: we have to add a / here because GCS does so auto-magically, despite - // our encoding not doing so (and we have to exactly match their - // canonical query). - fmt.Fprintf(buf, "/%s\n", u.RawPath) - - headerNames := append(extractHeaderNames(opts.Headers), "host") - if opts.ContentType != "" { - headerNames = append(headerNames, "content-type") - } - if opts.MD5 != "" { - headerNames = append(headerNames, "content-md5") - } - sort.Strings(headerNames) - signedHeaders := strings.Join(headerNames, ";") - timestamp := now.Format(iso8601) - credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay)) - canonicalQueryString := url.Values{ - "X-Goog-Algorithm": {"GOOG4-RSA-SHA256"}, - "X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)}, - "X-Goog-Date": {timestamp}, - "X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))}, - "X-Goog-SignedHeaders": {signedHeaders}, - } - // Add user-supplied query parameters to the canonical query string. For V4, - // it's necessary to include these. - for k, v := range opts.QueryParameters { - canonicalQueryString[k] = append(canonicalQueryString[k], v...) - } - // url.Values.Encode escaping is correct, except that a space must be replaced - // by `%20` rather than `+`. - escapedQuery := strings.Replace(canonicalQueryString.Encode(), "+", "%20", -1) - fmt.Fprintf(buf, "%s\n", escapedQuery) - - // Fill in the hostname based on the desired URL style. - u.Host = opts.Style.host(opts.Hostname, bucket) - - // Fill in the URL scheme. - if opts.Insecure { - u.Scheme = "http" - } else { - u.Scheme = "https" - } - - var headersWithValue []string - headersWithValue = append(headersWithValue, "host:"+u.Hostname()) - headersWithValue = append(headersWithValue, opts.Headers...) - if opts.ContentType != "" { - headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType) - } - if opts.MD5 != "" { - headersWithValue = append(headersWithValue, "content-md5:"+opts.MD5) - } - // Trim extra whitespace from headers and replace with a single space. - var trimmedHeaders []string - for _, h := range headersWithValue { - trimmedHeaders = append(trimmedHeaders, strings.Join(strings.Fields(h), " ")) - } - canonicalHeaders := strings.Join(sortHeadersByKey(trimmedHeaders), "\n") - fmt.Fprintf(buf, "%s\n\n", canonicalHeaders) - fmt.Fprintf(buf, "%s\n", signedHeaders) - - // If the user provides a value for X-Goog-Content-SHA256, we must use - // that value in the request string. If not, we use UNSIGNED-PAYLOAD. 
- sha256Header := false - for _, h := range trimmedHeaders { - if strings.HasPrefix(strings.ToLower(h), "x-goog-content-sha256") && strings.Contains(h, ":") { - sha256Header = true - fmt.Fprintf(buf, "%s", strings.SplitN(h, ":", 2)[1]) - break - } - } - if !sha256Header { - fmt.Fprint(buf, "UNSIGNED-PAYLOAD") - } - - sum := sha256.Sum256(buf.Bytes()) - hexDigest := hex.EncodeToString(sum[:]) - signBuf := &bytes.Buffer{} - fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n") - fmt.Fprintf(signBuf, "%s\n", timestamp) - fmt.Fprintf(signBuf, "%s\n", credentialScope) - fmt.Fprintf(signBuf, "%s", hexDigest) - - signBytes := opts.SignBytes - if opts.PrivateKey != nil { - key, err := parseKey(opts.PrivateKey) - if err != nil { - return "", err - } - signBytes = func(b []byte) ([]byte, error) { - sum := sha256.Sum256(b) - return rsa.SignPKCS1v15( - rand.Reader, - key, - crypto.SHA256, - sum[:], - ) - } - } - b, err := signBytes(signBuf.Bytes()) - if err != nil { - return "", err - } - signature := hex.EncodeToString(b) - canonicalQueryString.Set("X-Goog-Signature", string(signature)) - u.RawQuery = canonicalQueryString.Encode() - return u.String(), nil -} - -// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header -// key. -func sortHeadersByKey(hdrs []string) []string { - headersMap := map[string]string{} - var headersKeys []string - for _, h := range hdrs { - parts := strings.SplitN(h, ":", 2) - k := parts[0] - v := parts[1] - headersMap[k] = v - headersKeys = append(headersKeys, k) - } - sort.Strings(headersKeys) - var sorted []string - for _, k := range headersKeys { - v := headersMap[k] - sorted = append(sorted, fmt.Sprintf("%s:%s", k, v)) - } - return sorted -} - -func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { - signBytes := opts.SignBytes - if opts.PrivateKey != nil { - key, err := parseKey(opts.PrivateKey) - if err != nil { - return "", err - } - signBytes = func(b []byte) ([]byte, error) { - sum := sha256.Sum256(b) - return rsa.SignPKCS1v15( - rand.Reader, - key, - crypto.SHA256, - sum[:], - ) - } - } - - u := &url.URL{ - Path: fmt.Sprintf("/%s/%s", bucket, name), - } - - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%s\n", opts.Method) - fmt.Fprintf(buf, "%s\n", opts.MD5) - fmt.Fprintf(buf, "%s\n", opts.ContentType) - fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) - if len(opts.Headers) > 0 { - fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) - } - fmt.Fprintf(buf, "%s", u.String()) - - b, err := signBytes(buf.Bytes()) - if err != nil { - return "", err - } - encoded := base64.StdEncoding.EncodeToString(b) - u.Scheme = "https" - u.Host = PathStyle().host(opts.Hostname, bucket) - q := u.Query() - q.Set("GoogleAccessId", opts.GoogleAccessID) - q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) - q.Set("Signature", string(encoded)) - u.RawQuery = q.Encode() - return u.String(), nil -} - -// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. -// Use BucketHandle.Object to get a handle. -type ObjectHandle struct { - c *Client - bucket string - object string - acl ACLHandle - gen int64 // a negative value indicates latest - conds *Conditions - encryptionKey []byte // AES-256 key - userProject string // for requester-pays buckets - readCompressed bool // Accept-Encoding: gzip - retry *retryConfig - overrideRetention *bool -} - -// ACL provides access to the object's access control list. -// This controls who can read and write this object. -// This call does not perform any network operations. 
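Note: the ObjectHandle.ACL accessor documented just above is purely local; operations on the returned handle are what perform API calls. An illustrative sketch of granting and listing an object ACL, assuming placeholder names:

// Illustrative only: using the ACLHandle reachable via ObjectHandle.ACL.
// Bucket and object names are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("my-bucket").Object("backup/object.bin")

	// Grant public read access (network call happens here, not in ACL()).
	if err := obj.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}

	rules, err := obj.ACL().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		fmt.Println(r.Entity, r.Role)
	}
}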
-func (o *ObjectHandle) ACL() *ACLHandle { - return &o.acl -} - -// Generation returns a new ObjectHandle that operates on a specific generation -// of the object. -// By default, the handle operates on the latest generation. Not -// all operations work when given a specific generation; check the API -// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. -func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { - o2 := *o - o2.gen = gen - return &o2 -} - -// If returns a new ObjectHandle that applies a set of preconditions. -// Preconditions already set on the ObjectHandle are ignored. The supplied -// Conditions must have at least one field set to a non-default value; -// otherwise an error will be returned from any operation on the ObjectHandle. -// Operations on the new handle will return an error if the preconditions are not -// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions -// for more details. -func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { - o2 := *o - o2.conds = &conds - return &o2 -} - -// Key returns a new ObjectHandle that uses the supplied encryption -// key to encrypt and decrypt the object's contents. -// -// Encryption key must be a 32-byte AES-256 key. -// See https://cloud.google.com/storage/docs/encryption for details. -func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { - o2 := *o - o2.encryptionKey = encryptionKey - return &o2 -} - -// Attrs returns meta information about the object. -// ErrObjectNotExist will be returned if the object is not found. -func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs") - defer func() { trace.EndSpan(ctx, err) }() - - if err := o.validate(); err != nil { - return nil, err - } - opts := makeStorageOpts(true, o.retry, o.userProject) - return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...) -} - -// Update updates an object with the provided attributes. See -// ObjectAttrsToUpdate docs for details on treatment of zero values. -// ErrObjectNotExist will be returned if the object is not found. -func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") - defer func() { trace.EndSpan(ctx, err) }() - - if err := o.validate(); err != nil { - return nil, err - } - isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0 - opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) - return o.c.tc.UpdateObject(ctx, - &updateObjectParams{ - bucket: o.bucket, - object: o.object, - uattrs: &uattrs, - gen: o.gen, - encryptionKey: o.encryptionKey, - conds: o.conds, - overrideRetention: o.overrideRetention}, opts...) -} - -// BucketName returns the name of the bucket. -func (o *ObjectHandle) BucketName() string { - return o.bucket -} - -// ObjectName returns the name of the object. -func (o *ObjectHandle) ObjectName() string { - return o.object -} - -// ObjectAttrsToUpdate is used to update the attributes of an object. -// Only fields set to non-nil values will be updated. -// For all fields except CustomTime and Retention, set the field to its zero -// value to delete it. CustomTime cannot be deleted or changed to an earlier -// time once set. Retention can be deleted (only if the Mode is Unlocked) by -// setting it to an empty value (not nil). 
-// -// For example, to change ContentType and delete ContentEncoding, Metadata and -// Retention, use: -// -// ObjectAttrsToUpdate{ -// ContentType: "text/html", -// ContentEncoding: "", -// Metadata: map[string]string{}, -// Retention: &ObjectRetention{}, -// } -type ObjectAttrsToUpdate struct { - EventBasedHold optional.Bool - TemporaryHold optional.Bool - ContentType optional.String - ContentLanguage optional.String - ContentEncoding optional.String - ContentDisposition optional.String - CacheControl optional.String - CustomTime time.Time // Cannot be deleted or backdated from its current value. - Metadata map[string]string // Set to map[string]string{} to delete. - ACL []ACLRule - - // If not empty, applies a predefined set of access controls. ACL must be nil. - // See https://cloud.google.com/storage/docs/json_api/v1/objects/patch. - PredefinedACL string - - // Retention contains the retention configuration for this object. - // Operations other than setting the retention for the first time or - // extending the RetainUntil time on the object retention must be done - // on an ObjectHandle with OverrideUnlockedRetention set to true. - Retention *ObjectRetention -} - -// Delete deletes the single specified object. -func (o *ObjectHandle) Delete(ctx context.Context) error { - if err := o.validate(); err != nil { - return err - } - // Delete is idempotent if GenerationMatch or Generation have been passed in. - // The default generation is negative to get the latest version of the object. - isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 - opts := makeStorageOpts(isIdempotent, o.retry, o.userProject) - return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...) -} - -// ReadCompressed when true causes the read to happen without decompressing. -func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { - o2 := *o - o2.readCompressed = compressed - return &o2 -} - -// OverrideUnlockedRetention provides an option for overriding an Unlocked -// Retention policy. This must be set to true in order to change a policy -// from Unlocked to Locked, to set it to null, or to reduce its -// RetainUntil attribute. It is not required for setting the ObjectRetention for -// the first time nor for extending the RetainUntil time. -func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle { - o2 := *o - o2.overrideRetention = &override - return &o2 -} - -// NewWriter returns a storage Writer that writes to the GCS object -// associated with this ObjectHandle. -// -// A new object will be created unless an object with this name already exists. -// Otherwise any previous object with the same name will be replaced. -// The object will not be available (and any previous object will remain) -// until Close has been called. -// -// Attributes can be set on the object by modifying the returned Writer's -// ObjectAttrs field before the first call to Write. If no ContentType -// attribute is specified, the content type will be automatically sniffed -// using net/http.DetectContentType. -// -// Note that each Writer allocates an internal buffer of size Writer.ChunkSize. -// See the ChunkSize docs for more information. -// -// It is the caller's responsibility to call Close when writing is done. To -// stop writing without saving the data, cancel the context. 
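Note: NewWriter's contract above (attributes must be set before the first Write, and the object only becomes visible once Close returns nil) can be summarized in a short illustrative sketch, assuming placeholder names:

// Illustrative only: the Writer lifecycle documented above.
// Bucket and object names are placeholders.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	w := client.Bucket("my-bucket").Object("backup/meta.json").NewWriter(ctx)
	w.ContentType = "application/json" // set attributes before the first Write
	if _, err := w.Write([]byte(`{"ok":true}`)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // upload is not committed until Close succeeds
		log.Fatal(err)
	}
}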
-func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Writer") - return &Writer{ - ctx: ctx, - o: o, - donec: make(chan struct{}), - ObjectAttrs: ObjectAttrs{Name: o.object}, - ChunkSize: googleapi.DefaultUploadChunkSize, - } -} - -func (o *ObjectHandle) validate() error { - if o.bucket == "" { - return errors.New("storage: bucket name is empty") - } - if o.object == "" { - return errors.New("storage: object name is empty") - } - if !utf8.ValidString(o.object) { - return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) - } - return nil -} - -// parseKey converts the binary contents of a private key file to an -// *rsa.PrivateKey. It detects whether the private key is in a PEM container or -// not. If so, it extracts the private key from PEM container before -// conversion. It only supports PEM containers with no passphrase. -func parseKey(key []byte) (*rsa.PrivateKey, error) { - if block, _ := pem.Decode(key); block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, err - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("oauth2: private key is invalid") - } - return parsed, nil -} - -// toRawObject copies the editable attributes from o to the raw library's Object type. -func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { - var ret string - if !o.RetentionExpirationTime.IsZero() { - ret = o.RetentionExpirationTime.Format(time.RFC3339) - } - var ct string - if !o.CustomTime.IsZero() { - ct = o.CustomTime.Format(time.RFC3339) - } - return &raw.Object{ - Bucket: bucket, - Name: o.Name, - EventBasedHold: o.EventBasedHold, - TemporaryHold: o.TemporaryHold, - RetentionExpirationTime: ret, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - ContentDisposition: o.ContentDisposition, - StorageClass: o.StorageClass, - Acl: toRawObjectACL(o.ACL), - Metadata: o.Metadata, - CustomTime: ct, - Retention: o.Retention.toRawObjectRetention(), - } -} - -// toProtoObject copies the editable attributes from o to the proto library's Object type. -func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object { - // For now, there are only globally unique buckets, and "_" is the alias - // project ID for such buckets. If the bucket is not provided, like in the - // destination ObjectAttrs of a Copy, do not attempt to format it. - if b != "" { - b = bucketResourceName(globalProjectAlias, b) - } - - return &storagepb.Object{ - Bucket: b, - Name: o.Name, - EventBasedHold: proto.Bool(o.EventBasedHold), - TemporaryHold: o.TemporaryHold, - ContentType: o.ContentType, - ContentEncoding: o.ContentEncoding, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - ContentDisposition: o.ContentDisposition, - StorageClass: o.StorageClass, - Acl: toProtoObjectACL(o.ACL), - Metadata: o.Metadata, - CreateTime: toProtoTimestamp(o.Created), - CustomTime: toProtoTimestamp(o.CustomTime), - DeleteTime: toProtoTimestamp(o.Deleted), - RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime), - UpdateTime: toProtoTimestamp(o.Updated), - KmsKey: o.KMSKeyName, - Generation: o.Generation, - Size: o.Size, - } -} - -// toProtoObject copies the attributes to update from uattrs to the proto library's Object type. 
-func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storagepb.Object { - o := &storagepb.Object{ - Name: object, - Bucket: bucket, - } - if uattrs == nil { - return o - } - - if uattrs.EventBasedHold != nil { - o.EventBasedHold = proto.Bool(optional.ToBool(uattrs.EventBasedHold)) - } - if uattrs.TemporaryHold != nil { - o.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) - } - if uattrs.ContentType != nil { - o.ContentType = optional.ToString(uattrs.ContentType) - } - if uattrs.ContentLanguage != nil { - o.ContentLanguage = optional.ToString(uattrs.ContentLanguage) - } - if uattrs.ContentEncoding != nil { - o.ContentEncoding = optional.ToString(uattrs.ContentEncoding) - } - if uattrs.ContentDisposition != nil { - o.ContentDisposition = optional.ToString(uattrs.ContentDisposition) - } - if uattrs.CacheControl != nil { - o.CacheControl = optional.ToString(uattrs.CacheControl) - } - if !uattrs.CustomTime.IsZero() { - o.CustomTime = toProtoTimestamp(uattrs.CustomTime) - } - if uattrs.ACL != nil { - o.Acl = toProtoObjectACL(uattrs.ACL) - } - - o.Metadata = uattrs.Metadata - - return o -} - -// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. -type ObjectAttrs struct { - // Bucket is the name of the bucket containing this GCS object. - // This field is read-only. - Bucket string - - // Name is the name of the object within the bucket. - // This field is read-only. - Name string - - // ContentType is the MIME type of the object's content. - ContentType string - - // ContentLanguage is the content language of the object's content. - ContentLanguage string - - // CacheControl is the Cache-Control header to be sent in the response - // headers when serving the object data. - CacheControl string - - // EventBasedHold specifies whether an object is under event-based hold. New - // objects created in a bucket whose DefaultEventBasedHold is set will - // default to that value. - EventBasedHold bool - - // TemporaryHold specifies whether an object is under temporary hold. While - // this flag is set to true, the object is protected against deletion and - // overwrites. - TemporaryHold bool - - // RetentionExpirationTime is a server-determined value that specifies the - // earliest time that the object's retention period expires. - // This is a read-only field. - RetentionExpirationTime time.Time - - // ACL is the list of access control rules for the object. - ACL []ACLRule - - // If not empty, applies a predefined set of access controls. It should be set - // only when writing, copying or composing an object. When copying or composing, - // it acts as the destinationPredefinedAcl parameter. - // PredefinedACL is always empty for ObjectAttrs returned from the service. - // See https://cloud.google.com/storage/docs/json_api/v1/objects/insert - // for valid values. - PredefinedACL string - - // Owner is the owner of the object. This field is read-only. - // - // If non-zero, it is in the form of "user-". - Owner string - - // Size is the length of the object's content. This field is read-only. - Size int64 - - // ContentEncoding is the encoding of the object's content. - ContentEncoding string - - // ContentDisposition is the optional Content-Disposition header of the object - // sent in the response headers. - ContentDisposition string - - // MD5 is the MD5 hash of the object's content. This field is read-only, - // except when used from a Writer. If set on a Writer, the uploaded - // data is rejected if its MD5 hash does not match this field. 
- MD5 []byte - - // CRC32C is the CRC32 checksum of the object's content using the Castagnoli93 - // polynomial. This field is read-only, except when used from a Writer or - // Composer. In those cases, if the SendCRC32C field in the Writer or Composer - // is set to is true, the uploaded data is rejected if its CRC32C hash does - // not match this field. - // - // Note: For a Writer, SendCRC32C must be set to true BEFORE the first call to - // Writer.Write() in order to send the checksum. - CRC32C uint32 - - // MediaLink is an URL to the object's content. This field is read-only. - MediaLink string - - // Metadata represents user-provided metadata, in key/value pairs. - // It can be nil if no metadata is provided. - // - // For object downloads using Reader, metadata keys are sent as headers. - // Therefore, avoid setting metadata keys using characters that are not valid - // for headers. See https://www.rfc-editor.org/rfc/rfc7230#section-3.2.6. - Metadata map[string]string - - // Generation is the generation number of the object's content. - // This field is read-only. - Generation int64 - - // Metageneration is the version of the metadata for this - // object at this generation. This field is used for preconditions - // and for detecting changes in metadata. A metageneration number - // is only meaningful in the context of a particular generation - // of a particular object. This field is read-only. - Metageneration int64 - - // StorageClass is the storage class of the object. This defines - // how objects are stored and determines the SLA and the cost of storage. - // Typical values are "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". - // Defaults to "STANDARD". - // See https://cloud.google.com/storage/docs/storage-classes for all - // valid values. - StorageClass string - - // Created is the time the object was created. This field is read-only. - Created time.Time - - // Deleted is the time the object was deleted. - // If not deleted, it is the zero value. This field is read-only. - Deleted time.Time - - // Updated is the creation or modification time of the object. - // For buckets with versioning enabled, changing an object's - // metadata does not change this property. This field is read-only. - Updated time.Time - - // CustomerKeySHA256 is the base64-encoded SHA-256 hash of the - // customer-supplied encryption key for the object. It is empty if there is - // no customer-supplied encryption key. - // See // https://cloud.google.com/storage/docs/encryption for more about - // encryption in Google Cloud Storage. - CustomerKeySHA256 string - - // Cloud KMS key name, in the form - // projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object, - // if the object is encrypted by such a key. - // - // Providing both a KMSKeyName and a customer-supplied encryption key (via - // ObjectHandle.Key) will result in an error when writing an object. - KMSKeyName string - - // Prefix is set only for ObjectAttrs which represent synthetic "directory - // entries" when iterating over buckets using Query.Delimiter. See - // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be - // populated. - Prefix string - - // Etag is the HTTP/1.1 Entity tag for the object. - // This field is read-only. - Etag string - - // A user-specified timestamp which can be applied to an object. This is - // typically set in order to use the CustomTimeBefore and DaysSinceCustomTime - // LifecycleConditions to manage object lifecycles. 
- // - // CustomTime cannot be removed once set on an object. It can be updated to a - // later value but not to an earlier one. For more information see - // https://cloud.google.com/storage/docs/metadata#custom-time . - CustomTime time.Time - - // ComponentCount is the number of objects contained within a composite object. - // For non-composite objects, the value will be zero. - // This field is read-only. - ComponentCount int64 - - // Retention contains the retention configuration for this object. - // ObjectRetention cannot be configured or reported through the gRPC API. - Retention *ObjectRetention -} - -// ObjectRetention contains the retention configuration for this object. -type ObjectRetention struct { - // Mode is the retention policy's mode on this object. Valid values are - // "Locked" and "Unlocked". - // Locked retention policies cannot be changed. Unlocked policies require an - // override to change. - Mode string - - // RetainUntil is the time this object will be retained until. - RetainUntil time.Time -} - -func (r *ObjectRetention) toRawObjectRetention() *raw.ObjectRetention { - if r == nil { - return nil - } - return &raw.ObjectRetention{ - Mode: r.Mode, - RetainUntilTime: r.RetainUntil.Format(time.RFC3339), - } -} - -func toObjectRetention(r *raw.ObjectRetention) *ObjectRetention { - if r == nil { - return nil - } - return &ObjectRetention{ - Mode: r.Mode, - RetainUntil: convertTime(r.RetainUntilTime), - } -} - -// convertTime converts a time in RFC3339 format to time.Time. -// If any error occurs in parsing, the zero-value time.Time is silently returned. -func convertTime(t string) time.Time { - var r time.Time - if t != "" { - r, _ = time.Parse(time.RFC3339, t) - } - return r -} - -func convertProtoTime(t *timestamppb.Timestamp) time.Time { - var r time.Time - if t != nil { - r = t.AsTime() - } - return r -} - -func toProtoTimestamp(t time.Time) *timestamppb.Timestamp { - if t.IsZero() { - return nil - } - - return timestamppb.New(t) -} - -func newObject(o *raw.Object) *ObjectAttrs { - if o == nil { - return nil - } - owner := "" - if o.Owner != nil { - owner = o.Owner.Entity - } - md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) - crc32c, _ := decodeUint32(o.Crc32c) - var sha256 string - if o.CustomerEncryption != nil { - sha256 = o.CustomerEncryption.KeySha256 - } - return &ObjectAttrs{ - Bucket: o.Bucket, - Name: o.Name, - ContentType: o.ContentType, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - EventBasedHold: o.EventBasedHold, - TemporaryHold: o.TemporaryHold, - RetentionExpirationTime: convertTime(o.RetentionExpirationTime), - ACL: toObjectACLRules(o.Acl), - Owner: owner, - ContentEncoding: o.ContentEncoding, - ContentDisposition: o.ContentDisposition, - Size: int64(o.Size), - MD5: md5, - CRC32C: crc32c, - MediaLink: o.MediaLink, - Metadata: o.Metadata, - Generation: o.Generation, - Metageneration: o.Metageneration, - StorageClass: o.StorageClass, - CustomerKeySHA256: sha256, - KMSKeyName: o.KmsKeyName, - Created: convertTime(o.TimeCreated), - Deleted: convertTime(o.TimeDeleted), - Updated: convertTime(o.Updated), - Etag: o.Etag, - CustomTime: convertTime(o.CustomTime), - ComponentCount: o.ComponentCount, - Retention: toObjectRetention(o.Retention), - } -} - -func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { - if o == nil { - return nil - } - return &ObjectAttrs{ - Bucket: parseBucketName(o.Bucket), - Name: o.Name, - ContentType: o.ContentType, - ContentLanguage: o.ContentLanguage, - CacheControl: o.CacheControl, - 
EventBasedHold: o.GetEventBasedHold(), - TemporaryHold: o.TemporaryHold, - RetentionExpirationTime: convertProtoTime(o.GetRetentionExpireTime()), - ACL: toObjectACLRulesFromProto(o.GetAcl()), - Owner: o.GetOwner().GetEntity(), - ContentEncoding: o.ContentEncoding, - ContentDisposition: o.ContentDisposition, - Size: int64(o.Size), - MD5: o.GetChecksums().GetMd5Hash(), - CRC32C: o.GetChecksums().GetCrc32C(), - Metadata: o.Metadata, - Generation: o.Generation, - Metageneration: o.Metageneration, - StorageClass: o.StorageClass, - // CustomerKeySHA256 needs to be presented as base64 encoded, but the response from gRPC is not. - CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()), - KMSKeyName: o.GetKmsKey(), - Created: convertProtoTime(o.GetCreateTime()), - Deleted: convertProtoTime(o.GetDeleteTime()), - Updated: convertProtoTime(o.GetUpdateTime()), - CustomTime: convertProtoTime(o.GetCustomTime()), - ComponentCount: int64(o.ComponentCount), - } -} - -// Decode a uint32 encoded in Base64 in big-endian byte order. -func decodeUint32(b64 string) (uint32, error) { - d, err := base64.StdEncoding.DecodeString(b64) - if err != nil { - return 0, err - } - if len(d) != 4 { - return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) - } - return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil -} - -// Encode a uint32 as Base64 in big-endian byte order. -func encodeUint32(u uint32) string { - b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} - return base64.StdEncoding.EncodeToString(b) -} - -// Projection is enumerated type for Query.Projection. -type Projection int - -const ( - // ProjectionDefault returns all fields of objects. - ProjectionDefault Projection = iota - - // ProjectionFull returns all fields of objects. - ProjectionFull - - // ProjectionNoACL returns all fields of objects except for Owner and ACL. - ProjectionNoACL -) - -func (p Projection) String() string { - switch p { - case ProjectionFull: - return "full" - case ProjectionNoACL: - return "noAcl" - default: - return "" - } -} - -// Query represents a query to filter objects from a bucket. -type Query struct { - // Delimiter returns results in a directory-like fashion. - // Results will contain only objects whose names, aside from the - // prefix, do not contain delimiter. Objects whose names, - // aside from the prefix, contain delimiter will have their name, - // truncated after the delimiter, returned in prefixes. - // Duplicate prefixes are omitted. - // Must be set to / when used with the MatchGlob parameter to filter results - // in a directory-like mode. - // Optional. - Delimiter string - - // Prefix is the prefix filter to query objects - // whose names begin with this prefix. - // Optional. - Prefix string - - // Versions indicates whether multiple versions of the same - // object will be included in the results. - Versions bool - - // attrSelection is used to select only specific fields to be returned by - // the query. It is set by the user calling SetAttrSelection. These - // are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON - // clients respectively. - attrSelection []string - - // StartOffset is used to filter results to objects whose names are - // lexicographically equal to or after startOffset. If endOffset is also set, - // the objects listed will have names between startOffset (inclusive) and - // endOffset (exclusive). 
- StartOffset string - - // EndOffset is used to filter results to objects whose names are - // lexicographically before endOffset. If startOffset is also set, the objects - // listed will have names between startOffset (inclusive) and endOffset (exclusive). - EndOffset string - - // Projection defines the set of properties to return. It will default to ProjectionFull, - // which returns all properties. Passing ProjectionNoACL will omit Owner and ACL, - // which may improve performance when listing many objects. - Projection Projection - - // IncludeTrailingDelimiter controls how objects which end in a single - // instance of Delimiter (for example, if Query.Delimiter = "/" and the - // object name is "foo/bar/") are included in the results. By default, these - // objects only show up as prefixes. If IncludeTrailingDelimiter is set to - // true, they will also be included as objects and their metadata will be - // populated in the returned ObjectAttrs. - IncludeTrailingDelimiter bool - - // MatchGlob is a glob pattern used to filter results (for example, foo*bar). See - // https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob - // for syntax details. When Delimiter is set in conjunction with MatchGlob, - // it must be set to /. - MatchGlob string - - // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of - // prefixes returned by the query. Only applicable if Delimiter is set to /. - // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. - IncludeFoldersAsPrefixes bool -} - -// attrToFieldMap maps the field names of ObjectAttrs to the underlying field -// names in the API call. Only the ObjectAttrs field names are visible to users -// because they are already part of the public API of the package. -var attrToFieldMap = map[string]string{ - "Bucket": "bucket", - "Name": "name", - "ContentType": "contentType", - "ContentLanguage": "contentLanguage", - "CacheControl": "cacheControl", - "EventBasedHold": "eventBasedHold", - "TemporaryHold": "temporaryHold", - "RetentionExpirationTime": "retentionExpirationTime", - "ACL": "acl", - "Owner": "owner", - "ContentEncoding": "contentEncoding", - "ContentDisposition": "contentDisposition", - "Size": "size", - "MD5": "md5Hash", - "CRC32C": "crc32c", - "MediaLink": "mediaLink", - "Metadata": "metadata", - "Generation": "generation", - "Metageneration": "metageneration", - "StorageClass": "storageClass", - "CustomerKeySHA256": "customerEncryption", - "KMSKeyName": "kmsKeyName", - "Created": "timeCreated", - "Deleted": "timeDeleted", - "Updated": "updated", - "Etag": "etag", - "CustomTime": "customTime", - "ComponentCount": "componentCount", - "Retention": "retention", -} - -// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field -// names in the protobuf Object message. 
-var attrToProtoFieldMap = map[string]string{ - "Name": "name", - "Bucket": "bucket", - "Etag": "etag", - "Generation": "generation", - "Metageneration": "metageneration", - "StorageClass": "storage_class", - "Size": "size", - "ContentEncoding": "content_encoding", - "ContentDisposition": "content_disposition", - "CacheControl": "cache_control", - "ACL": "acl", - "ContentLanguage": "content_language", - "Deleted": "delete_time", - "ContentType": "content_type", - "Created": "create_time", - "CRC32C": "checksums.crc32c", - "MD5": "checksums.md5_hash", - "Updated": "update_time", - "KMSKeyName": "kms_key", - "TemporaryHold": "temporary_hold", - "RetentionExpirationTime": "retention_expire_time", - "Metadata": "metadata", - "EventBasedHold": "event_based_hold", - "Owner": "owner", - "CustomerKeySHA256": "customer_encryption", - "CustomTime": "custom_time", - "ComponentCount": "component_count", - // MediaLink was explicitly excluded from the proto as it is an HTTP-ism. - // "MediaLink": "mediaLink", - // TODO: add object retention - b/308194853 -} - -// SetAttrSelection makes the query populate only specific attributes of -// objects. When iterating over objects, if you only need each object's name -// and size, pass []string{"Name", "Size"} to this method. Only these fields -// will be fetched for each object across the network; the other fields of -// ObjectAttr will remain at their default values. This is a performance -// optimization; for more information, see -// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance -func (q *Query) SetAttrSelection(attrs []string) error { - // Validate selections. - for _, attr := range attrs { - // If the attr is acceptable for one of the two sets, then it is OK. - // If it is not acceptable for either, then return an error. - // The respective masking implementations ignore unknown attrs which - // makes switching between transports a little easier. - _, okJSON := attrToFieldMap[attr] - _, okGRPC := attrToProtoFieldMap[attr] - - if !okJSON && !okGRPC { - return fmt.Errorf("storage: attr %v is not valid", attr) - } - } - - q.attrSelection = attrs - - return nil -} - -func (q *Query) toFieldSelection() string { - if q == nil || len(q.attrSelection) == 0 { - return "" - } - fieldSet := make(map[string]bool) - - for _, attr := range q.attrSelection { - field, ok := attrToFieldMap[attr] - if !ok { - // Future proofing, skip unknown fields, let SetAttrSelection handle - // error modes. - continue - } - fieldSet[field] = true - } - - var s string - if len(fieldSet) > 0 { - var b bytes.Buffer - b.WriteString("prefixes,items(") - first := true - for field := range fieldSet { - if !first { - b.WriteString(",") - } - first = false - b.WriteString(field) - } - b.WriteString(")") - s = b.String() - } - return s -} - -func (q *Query) toFieldMask() *fieldmaskpb.FieldMask { - // The default behavior with no Query is ProjectionDefault (i.e. ProjectionFull). - if q == nil { - return &fieldmaskpb.FieldMask{Paths: []string{"*"}} - } - - // User selected attributes via q.SetAttrSeleciton. This takes precedence - // over the Projection. - if numSelected := len(q.attrSelection); numSelected > 0 { - protoFieldPaths := make([]string, 0, numSelected) - - for _, attr := range q.attrSelection { - pf, ok := attrToProtoFieldMap[attr] - if !ok { - // Future proofing, skip unknown fields, let SetAttrSelection - // handle error modes. 
- continue - } - protoFieldPaths = append(protoFieldPaths, pf) - } - - return &fieldmaskpb.FieldMask{Paths: protoFieldPaths} - } - - // ProjectDefault == ProjectionFull which means all fields. - fm := &fieldmaskpb.FieldMask{Paths: []string{"*"}} - if q.Projection == ProjectionNoACL { - paths := make([]string, 0, len(attrToProtoFieldMap)-2) // omitting two fields - for _, f := range attrToProtoFieldMap { - // Skip the acl and owner fields for "NoACL". - if f == "acl" || f == "owner" { - continue - } - paths = append(paths, f) - } - fm.Paths = paths - } - - return fm -} - -// Conditions constrain methods to act on specific generations of -// objects. -// -// The zero value is an empty set of constraints. Not all conditions or -// combinations of conditions are applicable to all methods. -// See https://cloud.google.com/storage/docs/generations-preconditions -// for details on how these operate. -type Conditions struct { - // Generation constraints. - // At most one of the following can be set to a non-zero value. - - // GenerationMatch specifies that the object must have the given generation - // for the operation to occur. - // If GenerationMatch is zero, it has no effect. - // Use DoesNotExist to specify that the object does not exist in the bucket. - GenerationMatch int64 - - // GenerationNotMatch specifies that the object must not have the given - // generation for the operation to occur. - // If GenerationNotMatch is zero, it has no effect. - // This condition only works for object reads if the WithJSONReads client - // option is set. - GenerationNotMatch int64 - - // DoesNotExist specifies that the object must not exist in the bucket for - // the operation to occur. - // If DoesNotExist is false, it has no effect. - DoesNotExist bool - - // Metadata generation constraints. - // At most one of the following can be set to a non-zero value. - - // MetagenerationMatch specifies that the object must have the given - // metageneration for the operation to occur. - // If MetagenerationMatch is zero, it has no effect. - MetagenerationMatch int64 - - // MetagenerationNotMatch specifies that the object must not have the given - // metageneration for the operation to occur. - // If MetagenerationNotMatch is zero, it has no effect. - // This condition only works for object reads if the WithJSONReads client - // option is set. - MetagenerationNotMatch int64 -} - -func (c *Conditions) validate(method string) error { - if *c == (Conditions{}) { - return fmt.Errorf("storage: %s: empty conditions", method) - } - if !c.isGenerationValid() { - return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) - } - if !c.isMetagenerationValid() { - return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) - } - return nil -} - -func (c *Conditions) isGenerationValid() bool { - n := 0 - if c.GenerationMatch != 0 { - n++ - } - if c.GenerationNotMatch != 0 { - n++ - } - if c.DoesNotExist { - n++ - } - return n <= 1 -} - -func (c *Conditions) isMetagenerationValid() bool { - return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 -} - -// applyConds modifies the provided call using the conditions in conds. -// call is something that quacks like a *raw.WhateverCall. 
-func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { - cval := reflect.ValueOf(call) - if gen >= 0 { - if !setGeneration(cval, gen) { - return fmt.Errorf("storage: %s: generation not supported", method) - } - } - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - switch { - case conds.GenerationMatch != 0: - if !setIfGenerationMatch(cval, conds.GenerationMatch) { - return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) - } - case conds.GenerationNotMatch != 0: - if !setIfGenerationNotMatch(cval, conds.GenerationNotMatch) { - return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) - } - case conds.DoesNotExist: - if !setIfGenerationMatch(cval, int64(0)) { - return fmt.Errorf("storage: %s: DoesNotExist not supported", method) - } - } - switch { - case conds.MetagenerationMatch != 0: - if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} - -func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { - if gen >= 0 { - call.SourceGeneration(gen) - } - if conds == nil { - return nil - } - if err := conds.validate("CopyTo source"); err != nil { - return err - } - switch { - case conds.GenerationMatch != 0: - call.IfSourceGenerationMatch(conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) - case conds.DoesNotExist: - call.IfSourceGenerationMatch(0) - } - switch { - case conds.MetagenerationMatch != 0: - call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) - } - return nil -} - -func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error { - if gen >= 0 { - call.SourceGeneration = gen - } - if conds == nil { - return nil - } - if err := conds.validate("CopyTo source"); err != nil { - return err - } - switch { - case conds.GenerationMatch != 0: - call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch) - case conds.GenerationNotMatch != 0: - call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch) - case conds.DoesNotExist: - call.IfSourceGenerationMatch = proto.Int64(0) - } - switch { - case conds.MetagenerationMatch != 0: - call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch) - case conds.MetagenerationNotMatch != 0: - call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch) - } - return nil -} - -// setGeneration sets Generation on a *raw.WhateverCall. -// We can't use anonymous interfaces because the return type is -// different, since the field setters are builders. -// We also make sure to supply a compile-time constant to MethodByName; -// otherwise, the Go Linker will disable dead code elimination, leading -// to larger binaries for all packages that import storage. -func setGeneration(cval reflect.Value, value interface{}) bool { - return setCondition(cval.MethodByName("Generation"), value) -} - -// setIfGenerationMatch sets IfGenerationMatch on a *raw.WhateverCall. -// See also setGeneration. 
-func setIfGenerationMatch(cval reflect.Value, value interface{}) bool { - return setCondition(cval.MethodByName("IfGenerationMatch"), value) -} - -// setIfGenerationNotMatch sets IfGenerationNotMatch on a *raw.WhateverCall. -// See also setGeneration. -func setIfGenerationNotMatch(cval reflect.Value, value interface{}) bool { - return setCondition(cval.MethodByName("IfGenerationNotMatch"), value) -} - -// setIfMetagenerationMatch sets IfMetagenerationMatch on a *raw.WhateverCall. -// See also setGeneration. -func setIfMetagenerationMatch(cval reflect.Value, value interface{}) bool { - return setCondition(cval.MethodByName("IfMetagenerationMatch"), value) -} - -// setIfMetagenerationNotMatch sets IfMetagenerationNotMatch on a *raw.WhateverCall. -// See also setGeneration. -func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool { - return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value) -} - -func setCondition(setter reflect.Value, value interface{}) bool { - if setter.IsValid() { - setter.Call([]reflect.Value{reflect.ValueOf(value)}) - } - return setter.IsValid() -} - -// Retryer returns an object handle that is configured with custom retry -// behavior as specified by the options that are passed to it. All operations -// on the new handle will use the customized retry configuration. -// These retry options will merge with the bucket's retryer (if set) for the -// returned handle. Options passed into this method will take precedence over -// retry options on the bucket and client. Note that you must explicitly pass in -// each option you want to override. -func (o *ObjectHandle) Retryer(opts ...RetryOption) *ObjectHandle { - o2 := *o - var retry *retryConfig - if o.retry != nil { - // merge the options with the existing retry - retry = o.retry - } else { - retry = &retryConfig{} - } - for _, opt := range opts { - opt.apply(retry) - } - o2.retry = retry - o2.acl.retry = retry - return &o2 -} - -// SetRetry configures the client with custom retry behavior as specified by the -// options that are passed to it. All operations using this client will use the -// customized retry configuration. -// This should be called once before using the client for network operations, as -// there could be indeterminate behaviour with operations in progress. -// Retry options set on a bucket or object handle will take precedence over -// these options. -func (c *Client) SetRetry(opts ...RetryOption) { - var retry *retryConfig - if c.retry != nil { - // merge the options with the existing retry - retry = c.retry - } else { - retry = &retryConfig{} - } - for _, opt := range opts { - opt.apply(retry) - } - c.retry = retry -} - -// RetryOption allows users to configure non-default retry behavior for API -// calls made to GCS. -type RetryOption interface { - apply(config *retryConfig) -} - -// WithBackoff allows configuration of the backoff timing used for retries. -// Available configuration options (Initial, Max and Multiplier) are described -// at https://pkg.go.dev/github.com/googleapis/gax-go/v2#Backoff. If any fields -// are not supplied by the user, gax default values will be used. -func WithBackoff(backoff gax.Backoff) RetryOption { - return &withBackoff{ - backoff: backoff, - } -} - -type withBackoff struct { - backoff gax.Backoff -} - -func (wb *withBackoff) apply(config *retryConfig) { - config.backoff = &wb.backoff -} - -// WithMaxAttempts configures the maximum number of times an API call can be made -// in the case of retryable errors. 
-// For example, if you set WithMaxAttempts(5), the operation will be attempted up to 5 -// times total (initial call plus 4 retries). -// Without this setting, operations will continue retrying indefinitely -// until either the context is canceled or a deadline is reached. -func WithMaxAttempts(maxAttempts int) RetryOption { - return &withMaxAttempts{ - maxAttempts: maxAttempts, - } -} - -type withMaxAttempts struct { - maxAttempts int -} - -func (wb *withMaxAttempts) apply(config *retryConfig) { - config.maxAttempts = &wb.maxAttempts -} - -// RetryPolicy describes the available policies for which operations should be -// retried. The default is `RetryIdempotent`. -type RetryPolicy int - -const ( - // RetryIdempotent causes only idempotent operations to be retried when the - // service returns a transient error. Using this policy, fully idempotent - // operations (such as `ObjectHandle.Attrs()`) will always be retried. - // Conditionally idempotent operations (for example `ObjectHandle.Update()`) - // will be retried only if the necessary conditions have been supplied (in - // the case of `ObjectHandle.Update()` this would mean supplying a - // `Conditions.MetagenerationMatch` condition is required). - RetryIdempotent RetryPolicy = iota - - // RetryAlways causes all operations to be retried when the service returns a - // transient error, regardless of idempotency considerations. - RetryAlways - - // RetryNever causes the client to not perform retries on failed operations. - RetryNever -) - -// WithPolicy allows the configuration of which operations should be performed -// with retries for transient errors. -func WithPolicy(policy RetryPolicy) RetryOption { - return &withPolicy{ - policy: policy, - } -} - -type withPolicy struct { - policy RetryPolicy -} - -func (ws *withPolicy) apply(config *retryConfig) { - config.policy = ws.policy -} - -// WithErrorFunc allows users to pass a custom function to the retryer. Errors -// will be retried if and only if `shouldRetry(err)` returns true. -// By default, the following errors are retried (see ShouldRetry for the default -// function): -// -// - HTTP responses with codes 408, 429, 502, 503, and 504. -// -// - Transient network errors such as connection reset and io.ErrUnexpectedEOF. -// -// - Errors which are considered transient using the Temporary() interface. -// -// - Wrapped versions of these errors. -// -// This option can be used to retry on a different set of errors than the -// default. Users can use the default ShouldRetry function inside their custom -// function if they only want to make minor modifications to default behavior. 
-func WithErrorFunc(shouldRetry func(err error) bool) RetryOption { - return &withErrorFunc{ - shouldRetry: shouldRetry, - } -} - -type withErrorFunc struct { - shouldRetry func(err error) bool -} - -func (wef *withErrorFunc) apply(config *retryConfig) { - config.shouldRetry = wef.shouldRetry -} - -type retryConfig struct { - backoff *gax.Backoff - policy RetryPolicy - shouldRetry func(err error) bool - maxAttempts *int -} - -func (r *retryConfig) clone() *retryConfig { - if r == nil { - return nil - } - - var bo *gax.Backoff - if r.backoff != nil { - bo = &gax.Backoff{ - Initial: r.backoff.Initial, - Max: r.backoff.Max, - Multiplier: r.backoff.Multiplier, - } - } - - return &retryConfig{ - backoff: bo, - policy: r.policy, - shouldRetry: r.shouldRetry, - maxAttempts: r.maxAttempts, - } -} - -// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods -// that modifyCall searches for by name. -type composeSourceObj struct { - src *raw.ComposeRequestSourceObjects -} - -func (c composeSourceObj) Generation(gen int64) { - c.src.Generation = gen -} - -func (c composeSourceObj) IfGenerationMatch(gen int64) { - // It's safe to overwrite ObjectPreconditions, since its only field is - // IfGenerationMatch. - c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ - IfGenerationMatch: gen, - } -} - -func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { - if key == nil { - return nil - } - // TODO(jbd): Ask the API team to return a more user-friendly error - // and avoid doing this check at the client level. - if len(key) != 32 { - return errors.New("storage: not a 32-byte AES-256 key") - } - var cs string - if copySource { - cs = "copy-source-" - } - headers.Set("x-goog-"+cs+"encryption-algorithm", aes256Algorithm) - headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) - keyHash := sha256.Sum256(key) - headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) - return nil -} - -// toProtoCommonObjectRequestParams sets customer-supplied encryption to the proto library's CommonObjectRequestParams. -func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequestParams { - if key == nil { - return nil - } - keyHash := sha256.Sum256(key) - return &storagepb.CommonObjectRequestParams{ - EncryptionAlgorithm: aes256Algorithm, - EncryptionKeyBytes: key, - EncryptionKeySha256Bytes: keyHash[:], - } -} - -func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChecksums { - var checksums *storagepb.ObjectChecksums - if sendCRC32C { - checksums = &storagepb.ObjectChecksums{ - Crc32C: proto.Uint32(attrs.CRC32C), - } - } - if len(attrs.MD5) != 0 { - if checksums == nil { - checksums = &storagepb.ObjectChecksums{ - Md5Hash: attrs.MD5, - } - } else { - checksums.Md5Hash = attrs.MD5 - } - } - return checksums -} - -// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. -func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { - o := makeStorageOpts(true, c.retry, "") - return c.tc.GetServiceAccount(ctx, projectID, o...) - -} - -// bucketResourceName formats the given project ID and bucketResourceName ID -// into a Bucket resource name. This is the format necessary for the gRPC API as -// it conforms to the Resource-oriented design practices in https://google.aip.dev/121. 
-func bucketResourceName(p, b string) string { - return fmt.Sprintf("projects/%s/buckets/%s", p, b) -} - -// parseBucketName strips the leading resource path segment and returns the -// bucket ID, which is the simple Bucket name typical of the v1 API. -func parseBucketName(b string) string { - sep := strings.LastIndex(b, "/") - return b[sep+1:] -} - -// parseProjectNumber consume the given resource name and parses out the project -// number if one is present i.e. it is not a project ID. -func parseProjectNumber(r string) uint64 { - projectID := regexp.MustCompile(`projects\/([0-9]+)\/?`) - if matches := projectID.FindStringSubmatch(r); len(matches) > 0 { - // Capture group follows the matched segment. For example: - // input: projects/123/bars/456 - // output: [projects/123/, 123] - number, err := strconv.ParseUint(matches[1], 10, 64) - if err != nil { - return 0 - } - return number - } - - return 0 -} - -// toProjectResource accepts a project ID and formats it as a Project resource -// name. -func toProjectResource(project string) string { - return fmt.Sprintf("projects/%s", project) -} - -// setConditionProtoField uses protobuf reflection to set named condition field -// to the given condition value if supported on the protobuf message. -func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { - fields := m.Descriptor().Fields() - if rf := fields.ByName(protoreflect.Name(f)); rf != nil { - m.Set(rf, protoreflect.ValueOfInt64(v)) - return true - } - - return false -} - -// applyCondsProto validates and attempts to set the conditions on a protobuf -// message using protobuf reflection. -func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error { - rmsg := msg.ProtoReflect() - - if gen >= 0 { - if !setConditionProtoField(rmsg, "generation", gen) { - return fmt.Errorf("storage: %s: generation not supported", method) - } - } - if conds == nil { - return nil - } - if err := conds.validate(method); err != nil { - return err - } - - switch { - case conds.GenerationMatch != 0: - if !setConditionProtoField(rmsg, "if_generation_match", conds.GenerationMatch) { - return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) - } - case conds.GenerationNotMatch != 0: - if !setConditionProtoField(rmsg, "if_generation_not_match", conds.GenerationNotMatch) { - return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) - } - case conds.DoesNotExist: - if !setConditionProtoField(rmsg, "if_generation_match", int64(0)) { - return fmt.Errorf("storage: %s: DoesNotExist not supported", method) - } - } - switch { - case conds.MetagenerationMatch != 0: - if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) - } - case conds.MetagenerationNotMatch != 0: - if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) { - return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) - } - } - return nil -} diff --git a/vendor/cloud.google.com/go/storage/storage.replay b/vendor/cloud.google.com/go/storage/storage.replay deleted file mode 100644 index 07ed1bfaf..000000000 --- a/vendor/cloud.google.com/go/storage/storage.replay +++ /dev/null @@ -1,30067 +0,0 @@ -{ - "Initial": "IjIwMTktMDUtMDJUMjI6MjM6NTMuNDAzNDMyMDEzWiI=", - "Version": "0.2", - "Converter": { - "ClearHeaders": [ - "^X-Goog-.*Encryption-Key$" - ], - "RemoveRequestHeaders": [ - "^Authorization$", - 
"^Proxy-Authorization$", - "^Connection$", - "^Content-Type$", - "^Date$", - "^Host$", - "^Transfer-Encoding$", - "^Via$", - "^X-Forwarded-.*$", - "^X-Cloud-Trace-Context$", - "^X-Goog-Api-Client$", - "^X-Google-.*$", - "^X-Gfe-.*$" - ], - "RemoveResponseHeaders": [ - "^X-Google-.*$", - "^X-Gfe-.*$" - ], - "ClearParams": null, - "RemoveParams": null - }, - "Entries": [ - { - "ID": "f5f231bed6e14b7f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:54 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrZvgYBWgsCwPaGI9bo1ccC0WCBc8kJgydTwioDtXR9xps4HiDoKXI-vjYUl876SMqF0JhmhaEBgvxrIL9Y989YCFrH65xGys_r1JbPdi9M9N0kS3M" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "9a9914424ef59619", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:55 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" 
- ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqFvRYrCleVqpn0QshSvzW5I1-8o7N6vGYh8o5G1f-AHnsX2N_x-NKJrvlxnXqm9auw5gMoWFaJTSTtKL5y85WlQ_eAjmmlrkD4tbHYBZJ386xgaZw" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "17f2abbdd781a33b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:55 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:23:55 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoYoTmTG5mxpFGPvmECUTlGMlQwhfmqGsZtBtZ9xV89Pw3q-p5BBeX_3imdofr_7EBT7nBm4v5alpg45Zi8a8ET28qBH2xfNe4n15HR-1fhGou2wQU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "6752f5a9a036af11", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:23:56 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur4bFsk4ylv96GsTkuDKG--hVaCR_UEhZ_fAzMGt5Eu5ZKncHOLjU_f2PcNP9saFGW-UkH9jXwt_nuR0G2zXOBjMJmLdd7Ml61bGMMrJeVa0OtcGpM" - ] - }, - "Body": "" - } - }, - { - "ID": "9c25646df7aacad9", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "543" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "926" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:56 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq6CjN9PjjzT3LmHxW_tU_ciQ1rahetoQGbX_gX8EC5il7tPJi2yxi5VZxnDNrp1h14b7Ix8tnvtkHufAyO1-lMRutdHK5GSzonff78Nm6KPAKN5fU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "f795b9adcb1b546e", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2872" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:56 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:23:56 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up2-mWQyRDbFSpF6U96vQpaBYr74NgiUWh3-KZnLWaFYnhQti1tgKWNtL15YgK8blaRSnzGeACPA6jNuM34yhr7bxztrdN2tobEQAzD5RVgzpqx14w" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "2ee3f84c4e4045fb", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:23:57 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UplJEr-Hxa3hDFT5ozLEHYhHfaxlYFpc9Vwm8AL831-w_7BBgxHjjEsU8Br_uLnLes0h9hz37iuE9V8uVZ2liHY7ZD4piNH31oyapjCtwyXrukIP94" - ] - }, - "Body": "" - } - }, - { - "ID": "16f19dbf8e206756", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:57 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:23:57 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqvp5XjvB8nhNuz-bTeN9OklTfiBGldYKkcY13JF6oUfpV0z_jwoEQD3B3Ss3wWpaSmZfePjo7fkkr-hP3jbrUazNHaqQliiHqOBSNmSoPmwJpzfOI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "949f5ce411d6f672", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - 
"3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:58 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpVeVcmmuUyOt3Hbja89_Ewi6GRsJtRduqK93OT4Ys1aK5GqDWeGxyDbczUyRLeUYvZgtJzYLwVOOUszqAF4ipSXgZ1L_byd9cJ7ttVfQ_ceQBXxY4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJva
mVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "b75303fbdafb66d0", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "64" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2493" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:58 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqDnNlC3m95GplHjE79aqzhtwgfJCWQjHaCFG4i7qmTFliz2gdE4OiOnKAPIoNqxEngE35065YXNYA65aMSSeEluDKmQ__rJXcS_DpRdoYP4rZIyPo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OC40MzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxhYmVscyI6eyJlbXB0eSI6IiIsImwxIjoidjEifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "831805b62d969707", - "Request": { - "Method": "PATCH", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "93" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsYWJlbHMiOnsiYWJzZW50IjpudWxsLCJlbXB0eSI6bnVsbCwibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2495" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:59 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq8yNb3V9Kq8zPa_pdVrJIYv83v4fu6xAHwktfTz_Cy4K1rpi8xrDzYmw5wICaazfMcAiYPhM8r4Y6WkeOyeCRInvkJ6ndduhNN_dgu1U59uI5F3Qc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS4wMzJaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InBy
b2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQU09In0=" - } - }, - { - "ID": "8a816d061fe9e7e0", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "77" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2570" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:23:59 GMT" - ], - "Etag": [ - "CAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrbjywEmDPkqxln7-Nx_8ngRxoWvncfCx1fGVpPZGEjjmg8OJgv0uaczxapjlNeEcvMnqWI_RVzG6_588QaO8nCnPVnE2kkIek3D_t6UNL9CCPYLRc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS41MzBaIiwibWV0YWdlbmVyYXRpb24iOiI0IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FRPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVE9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBUT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVE9In0=" - } - }, - { - "ID": "6a60397ebae323e7", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiYnVja2V0UG9saWN5T25seSJ9Cg==", - "dGVzdA==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3305" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:00 GMT" - ], - "Etag": [ - "CPmu4bnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UokOvJnDKKHAQOa8mJLrxFpWWvQ2U_BC_3uI0Z4x870Q068evHio_t_YudbSq614h77-ofhBsyHpoknWnm_YrnXxkHzopreKoMBykFIcbsSB8TDKEE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRQb2xpY3lPbmx5LzE1NTY4MzU4NDAwNTUxNjEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5IiwibmFtZSI6ImJ1Y2tldFBvbGljeU9ubHkiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMC4wNTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDAuMDU0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAwLjA1NFoiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJDWTlyelVZaDAzUEszazZESmllMDlnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seT9nZW5lcmF0aW9uPTE1NTY4MzU4NDAwNTUxNjEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b
3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJocUJ5d0E9PSIsImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0=" - } - }, - { - "ID": "32d392d1da32f27b", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl/user-test%40example.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "111" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:00 GMT" - ], - "Etag": [ - "CPmu4bnx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], 
- "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoau0xgFp6ib5wM0bBWjRlklvDRPOu0VZ-LFCeENUWXutmkXSfgUbtr2Nuefb7Pm_yLvCNqtB9B6k_N1V7AlvkN4_JEz67ZSXQ_sAD5L1teQIpGiqA" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InVzZXItdGVzdEBleGFtcGxlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJlbWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9" - } - }, - { - "ID": "6e3d0eda38a3ab6e", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "59" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6dHJ1ZX19fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "663" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:01 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpSPFJHLzusxOr_OvhStJlWEpOs2EuthMO0Ys6pS9bsQeP0fthp_VUZfa8_sN8TX6PJYpxIdFlxB2QUaIujot1cUrssoU74XFrAwoqhlmiE5y9Aw-w" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMS4yNDJaIiwibWV0YWdlbmVyYXRpb24iOiI1IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOnRydWUsImxvY2tlZFRpbWUiOiIyMDE5LTA3LTMxVDIyOjI0OjAxLjIzMFoifX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FVPSJ9" - } - }, - { - "ID": "8f8dbb687dc49edb", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13230" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:01 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:01 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpGpW2zLlN7nAgV5IVkKU3kx4QWHCkzAgMa-QPC1PyCol8CP9W605bMUmFMeerbR4enzmeNMvtb4a2HzBPUZ296YfGdtkt_6Guq82E226xzC5TPq4w" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"invalid","message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.INVALID_VALUE, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, errorProtoCode=INVALID_VALUE, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., unnamedArguments=[]}, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., reason=invalid, rpcCode=400} Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only."}}" - } - }, - { - "ID": "42ce6421b2c9fae4", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13358" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqJwxja3nYzWYbg_I5gWvOiow2ORuo8tNA-_Vzw7DX_YVhhb6_p1giUk3WjUHWt-lyDA13adhPGi4BDfIXzQyf-ZL3lsHoa2sNm29BIqRznw4mkAdE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., unnamedArguments=[]}, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., reason=forbidden, rpcCode=403} another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly."}}" - } - }, - { - "ID": "0ca0bddf513110f4", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "45" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnt9fX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "624" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Etag": [ - "CAY=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - 
"X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpdDWPDU9-gPzHEieE0Rqx_40yf8fJLhwAP6fVsdS4F7I7sWj0h-Ti7VoDWciZgI_lgNUB7qyh08wjTAxrTLTsSiIYt2GR6ksxhDRupMoPWni5kXmE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FZPSJ9" - } - }, - { - "ID": "8a8bee740102593d", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2964" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Etag": [ - "CPmu4bnx/eECEAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq59Mf8Ea4fgpDnUzupeIP3bGt3VpyI6HjL4KJtDKAD_h-Ua-AJSX3u3x4TCsx2MZcIVhMs9pW9SWsrIcsvsr3kGt2Je9W87LElbN5dlw02EItBcR4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LT
IwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJidWNrZXRQb2xpY3lPbmx5IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDAwNTUxNjEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwiZXRhZyI6IkNQbXU0Ym54L2VFQ0VBST0ifV19" - } - }, - { - "ID": "91ba2c22c5f5de9a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrwotKay181GHzZsWfp6BzJA4FDOIfK2s1WlzB9p8QsIEX42AtvMhkgLWqSIyEhb-MSv9snqx0WRwUs5sDN3_5NVoFTzQeLkDBR9mbsixI-udc8SLI" - ] - }, - "Body": "" - } - }, - { - "ID": "43fec6c3a8c8cb63", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY29uZGRlbCJ9Cg==", - "Zm9v" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3161" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Etag": [ - "CNHLq7vx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrrYnzZRz4GFV3BsHoF-vv9v2Lc13Wp6-P5iowSZFjqyykVhYZ6CkesIVxA2v2xNCbVeWgCBXCFFt9fje73cis1cECwwqrYLr5QF5bZCy4TsJ-LT8k" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb25kZGVsLzE1NTY4MzU4NDMzNjg0MDEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsIiwibmFtZSI6ImNvbmRkZWwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMy4zNjhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDMuMzY4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAzLjM2OFoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbD9nZW5lcmF0aW9uPTE1NTY4MzU4NDMzNjg0MDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbmRkZWwvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbmRkZWwvMTU1NjgzNTg0MzM2ODQwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMD
EzLTAwMDEvby9jb25kZGVsL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0=" - } - }, - { - "ID": "ba03d9efb1402903", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368400\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12249" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur-mcvaL-eBgBoviwBg_r8LyEEK3JNyFaj0cFy2neOMo1pVkWpkGQ-3gz8vQNtAGoX-Q7_CMYLNv_I0pOoy5iRr-MdYKny5ICdC1s3ji5DwL7sqmn0" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/conddel: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel"}}" - } - }, - { - "ID": "e26af2cd4673cc7c", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationMatch=2\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 412, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12051" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:03 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uopfd5TJBzhGauzgyZW0h-TNFtDHz34k0kjbeuTeQPnMGBaiSFz9FdWA2gxp7qKp-V586voh7kqHgnVSW_QI4bWEuC35pj8NbCmmpNL_9pstpCgckw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" - } - }, - { - "ID": "857f8a30eb55b023", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationNotMatch=1\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 304, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uprv3QMjU4zz49ScYI4YaY9FitNwO7wGMQ1KZ8Y99w4D7keznFfA8M80bMdKvSH55jsWsDoLKcQ1VvIqIEUw88NqSfUM7E5SR_y5zfDaCf_uHSlgPM" - ] - }, - "Body": "" - } 
- }, - { - "ID": "4c69a7dc19302935", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368401\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpoiZa1zqMV-8ZkeUrvT6Xi0uJul6yGWWth5s3YbFtA2p0-6vc_54sNtEbxbnsASZyMDXLelHaCSixgcVT6JDVELrXHDim9gHcjjlSTF6BsHWu181A" - ] - }, - "Body": "" - } - }, - { - "ID": "834f05b1eb2dbc32", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMSJ9Cg==", - "TuDshcL7vdCAXh8L42NvEQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UozIFQVqWdhJxDLMyV7dfkeraSOw2Mw_AsI_aCWnAUudrz4HjQgZ4kbnvKXJfNF3SMQhxzxZHhk1Otmw7PgPxL7HyDkPDyK1DeHDc8GyfY1B3Q3YfA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" - } - }, - { - "ID": "34a1730e8bbe1d09", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMiJ9Cg==", - "55GZ37DvGFQS3PnkEKv3Jg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Etag": [ - "CJ2Xkrzx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVrbP1vUH6TU_zfm_Aaca624z-91MoULnLIBcn9Hg4htv5T5Z6B4XYJMFZUuDjWWeBhR6EpG1UZL4iJe0TJybYQ2CCsB_k63CcOex39xaOIT8UYUA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" - } - }, - { - "ID": "b7c2c8ec1e65fb6d", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9Cg==", - "kT7fkMXrRdrhn2P2+EeT5g==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3366" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Etag": [ - "COqurrzx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrEgFAbU2gAqyKMITI9vr9kAgIBfNY3ZGs1l2_X4e-fVtNb01M9N81N-83LdChVzrk8iB09yuKUKdxRc0nMYrgy7S1fqPwbJdsQ6TJfMB05JJj6qr4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" - } - }, - { - "ID": "0dbae3d4b454f434", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3366" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Etag": [ - "COqurrzx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqk7CzWGAqaT12X_fTymveyPtsPJIPHD814QaAOIQLA_AJo_4OtnQOOXJM2jJhIZ1Co4NgEmboDUG9su4SN5fDTHObNh9elLts9VpUJ9iSYrIsn-Os" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" - } - }, - { - "ID": "3793882b91d38a07", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpzUH_BRjU20OGbDAW2dy0lx9Gk_1Ko7zks6KG6OEpq0pBPfCIBjkCeIZTxKxtvxnOd1hqlZF7SlbNoyNUqX5UFXpZY5NjP5GTeUkm512vaBR5vnDI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" - } - }, - { - "ID": "39924c2826bcd33f", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Etag": [ - "CJ2Xkrzx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrgOr-dpWoY-JJi6vRigJX3Mpi_WQ4zWvWKK382DCP-mzWSnrpbaOzijhTCdNz6pmXskVVs30APwlqZgvb-S6xAwXqKJG5n6832Py4UlTdDR_INTew" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" - } - }, - { - "ID": "89ea9ab5e21a635e", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqj9dj5PbUhIU3baBv98vMViDP-BnVPs0APrFYL4jSbKc7fK6eAGoyDfsccGzxK-t0lGvozizW4ltrga8DXo_oxlZ9-k5a85v9i0PWTFIisPC7xuL8" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "b27c106562362f6b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3446" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoIo3JJa2rfg9_o3bXN_5QU6XU_iIrWYfuEuvKTQL0O5tjIHkODIDd4biyHxjbGNFnrQNbkiEUXWEBlo-3fTVgfFTOWuaPpgae-SafTBP8h5X5ddJ4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImV
udGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" - } - }, - { - "ID": "ca654b46531c4cb2", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3214" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqINI0Ii418NlxoYErOrjhTKb3-G3n-8h1Ryc_4YT4Tksc7WXA9m4CcJWInyhJahYC-UUrM47O5EK-3FUt3toZOZxWlbwhE6Sfnra0H-CqjmvxWyRs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "0186889a60e651db", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3187" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpFTK3QnFf4m9htpY4s3P9_NHuDiQl8InpJoWPOaQHo0XaYnSoSqkH7CcUbm0-sWamsCZbd4DHoXHLCrea3ff9rLpWlptkL0aMKV_Dleb1Xm-j14fQ" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "a253776f79c46fe1", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3446" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoadifz-4B7kC7DvXOyrNa5omK5-DXUNJBtT_d5I1jciPUvGlRm1L1vkvVMU1Y5n9zFig2CF_qqBLlcdvoYCaUeaNUu706L0fKJhJkA8vV5JUvFq0E" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYm
plY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" - } - }, - { - "ID": "dc8959751769ee9c", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3214" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:08 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:08 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqLeV9n2aiAq15pl3CqEWOt3_OYHsxbG0lHFIKr7Emh5btUEXLXl4nZWPi27LinKW7i0LYvp-HZK5b9E742MR_PNFe87treTpz_QgUmchGPKLcdZVM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "23f7f167269db6b2", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3187" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:08 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:08 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo4hY4jSbETgB5jf-AU8_b1sRvfTSrlt_Pt6UXiuTte4GtueVBgDwuMEliwE2-nOSiRE6juXOCbR1LQlRrpek1TLeRRf1cCVbz90YcCIGhWV_zFz0o" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "518c5855f7f2035b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "6581" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGaiKkmr1oBduBvG8APGBVoM_Ve7zHw4TBuyV3QYcFr9SYzEnATEwE5P6BH-yBsVXSmaRLej1a55x18Bra_ZAC7nYh2UKvWM66JGE3U1muaDBabyw" - ] - }, - "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" - } - }, - { - "ID": "5887398d8cd8c906", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3187" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGAvr7ojM3PcmnrTMTX-TXgYKzfo-3gzwYU_l9OaCfpth_ad2lWUJf6w6B8pjEGsAFZJvNueCHYGSJdx6YJ7NAe71oah1xi6lGQvQkWEwAl0fXd7Y" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "81d53e304c7ed90d", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "6581" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur9XEMeCefUS1K7QkIxrxZboQ1zQ5SkrEUZMzrQdNBNAuDQnpDczXzJvF-rZmxFVYSdMySScTTu-FICOdI91b-fQVRomrqAxJwgn60FoObIVVpgkog" - ] - }, - "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" - } - }, - { - "ID": "0be1bdae5ba86bfd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3187" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UocXesX_BUCirPixQFUhYfAIoR0Os309HMGHarzTGrqH1PkmHuGaaiNSH_pJq8nnWUXEnjk1cvuPGJWCF21t7EJQtCIGBWQoPBNV6OpvALZGdpPXuI" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "592a3fdf8b5a83e5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoS2FM55Dkg1PRaNQBzsu5KBns-svmRrQ5byrUGRHgnsBQQDdE3ZznljVTrNq3wDAClddQ4zbCMQSLdqxPPNlom-n1tMg1hO8s6kS8_CTWy2SOM6As" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "b01846866b585241", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:10 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo22tWMx8uW6afXbfFrFT8UZzPbsqPItYMdRAPHNEuksgeqWI2US_xblUG6C8WgcHvfEeR_19eVmBTmW6QE7rmosaIm_raZxW9FuCHHiSi8TJnZ1CI" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gs
erviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1
","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "2eeefee032c6e805", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - 
"Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq55nB00t16AHTo1gSxBFUNplz5SJcGYBOeCPsK-MD5OLO_kHKT3YNZq69AAHpozaWVrTU_jDQqiqMFSoPhhmlF2dR1mcq6Ihk_y8uuFY6FM4O_hmI" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@d
eklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gservicea
ccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "11bfe17bc978cdfd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - 
"Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "9705" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UotgN85lDspseXEgwnXhmfSmio9oOxSzhqtyabApVH0KQz_aVg3DbQ7m0Z0L9SzPzzmNEUfU6xhAT5xQAXf-wvY1NHDFxdf9IJ6YoVNhN0aHN-75hY" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" - } - }, - { - "ID": "c05bfa6f1a43381b", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:11 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - 
], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrVVjXVa2SMY6cjYeIgdcZOivQwE2Crz6UPZs2VXVBFMwReZs7kKbETuTMwuEMHKKm_LIrk-o6jS07MimubSWVxGMzT1BtCacU1X2Go_RckmyJPZso" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "f2932604385db16e", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrADckAOw2vMHdy0qy02f20x-8s_Ue81-NYwSpKeqp6Zx6eCPReLM6qXUyxFz0xHNGob5WJA8J4ctl6ZFzL2fEVwBMNY8xxzpkKOAwSu9n9e0OE63E" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "8581afc6216ad2cb", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"082d70970acc388ab476f32433295486\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:05 GMT" - ], - "X-Goog-Generation": [ - "1556835845049245" - ], - "X-Goog-Hash": [ - "crc32c=jV5AVQ==", - "md5=CC1wlwrMOIq0dvMkMylUhg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - 
"success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrkoYpGTn57WC5t-sItsEdWimp47AtSuqw8autFOD3TirChrdQThg_Fh-kykfgvnTaOyiw1InKeYs0Z2MISmjUpu4uHUUGDKTX6W0zQEuUks7i2BqQ" - ] - }, - "Body": "55GZ37DvGFQS3PnkEKv3Jg==" - } - }, - { - "ID": "ae129e597d5dd3e8", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"082d70970acc388ab476f32433295486\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:05 GMT" - ], - "X-Goog-Generation": [ - "1556835845049245" - ], - "X-Goog-Hash": [ - "crc32c=jV5AVQ==", - "md5=CC1wlwrMOIq0dvMkMylUhg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoaoET13zBO6-kmfarxy5tCZhmqc2oO38xOd6m6OQ55e-MyyKYM35hNakm5xYdWaoUNsKwVCiklp4HtYE8afVObxcDERuTCgyTw-olewCN6VBwd-H0" - ] - }, - "Body": "55GZ37DvGFQS3PnkEKv3Jg==" - } - }, - { - "ID": "11acf26dc7680b3e", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"b9ecff849eb002b78342771c0d47717e\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:05 GMT" - ], - "X-Goog-Generation": [ - "1556835845511018" - ], - "X-Goog-Hash": [ - "crc32c=oeo+FA==", - "md5=uez/hJ6wAreDQnccDUdxfg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up_XtsS2bu4fnGI-Z85gqz1MECYqweRqin42arAyEfpJpZwmadI1-Op9V7n-q3UaYyRtUWFR7an7zKxWhJ_CB6PXz-C55C1nPoI7EWVEV2oXcS-q8c" - ] - }, - 
"Body": "kT7fkMXrRdrhn2P2+EeT5g==" - } - }, - { - "ID": "ff72204b0c538268", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"b9ecff849eb002b78342771c0d47717e\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:05 GMT" - ], - "X-Goog-Generation": [ - "1556835845511018" - ], - "X-Goog-Hash": [ - "crc32c=oeo+FA==", - "md5=uez/hJ6wAreDQnccDUdxfg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqrjLdnTX47-_XGNBlBZ0rEgsIkYJMzixBaVmhjn8IsK66UpQAUO-njMM-WznI-DBNIbXVTcPQKZBNe2ZHZEskfuRjsFwMjtooyUH_PqTfZnARVv8M" - ] - }, - "Body": "kT7fkMXrRdrhn2P2+EeT5g==" - } - }, - { - "ID": "7972502f866e015e", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=0-15" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoNiJIZYFETpngMkDItRU7PtN7Fv4bDU7Kfvndst1JR2eGmW-tOMTfng8Abx6EWwPvvodLkSk-1TM4Gywxzh3c24gPDN4NsQMuh40Mfp0Jvg1syOls" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "251a62254c59cbd1", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - 
"bytes=0-7" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "8" - ], - "Content-Range": [ - "bytes 0-7/16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UobLFpkqodTM1bHuzS0GzRCKZypPQkf3nH1hbOexCjgjAcheZhV_zxJRyLhOYZoW8ABEaVMJLULNrzAm1VZs4JrTdtT46fYIcQe7OBeJp0aHI7Lr5s" - ] - }, - "Body": "TuDshcL7vdA=" - } - }, - { - "ID": "2482617229166e50", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=8-23" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "8" - ], - "Content-Range": [ - "bytes 8-15/16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uon8ZtKgOTqFm36bWHd3hf535jPGQBEX9j2yPrr11_ycAJjfI4A-mFWMTsFRR3nwSo68784B9TUPUjQd0cib0QIrfBjfDcz91p6oPJG3VpV9afywIY" - ] - }, - "Body": "gF4fC+NjbxE=" - } - }, - { - "ID": "74ff0c5848c7dba6", - "Request": { - "Method": "HEAD", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - 
"Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:12 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqKc-08ZJEWVbyaV84xKlnrHkl2PBuBUQP6xtx6TzZW0fsjyZ-SHshiy-QMGCuP1UF4x1ettHyDvRbAleHIXy4y7wMwoUWST2NKs_0oRA0oVqOoKtQ" - ] - }, - "Body": "" - } - }, - { - "ID": "90888164f5d2d40d", - "Request": { - "Method": "HEAD", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoTSlg3r_LSoiqQyJVJDzL-0xNj6jlSNp9zLUmlPtu3xbhCLBDNAxY9CRbyEjw7UudDcsFOe43C3QlsVjoBJln1q179ZspYWXY-fHqjrztAfJkZUpU" - ] - }, - "Body": "" - } - }, - { - "ID": "7961f3dc74fa0663", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=8-" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "8" - ], - "Content-Range": [ - "bytes 8-15/16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], 
- "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqJjC8HFLb8u_EaJVF0_TUOhNWhbooW0oANE8_LPjoIdLcoU42caZe7LjgbTIjCb0Ss3horP2noxirF3u3FGq0Yoh4rjuHBVhwZcemZHnKLgJUUbQQ" - ] - }, - "Body": "gF4fC+NjbxE=" - } - }, - { - "ID": "af8c1fcfa456592a", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=0-31" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqq6ro5-i6q8pIDup0yW35uFEynuLK95P8OSFhj7b70DK33tjv3fkOdkobsfwplOAGuNME2bLjQuyy8mhd2xBbNjyjvXt13Nc48PB4M2t_46RoM3Ak" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "d312c241da5bd348", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Range": [ - "bytes=32-41" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 416, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "167" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 
01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpAvzLs-GgdTPEGM2kvABFTxehSb-DhZQ2Y31nQ9ad0RMmfIMtVGRBXvkXo7FwGAy78ZHMSGWRKG-DYMy6JU9PCM4Tpo8PDmm4-rHa_LVpH2dFN-JM" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+SW52YWxpZFJhbmdlPC9Db2RlPjxNZXNzYWdlPlRoZSByZXF1ZXN0ZWQgcmFuZ2UgY2Fubm90IGJlIHNhdGlzZmllZC48L01lc3NhZ2U+PERldGFpbHM+Ynl0ZXM9MzItNDE8L0RldGFpbHM+PC9FcnJvcj4=" - } - }, - { - "ID": "2bbd010d0173c966", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZDScoUU4zzBoXc21jt6C1vcW1SrU6ELiWodrCID-OoNKVMbecv_DSgHZATQGs4GS-BebkzqdalqTq5C77aKXQMxiks-9A5kyrc8N4aY9L5YndaI8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" - } - }, - { - "ID": "e3522e1cc7aef940", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2570" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Etag": [ - "CAY=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:13 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur5x_r1PKMQrnKbsfUWRR6wBwFQTM8U9mGwyq_fK56rrHE_UoLDxLRpiuaFGLVi9L4Hj67UXH76drA6KQQQgOsMVo0YEl1pVS2P7OKQ64WF4NubRCU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FZPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBWT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBWT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVk9In0=" - } - }, - { - "ID": "77ed46692038573c", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3333" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:14 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpBnZgUt8GyAer5VG-_nEuf3YizRjF2t4MS6vOflI_Vid1-zVpQ99TuphB3pZjPZNI5ZoHJxCmDv3lyLwyhUDvk_p_NO9x8qZTEpjf1EOO5uxRsOxo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDE0NDc5MiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC4xNDRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuMTQ0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0LjE0NFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQxNDQ3OTImYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF
0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvcHktb2JqMS8xNTU2ODM1ODU0MTQ0NzkyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In19" - } - }, - { - "ID": "9187dbb998c64d40", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "31" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb250ZW50RW5jb2RpbmciOiJpZGVudGl0eSJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3299" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:14 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UpyrF7U9KSc3XcJb-T_KGf3Fg2yhFUjJqTl06wpqqhG5IT6chgLCWKnLTuamfHtVq8XcP7acZy4TGyKIEvZ4L36TGTfSM8Zp_JyF8K4rN5p375VBMc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDc0NzU0MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC43NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuNzQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0Ljc0N1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQ3NDc1NDMmYWx0PW1lZGlhIiwiY29udGVudEVuY29kaW5nIjoiaWRlbnRpdHkiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQ3NDc1NDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yY
WdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkNUNmRUQT09IiwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifX0=" - } - }, - { - "ID": "c199412d75fadc45", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "193" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJhY2wiOlt7ImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9XSwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm1ldGFkYXRhIjp7ImtleSI6InZhbHVlIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2046" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:15 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqKO5A_yihnTzTSNbXxyLm-I3MkyC58APsD1U6RZ5_xpjjjL3nXVZIyACkeiDKXRZyU_oP1Yt0FeX23ONp6JeNyoy0J0kW8GwGMgPeN1ATdWGzCMXs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS4yMjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJtZXRhZGF0YSI6eyJrZXkiOiJ2YWx1ZSJ9LCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiIsImRvbWFpbiI6Imdvb2dsZS5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9" - } - }, - { - "ID": "c845025158c6b7d0", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "120" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOm51bGwsImNvbnRlbnRUeXBlIjpudWxsLCJtZXRhZGF0YSI6bnVsbH0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, 
no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1970" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:15 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UryOVdmGzCqBOS2ZZ0nw1SqbHF7qJ1CeTxgb6a1BJyoA42NTNZ8RdEVQGNF5u2hWdSQW79cBOonxjms_MoogyDJVAtKHl1rys87QHF6-750NNNSoEw" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuNTI3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0=" - } - }, - { - "ID": "811b0b9d4980b14f", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY2hlY2tzdW0tb2JqZWN0In0K", - "aGVsbG93b3JsZA==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3305" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:16 GMT" - ], - "Etag": [ - "CIGhrMHx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqJTh_P91pby-vQkRF12GexDihy7TZAlUPamDV_REldvXeqvbrWY_kB0Vcp1lYyuIY7EK2_eMBYYSRMMIVEYz5fxxt51-9Br_UmYvnuRgjhfC16AuA" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jaGVja3N1bS1vYmplY3QvMTU1NjgzNTg1NTk2MjI0MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdCIsIm5hbWUiOiJjaGVja3N1bS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE1Ljk2MVoiLCJzaXplIjoiMTAiLCJtZDVIYXNoIjoiL0Y0RGpUaWxjRElJVkVIbi9uQVFzQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdD9nZW5lcmF0aW9uPTE1NTY4MzU4NTU5NjIyNDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY2hlY2tzdW0tb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWlud
GVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NoZWNrc3VtLW9iamVjdC8xNTU2ODM1ODU1OTYyMjQxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jaGVja3N1bS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJWc3UwZ0E9PSIsImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0=" - } - }, - { - "ID": "69c77b8bdd7cc643", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVyby1vYmplY3QifQo=", - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3240" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:16 GMT" - ], - "Etag": [ - "CLirz8Hx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - 
"X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur2iXVwACD0yFKYjt0WT-lW1Tx6PtpOgDYttPBWBnJZ3CPf4cUK7heI_9SwdzYXoieaRHDj9n3w3M_SExedwQqQ-GTOY9qM9DPmrPy11hEny0WjECc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QiLCJuYW1lIjoiemVyby1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNi41MzZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTYuNTM2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE2LjUzNloiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3Q/Z2VuZXJhdGlvbj0xNTU2ODM1ODU2NTM3MDE2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvLW9iamVjdC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsi
a2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQUFBQUFBPT0iLCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9" - } - }, - { - "ID": "1017883279ca634a", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "98" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "417" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:17 GMT" - ], - "Etag": [ - "CLnS+bvx/eECEAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoKqYaaFUj6b4ezS4lnnQgYv4CHVSsqhVSlLP3QqpItNahh25KnLzbTccK_idrfjKV0gExzr17MvFmoRw7oUIWyJJINmpEXEw5EmHScxipsb3KbWZ8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvYWxsVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0xuUytidngvZUVDRUFRPSJ9" - } - }, - { - "ID": "35f04bf2400be96f", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - 
"Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:24:17 GMT" - ], - "Etag": [ - "\"4a76bf516bfb99c648db7a04e6d51a6d\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:17 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:04 GMT" - ], - "X-Goog-Generation": [ - "1556835844647225" - ], - "X-Goog-Hash": [ - "crc32c=CT6dTA==", - "md5=Sna/UWv7mcZI23oE5tUabQ==" - ], - "X-Goog-Metageneration": [ - "4" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqC9teasrZdeHJo8KLaQQ251d02ORMxXreH3TKcaQp4NWLVbpiAw1GpW9WeSdFgbpWtVDdyzfjE93gAs6uA0kdlDPZsIEfJVK3GFfaZE2aW5aFCn8Y" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEQ==" - } - }, - { - "ID": "d01040504d2a91c4", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoib2JqMSJ9Cg==", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 401, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "30343" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:17 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "Www-Authenticate": [ - "Bearer realm=\"https://accounts.google.com/\"" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqRhLZOcm0K7eNByUzCBgYfgiytuZ6Hj06jPkbkK77LS80OPgO_78AL530-2AcrlLe1fIy0jM0tonLLncLuOszrqKcGgVhqEJuE48-67vqBRACjqmY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1."}}" - } - }, - { - "ID": "db327c92dac99584", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:18 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoa0LrEhOIeq-iYERbwsOkXjr0QR1ANe3tDsleUqoFNU7fRIruaqYikdnU1svzhc8iGRwx0A5dAGYQ_mM045LN_oxU1c76ugXtnWS2PW8wBcmst7hk" - ] - }, - "Body": "" - } - }, - { - "ID": "e70c028bd3dc9250", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12275" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:18 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:18 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqrvFq18HewCbAkJcD7BmKgE8_WOFqSNkcwbpbNqCDPIy5jLVf2fIaHdg76_1rHuAHTtFM84Zr6euptOehRZOqA38Zc_BrJUSZKjg686imvazehpBc" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused 
by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1"}}" - } - }, - { - "ID": "4d7120b41e583669", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12215" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:18 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:18 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UoJ1cGOQjoNiRk8qv-igyccVp3Vg_ut7NLSEO3A-yyQwgGnoXR5jPbi3tuomYWrS57GIvd6GpShkcAYSIJ2e94Jx_1SseYkYtlSF8a7qsDU0OVFYgM" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1"}}" - } - }, - { - "ID": "e7711da9b067a58f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "156" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6Im9iajEifSx7Im5hbWUiOiJvYmoyIn0seyJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9XX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "750" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:19 GMT" - ], - "Etag": [ - "CI2048Lx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - 
"UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoJ5Us6Rp03fUUqSAPFFGPuqW2qR4gCfzJ4w3W1Kx8C0JGfNDxte0QlBoVEi4K7rV5zkUY1IXQvIk6FOU3DxP8ECZdxkVJBW57Jf5iXo8kRyLVu7OA" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb21wb3NlZDEvMTU1NjgzNTg1ODk2Mjk1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbXBvc2VkMSIsIm5hbWUiOiJjb21wb3NlZDEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1ODk2Mjk1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxOC45NjJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTguOTYyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE4Ljk2MloiLCJzaXplIjoiNDgiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29tcG9zZWQxP2dlbmVyYXRpb249MTU1NjgzNTg1ODk2Mjk1NyZhbHQ9bWVkaWEiLCJjcmMzMmMiOiJBYldCeVE9PSIsImNvbXBvbmVudENvdW50IjozLCJldGFnIjoiQ0kyMDQ4THgvZUVDRUFFPSJ9" - } - }, - { - "ID": "de5d78de4310ff60", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/composed1", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "48" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:24:19 GMT" - ], - "Etag": [ - "\"-CI2048Lx/eECEAE=\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:19 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:18 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Component-Count": [ - "3" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:18 GMT" - ], - "X-Goog-Generation": [ - "1556835858962957" - ], - "X-Goog-Hash": [ - "crc32c=AbWByQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "48" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqprnxhGEtAMeMcwiKn43QTEdsHrB7k_jCjOqnvH984bk5CIjIhSalrDVNwlLb37IU67jIYyIY1Nq8uaSz5HGUROWwpE6SVmGvIGXnuAbTP7x8Ez5w" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEeeRmd+w7xhUEtz55BCr9yaRPt+QxetF2uGfY/b4R5Pm" - } - }, - { - "ID": "4b7c14a3f35089ce", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "182" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - 
"eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvanNvbiJ9LCJzb3VyY2VPYmplY3RzIjpbeyJuYW1lIjoib2JqMSJ9LHsibmFtZSI6Im9iajIifSx7Im5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIn1dfQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "776" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:19 GMT" - ], - "Etag": [ - "CP+RiMPx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrMYc2wH1kAYUthDQQhZ4gGQU6kYXq6KGs1msu9ifujWYQ2dwS5ifqZFWiTYf74Rq2y9VkzALfhjkTK6anDL-SwHxUH3IwMe8j1rrrbaMbv7eLQCR8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb21wb3NlZDIvMTU1NjgzNTg1OTU2NDc5OSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbXBvc2VkMiIsIm5hbWUiOiJjb21wb3NlZDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1OTU2NDc5OSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9qc29uIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE5LjU2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxOS41NjRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTkuNTY0WiIsInNpemUiOiI0OCIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb21wb3NlZDI/Z2VuZXJhdGlvbj0xNTU2ODM1ODU5NTY0Nzk5JmFsdD1tZWRpYSIsImNyYzMyYyI6IkFiV0J5UT09IiwiY29tcG9uZW50Q291bnQiOjMsImV0YWciOiJDUCtSaU1QeC9lRUNFQUU9In0=" - } - }, - { - "ID": "26a4cb091ed2f1bd", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/composed2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "48" - ], - "Content-Type": [ - "text/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:19 GMT" - ], - "Etag": [ - "\"-CP+RiMPx/eECEAE=\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:19 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:19 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Component-Count": [ - "3" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:19 GMT" - ], - "X-Goog-Generation": [ - "1556835859564799" - ], - "X-Goog-Hash": [ - "crc32c=AbWByQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "48" - ], - 
"X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur-t-yqapXjBzjIrFWFhaT080f06JahbskCp0DC_-izUjPK4fYn556m2qyRDgj9HjkkDPtKR6dauzF5afjNZZ61bN_dTvw15d82A206-SdS1Jv_YM4" - ] - }, - "Body": "TuDshcL7vdCAXh8L42NvEeeRmd+w7xhUEtz55BCr9yaRPt+QxetF2uGfY/b4R5Pm" - } - }, - { - "ID": "6b59ed7d3d098970", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50RW5jb2RpbmciOiJnemlwIiwibmFtZSI6Imd6aXAtdGVzdCJ9Cg==", - "H4sIAAAAAAAA/2IgEgACAAD//7E97OkoAAAA" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3227" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:20 GMT" - ], - "Etag": [ - "CNuJssPx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoXNmswaWh2Gfcz1pYPy7etwk09Nz_dmuYVq55M2p3ox38A3mC0QgoHf-hL1uCZxQvusJSIHeugTQvmCgZvRdXV-3juD0W8KI669m0EILAHSfnVoJ8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nemlwLXRlc3QvMTU1NjgzNTg2MDI1MTg2NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdCIsIm5hbWUiOiJnemlwLXRlc3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24veC1nemlwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjIwLjI1MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyMC4yNTFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjAuMjUxWiIsInNpemUiOiIyNyIsIm1kNUhhc2giOiJPdEN3K2FSUklScUtHRkFFT2F4K3F3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ3ppcC10ZXN0P2dlbmVyYXRpb249MTU1NjgzNTg2MDI1MTg2NyZhbHQ9bWVkaWEiLCJjb250ZW50RW5jb2RpbmciOiJnemlwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ3ppcC10ZXN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJnemlwLXRlc3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imd6aXAtdGVzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYwMjUxODY3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ051SnNzUHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2d6aXAtdGVzdC8xNTU2ODM1ODYwMjUxODY3L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nemlwLXRlc3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJnemlwLXRlc3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYX
Bpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imd6aXAtdGVzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYwMjUxODY3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ051SnNzUHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiI5RGh3QkE9PSIsImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0=" - } - }, - { - "ID": "070a9af78f7967bf", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/gzip-test", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "none" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Type": [ - "application/x-gzip" - ], - "Date": [ - "Thu, 02 May 2019 22:24:20 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:20 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:20 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:20 GMT" - ], - "X-Goog-Generation": [ - "1556835860251867" - ], - "X-Goog-Hash": [ - "crc32c=9DhwBA==", - "md5=OtCw+aRRIRqKGFAEOax+qw==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "27" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UosPALnXk-3j5nBlDDWX7hBP4xBNcCe7lGNkEU-sHHTWsRexFWLoB-1gn8RfnlqS6Q5ZTrR86NBxpP1T0bFxgHnlYUYGh-G3mThRCgGIAWjXggOeOU" - ] - }, - "Body": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" - } - }, - { - "ID": "3ece6665583d65fb", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj-not-exists", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "225" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:20 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:20 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uph0zmuYk7ZmOWzbNkj24Iai85wz2vKj0mMnMkIHIoDUlDAUCA0e_CgFT5fV_XAvgUx06I9FspomkB_ImflSVCvj55GK6zEoY5iV1hN96nh4AmPBN0" - ] - }, - "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+Tm9TdWNoS2V5PC9Db2RlPjxNZXNzYWdlPlRoZSBzcGVjaWZpZWQga2V5IGRvZXMgbm90IGV4aXN0LjwvTWVzc2FnZT48RGV0YWlscz5ObyBzdWNoIG9iamVjdDogZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai1ub3QtZXhpc3RzPC9EZXRhaWxzPjwvRXJyb3I+" - } - }, - { - "ID": "602f4fdfff4e490c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic2lnbmVkVVJMIn0K", - "VGhpcyBpcyBhIHRlc3Qgb2YgU2lnbmVkVVJMLgo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3230" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:21 GMT" - ], - "Etag": [ - "CNat6MPx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoWSi2VcVAHJJzhGSPjVT17kZYaCh3uZvWXBU0R4hVmPZJ10gvbv_Ucrfd2kwrBvRYoZaQcYkY5Q3M-oywiCNtLm6SM9InWHz2Omc-0pWJJjfQWPCM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zaWduZWRVUkwvMTU1NjgzNTg2MTE0MTIwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTCIsIm5hbWUiOiJzaWduZWRVUkwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyMS4xNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjEuMTQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjIxLjE0MFoiLCJzaXplIjoiMjkiLCJtZDVIYXNoIjoiSnl4dmd3bTluMk1zckdUTVBiTWVZQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTD9nZW5lcmF0aW9uPTE1NTY4MzU4NjExNDEyMDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc2lnbmVkVVJML2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzaWduZWRVUkwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNpZ25lZFVSTCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYxMTQxMjA2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05hdDZNUHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3NpZ25lZFVSTC8xNTU2ODM1ODYxMTQxMjA2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zaWduZWRVUkwvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzaWduZWRVUkwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2
xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNpZ25lZFVSTCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYxMTQxMjA2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05hdDZNUHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJaVHFBTHc9PSIsImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0=" - } - }, - { - "ID": "4a1c9dc802f4427c", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "119" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:23 GMT" - ], - "Etag": [ - "CAc=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrbFZENfdiDkKpD_H_zm44x7h5bTZ3VD6MEoiiXuMwxHHK-zCSSoPRA2ALNUXoR3EcS1c-huuyZBIiw7ULKii7BNqKz0RMu4lWRxrDRQBOjGwKDDgE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQWM9In0=" - } - }, - { - "ID": "e12cb7360f4fd8dd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "684" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:23 GMT" - ], - "Etag": [ - "CAc=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:23 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - 
"success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrVJI9g41JQEDEgaIAOXKSCe2B8xklFKSoIciM7uC0uod7t7NzXfxsIrI6mQeZGkWa2B12xdKpSJBndrNbBvygZ6aXxpBBvL8d80NJ42u8KQC7AaFE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBYz0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWM9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBYz0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBYz0ifV19" - } - }, - { - "ID": "d25b1dc2d7247768", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMSJ9Cg==", - "/pXmp0sD+azbBKjod9MhwQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:24 GMT" - ], - "Etag": [ - "COr+pcXx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UosBTMUgCzCYlGDKqRCN7Kexl832zyi4FxoRqfYtmTr8yv63z4BAQBeCTLajyZqMbRyzjje7TtPPYomUSv_8zrQ8bFdXlNzbTSeKK8kXi2Ys82jus4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxIiwibmFtZSI6ImFjbDEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjI0N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC4yNDdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuMjQ3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiIwRTl0Rk5wWmowL1dLT0o2ZlY5cGF3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMT9nZW5lcmF0aW9uPTE1NTY4MzU4NjQyNDgxNzAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkZpRG1WZz09IiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "1d3df0d90130f5fe", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMiJ9Cg==", - "HSHw5Wsm5u7iJL/jjKkPVQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:24 GMT" - ], - "Etag": [ - "CJrxw8Xx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpMl0_NB41LujALQWmul06CCiuw7okuKlhYG_qilsoXlld2qoYgc2-ABYogriA37QOJMK5ZlJ5sYzP2Mp7CTSzK9uccFLi10TdrGFMUjUpD0OvcXrE" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyIiwibmFtZSI6ImFjbDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjczN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC43MzdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuNzM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJjOStPL3JnMjRIVEZCYytldFdqZWZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMj9nZW5lcmF0aW9uPTE1NTY4MzU4NjQ3Mzc5NDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0NzM3OTQ2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0pyeHc4WHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDIvMTU1NjgzNTg2NDczNzk0Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkF0TlJ0QT09IiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "a270daf4f72d9d3d", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2767" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:25 GMT" - ], - "Etag": [ - "COr+pcXx/eECEAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:25 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UranlihhkPcnPzTT2UYs4r6Hritk60lULu-pzO6YX-EWIlMlFiXzKmxaZVgOcPfnCgfJsRktKsL_TbFun1PfupraBREfYklCvRXKvJ_526gW-Jf9zs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZS
I6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dfQ==" - } - }, - { - "ID": "328125b2bc991e95", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:25 GMT" - ], - "Etag": [ - "COr+pcXx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UosP78wuW4e1mPLJgcCbCxBC4Je5LWjKugd0fVK51u-P3qeXbQXj4_Amo25ZHyut2LZvoipvmK95xEvebSyVyzVydKyU2VSbHLDkwnRpR-nr9UGIf0" - ] - }, - "Body": "" - } - }, - { - "ID": "1c7595376917851f", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:26 GMT" - ], - "Etag": [ - "CAg=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqnZXiLrdCFoctnkKUqdvXLdSp9xumFk20gsRJc4xB1CTZ14aLZYwbDJmi90E28Q2_E4klaJBOaB1OCcpbDBoF1N9E9Bk3VEaNYYrHKMoECuO7RMPs" - ] - }, - "Body": "" - } - }, - { - "ID": "4183198b151a3557", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "109" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLWpiZEBnb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "386" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:27 GMT" - ], - "Etag": [ - "CAk=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo361-YkcyyOvd5CL4q3LgTM1Bk4RdkPY3cn-qOm6Dn0vghTnmIE9VKkzGiOnjRNnD7SWXGMqpxaCGvCt6P44D55PqOwAtVTF8PNwmEK9HWCoVXABo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In0=" - } - }, - { - "ID": "e98f53a71d8839c9", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "1789" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:28 GMT" - ], - "Etag": [ - "CAk=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:28 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrqACfvLjoBUrCN_6ksyMdPlv7yPWQGo1D9u1UiCCgFDRlEwr6NOp18BSxaeTuMIabf2ciNj7_Ba1W6YCz64HlcYyQOoEXNfGYGKcAqGTubXsGHPVk" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FrPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In1dfQ==" - } - }, - { - "ID": "28b9a29086758e65", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:28 GMT" - ], - "Etag": [ - "CAo=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UozsFdtI43Ltv91E18-CrdRZ4pQDVZXReJOVhpWDu3mxNA7rrWU7hNGl4kPMd0FgcVWDigZA5JBNsYbFV113Ew6Cp2uTsuk0uI1io_mhA6-Lmd2h18" - ] - }, - "Body": "" - } - }, - { - "ID": "8ef9f7b6caefd750", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiZ29waGVyIn0K", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3196" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:29 GMT" - ], - "Etag": [ - "CIXH3cfx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo97hrh9l4pmrJQqaH8Qu3axsdbEPH2Y8GSEso8-ExKuf4ot8o2uS79ROJcgCtql4vLQJ_4L8q0Z7E9E2WYbbl6vKUYKdQiCI0POisS9o_ViNh9-Yk" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlciIsIm5hbWUiOiJnb3BoZXIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTM1MjgzNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS4zNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuMzUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5LjM1MloiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyP2dlbmVyYXRpb249MTU1NjgzNTg2OTM1MjgzNyZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nb3BoZXIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vz
c0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ29waGVyLzE1NTY4MzU4NjkzNTI4MzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlci9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InJ0aDkwUT09IiwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "ae3a5920bc8a9c3c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCJ9Cg==", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3548" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:29 GMT" - ], - "Etag": [ - "CPzK+8fx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 
GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrPLZGQqU8j04JqxMmfsqas3HGTlrdx5NM99YR5nCedJYGMSaZ1ynvd2UbUcdwps_gfRo__0XapzHuD3hM_bshff65qlnw_i2cOwp97N2EhfUsZ1X4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS/Qk9C+0YTQtdGA0L7QstC4LzE1NTY4MzU4Njk4NDQ4NjAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5Ljg0NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS44NDRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuODQ0WiIsInNpemUiOiI0IiwibWQ1SGFzaCI6ImpYZC9PRjA5L3NpQlhTRDNTV0FtM0E9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjg/Z2VuZXJhdGlvbj0xNTU2ODM1ODY5ODQ0ODYwJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ItCT0L7RhNC10YDQvtCy0LgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTg0NDg2MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ
2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vJUQwJTkzJUQwJUJFJUQxJTg0JUQwJUI1JUQxJTgwJUQwJUJFJUQwJUIyJUQwJUI4L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoi0JPQvtGE0LXRgNC+0LLQuCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0=" - } - }, - { - "ID": "0104a746dbe20580", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYSJ9Cg==", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3116" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:30 GMT" - ], - "Etag": [ - "COKVlMjx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqY12miALkWDvx8PHEyL23oxWZlw8Jq7Cn6kS8E8_Tn1tTWWoTuS_pWb5pi9qevCIvqK6aTK56MzwrMuy9axIjpw5yyMZ42MaWK_YFkBr95OnK5IbM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hLzE1NTY4MzU4NzAyNDc2NTAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hIiwibmFtZSI6ImEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMC4yNDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzAuMjQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMwLjI0N1oiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYT9nZW5lcmF0aW9uPTE1NTY4MzU4NzAyNDc2NTAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2EvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2EvMTU1NjgzNTg3MDI0NzY1MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC91c2VyLWFub3RoZXItdGhpbm
dAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0=" - } - }, - { - "ID": "da19af085be0fb47", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYSJ9Cg==", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "19484" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:30 GMT" - ], - "Etag": [ - "CLmmusjx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrAJr55dqTDkY_jBKB0vAWyE0owtzB_kEoH91_FKTr9C3bHd8fWUiTrsruL5mgwXg2l8gbPiOXad-A9HIJ3Zt_AeXq_p0m-Q8LrHQqJ6sAKsbzV4Zw" - ] - }, - "Body": 
"{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","name":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835870872377","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:30.872Z","updated":"2019-05-02T22:24:30.872Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:30.872Z","size":"4","md5Hash":"jXd/OF09/siBXSD3SWAm3A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?generation=1556835870872377&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLmmusjx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"rth90Q==","etag":"CLmmusjx/eECEAE="}" - } - }, - { - "ID": "96f915707a76c50c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAifQo=", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "2948" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:31 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrQIwi81TFCNpDIZUjs_5tSUFqURLnaBF7g2l1sGMuHCTWZeLyr45VWkcc8fUDYnF-P84QixYQbTzJGuJzOOx7mw1qluYDqoORFGPICfvKmNxrZM3A" - ] - }, - "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LnJlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdF
RyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" - } - }, - { - "ID": "7a562d6e65ef8a7a", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEifQo=", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "4785" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:31 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ups-3Re4_9njFMmkYwND6BuK5cgxs5tKVRwQ2CGkcks-K0aALbethXTYaEb6fLos6w_FSnM8YkPGM3TETCAoyYpVbQPlotXR9CH3DXngjQNv0yv_ZQ" - ] - }, - "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiVGhlIG1heGltdW0gb2JqZWN0IGxlbmd0aCBpcyAxMDI0IGNoYXJhY3RlcnMsIGJ1dCBnb3QgYSBuYW1lIHdpdGggMTAyNSBjaGFyYWN0ZXJzOiAnJ2FhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhLi4uJyciLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5pZC5uYW1lLCBtZXNzYWdlPVRoZSBtYXhpbXVtIG9iamVjdCBsZW5ndGggaXMgMTAyNCBjaGFyYWN0ZXJzLCBidXQgZ290IGEgbmFtZSB3aXRoIDEwMjUgY2hhcmFjdGVyczogJydhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYS4uLicnLCB1bm5hbWVkQXJndW1lbnRzPVthYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1UaGUgbWF4aW11bSBvYmplY3QgbGVuZ3RoIGlzIDEwMjQgY2hhcmFjdGVycywgYnV0IGdvdCBhIG5hbWUgd2l0aCAxMDI1IGNoYXJhY3RlcnM6ICcnYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEuLi4nJywgcmVhc29uPWludmFsaWQsIHJwY0NvZGU9NDAwfSBUaGUgbWF4aW11bSBvYmplY3QgbGVuZ3RoIGlzIDEwMjQgY2hhcmFjdGVycywgYnV0IGdvdCBhIG5hbWUgd2l0aCAxMDI1IGNoYXJhY3RlcnM6ICcnYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEuLi4nJ1xuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5FcnJvckNvbGxlY3Rvci50b0ZhdWx0KEVycm9yQ29sbGVjdG9yLmphdmE6NTQpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5RXJyb3JDb252ZXJ0ZXIudG9GYXVsdChSb3N5RXJyb3JDb252ZXJ0ZXIuamF2YTo2Nylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lIYW5kbGVyJDIuY2FsbChSb3N5SGFuZGxlci5qYXZhOjI1OSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lIYW5kbG
VyJDIuY2FsbChSb3N5SGFuZGxlci5qYXZhOjIzOSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkRpcmVjdEV4ZWN1dG9yLmV4ZWN1dGUoRGlyZWN0RXhlY3V0b3IuamF2YTozMClcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmV4ZWN1dGVMaXN0ZW5lcihBYnN0cmFjdEZ1dHVyZS5qYXZhOjExNDMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5jb21wbGV0ZShBYnN0cmFjdEZ1dHVyZS5qYXZhOjk2Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLnNldChBYnN0cmFjdEZ1dHVyZS5qYXZhOjczMSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkRpcmVjdEV4ZWN1dG9yLmV4ZWN1dGUoRGlyZWN0RXhlY3V0b3IuamF2YTozMClcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmV4ZWN1dGVMaXN0ZW5lcihBYnN0cmFjdEZ1dHVyZS5qYXZhOjExNDMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5jb21wbGV0ZShBYnN0cmFjdEZ1dHVyZS5qYXZhOjk2Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLnNldChBYnN0cmFjdEZ1dHVyZS5qYXZhOjczMSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnRocmVhZC5UaHJlYWRUcmFja2VycyRUaHJlYWRUcmFja2luZ1J1bm5hYmxlLnJ1bihUaHJlYWRUcmFja2Vycy5qYXZhOjEyNilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW5JbkNvbnRleHQoVHJhY2VDb250ZXh0LmphdmE6NDUzKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuc2VydmVyLkNvbW1vbk1vZHVsZSRDb250ZXh0Q2FycnlpbmdFeGVjdXRvclNlcnZpY2UkMS5ydW5JbkNvbnRleHQoQ29tbW9uTW9kdWxlLmphdmE6ODAyKVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlJDEucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ2MClcblx0YXQgaW8uZ3JwYy5Db250ZXh0LnJ1bihDb250ZXh0LmphdmE6NTY1KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuQ3VycmVudENvbnRleHQucnVuSW5Db250ZXh0KEN1cnJlbnRDb250ZXh0LmphdmE6MjA0KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0Tm9VbnJlZihUcmFjZUNvbnRleHQuamF2YTozMTkpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHQoVHJhY2VDb250ZXh0LmphdmE6MzExKVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NTcpXG5cdGF0IGNvbS5nb29nbGUuZ3NlLmludGVybmFsLkRpc3BhdGNoUXVldWVJbXBsJFdvcmtlclRocmVhZC5ydW4oRGlzcGF0Y2hRdWV1ZUltcGwuamF2YTo0MDMpXG4ifV0sImNvZGUiOjQwMCwibWVzc2FnZSI6IlRoZSBtYXhpbXVtIG9iamVjdCBsZW5ndGggaXMgMTAyNCBjaGFyYWN0ZXJzLCBidXQgZ290IGEgbmFtZSB3aXRoIDEwMjUgY2hhcmFjdGVyczogJydhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYS4uLicnIn19" - } - }, - { - "ID": "239b642e1919a84a", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoibmV3XG5saW5lcyJ9Cg==", - "ZGF0YQ==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - 
"Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "3270" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:31 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urk-XOPoshQkyIHYZp241R2W5lOuLwqPMx606rubzPOaGggM2z_sZO93dYCJHffXuPDOjy64lIxrBXIYW4QT04eM932jPtzPdVd-PFWssV9RYk83JE" - ] - }, - "Body": "eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiRGlzYWxsb3dlZCB1bmljb2RlIGNoYXJhY3RlcnMgcHJlc2VudCBpbiBvYmplY3QgbmFtZSAnJ25ld1xubGluZXMnJyIsImRlYnVnSW5mbyI6ImNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkZhdWx0OiBJbW11dGFibGVFcnJvckRlZmluaXRpb257YmFzZT1JTlZBTElEX1ZBTFVFLCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLklOVkFMSURfVkFMVUUsIGNyZWF0ZWRCeUJhY2tlbmQ9dHJ1ZSwgZGVidWdNZXNzYWdlPW51bGwsIGVycm9yUHJvdG9Db2RlPUlOVkFMSURfVkFMVUUsIGVycm9yUHJvdG9Eb21haW49Z2RhdGEuQ29yZUVycm9yRG9tYWluLCBmaWx0ZXJlZE1lc3NhZ2U9bnVsbCwgbG9jYXRpb249ZW50aXR5LnJlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9RGlzYWxsb3dlZCB1bmljb2RlIGNoYXJhY3RlcnMgcHJlc2VudCBpbiBvYmplY3QgbmFtZSAnJ25ld1xubGluZXMnJywgdW5uYW1lZEFyZ3VtZW50cz1bbmV3XG5saW5lc119LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1EaXNhbGxvd2VkIHVuaWNvZGUgY2hhcmFjdGVycyBwcmVzZW50IGluIG9iamVjdCBuYW1lICcnbmV3XG5saW5lcycnLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IERpc2FsbG93ZWQgdW5pY29kZSBjaGFyYWN0ZXJzIHByZXNlbnQgaW4gb2JqZWN0IG5hbWUgJyduZXdcbmxpbmVzJydcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbG
UuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJEaXNhbGxvd2VkIHVuaWNvZGUgY2hhcmFjdGVycyBwcmVzZW50IGluIG9iamVjdCBuYW1lICcnbmV3XG5saW5lcycnIn19" - } - }, - { - "ID": "3cd795cb0b675f98", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:31 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpOXjD9T_c_FRByyyA-CjE753kXZefEzIwQdrNG6OewoHOvKC3Bb9LleziQ6-ymVDg-F1S1eeQeFNJH7nJxDsdA_VmxOjeBzTDB_F3--_1NtwMF6Do" - ] - }, - "Body": "" - } - }, - { - "ID": "361d4541e2cb460a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/a?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:31 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpomJFlpfOIJoFvdCmQgD2dRXSREzV1ToI2ntcHi8I9tSyzMCUk0ZrytllkwR_nU8WFZ0YuXVl0Kwsq1EhJL85RlBgCkBNc1P-yVMyNbdt5YrooKok" - ] - }, - "Body": "" - } - }, - { - "ID": "a4e929446e49411d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/%D0%93%D0%BE%D1%84%D0%B5%D1%80%D0%BE%D0%B2%D0%B8?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:31 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqmdInqhuXhUxVjtfRvXvHd6Rmo1ODUbeNFFTBW4diuT7HDjzmYnvd4TU6oJPqbck52yFAR99fMx4Tks1TllSUCB4GsMVbBNENfZU6te-hXIB8XyJs" - ] - }, - "Body": "" - } - }, - { - "ID": "89b2ef62a071f7b9", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gopher?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:24:32 
GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrRgapcSgK1GOswXS37Vynb9BRj0go8bNPOBtrFZ3TDr-nzIdlvKBQuwHtXl4Tlb-mLl-A65zL5Iw3unCEuy6lwtWl5-V7REtcIVHkrWLK5AoLoHqA" - ] - }, - "Body": "" - } - }, - { - "ID": "ca91b2e4ee4555cb", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", - "SXQgd2FzIHRoZSBiZXN0IG9mIHRpbWVzLCBpdCB3YXMgdGhlIHdvcnN0IG9mIHRpbWVzLg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3213" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:32 GMT" - ], - "Etag": [ - "CLGNmsnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UovWW5HSrWbcvQoWGU3c_n64wwcau06tjEM-fzdADFSbmmylG3VLjae6MRNIM4B9gDitCKfjo1_xPQNAZEmMYzRplVmdI4cIrBC8ursFSUywoCSJeU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" - } - }, - { - "ID": "eb678345630fe080", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3213" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:32 GMT" - ], - "Etag": [ - "CLGNmsnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrtPOErXGrnlQtXCV1PhgO5nz80m75Fzs7DoR-y8Ava3jvL9NDJ4L-i8SIV_tktR8VqCvv9uh-X1ltJCj3isa_oY8TlV-OsZeIiESuDrRTYueQWQP0" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" - } - }, - { - "ID": "d76ba152a62e7dc1", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", - "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3212" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:33 GMT" - ], - "Etag": [ - "CLPJv8nx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqk5XoKCyt74JBpeU8oREfzh3e0LLtXupBjcWtsFfHNZDEn4DjcmzWo6resyQ1gEisBp_STlU4hVaU3MbTyX3LM443_Gsu4ouZGdf3ZTvTzsLD_zNw" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" - } - }, - { - "ID": "f785ed7f0490bb6d", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3212" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:33 GMT" - ], - "Etag": [ - "CLPJv8nx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrCqpaad9mhxk7VZgTc8xIunote5-AHPp3vYkUbSU8uiVcq65rcnrbtIuCqJ63JTuVJvKw7pFwnLjyn5fL37JzfXLhmLAowAV67_Qqtxxhiy4WQars" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" - } - }, - { - "ID": "096d156b863922d6", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm5hbWUiOiJjb250ZW50In0K", - "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3197" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:33 GMT" - ], - "Etag": [ - "CPW+6cnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoivaSzjVdOm2KP4nXCF8VQDNHXmlNjm9rZwTaWzjvBmH3oK01QYTnHqZ5DhYRewu_1H0KoQgFpUIC-M4OKZoOhRHrq0-H8HLT__SqD1D5cmezrqHo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" - } - }, - { - "ID": "0e96d304e9558094", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3197" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:34 GMT" - ], - "Etag": [ - "CPW+6cnx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoc-8JwtQprLtec5Ft6q3z4p0ooDaCCLmO9oVgrOXvBpoFhApztj8anfFwEcHdzpB6FBlUFqou0viF_HLHJvHolsxeCCoDOhqyIggOOlWjnSRg8XzY" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" - } - }, - { - "ID": "b4c8dedc10a69e07", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6ImltYWdlL2pwZWciLCJuYW1lIjoiY29udGVudCJ9Cg==", - "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3198" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:34 GMT" - ], - "Etag": [ - "CM/Yjsrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpC3UBVkNENjfRoaL2M1fPRh-iTDTwCsgF8O3TOX_iBuXcDxMjOoTmrIhD4cE5g1s7PObP66TZB1lecFypnLz8hL-aowsOxUMuL0KXJvrgoGxKHUFg" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" - } - }, - { - "ID": "c45640ec17230f64", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3198" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:34 GMT" - ], - "Etag": [ - "CM/Yjsrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrvqLsOaMe2H6Xh3MyDCOuHmLzU1fn_Zh6kHLkpTpn4EWj3nFkeROu1g-cABD151cTh8YmY798iEku-Q3TnZIMZ5mexmHiCJKdUhAolbSd9b_apMK8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" - } - }, - { - "ID": "752ab630b062d61e", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY3VzdG9tZXItZW5jcnlwdGlvbiJ9Cg==", - "dG9wIHNlY3JldC4=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3482" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:35 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpaWw6jJMKNOoCNMXwJNVVsxqzPhGfMo-5g3krJ7A8_PSGTx4W2OakfmtDvzOpg1hexLJcsWunNHfyWVXsjBpM58dtQlM6VucQW4Uxndlb9RIp0UhM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" - } - }, - { - "ID": "3f4d8fd0e94ca6de", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3425" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:35 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoO-NbHUptMgzQPx9zApWGCeqMgfkk2mt6PJl1iWbq8ocUYq_0ud8gPuj9bcBTBOBdqewKzSouddziC-BFLoOdQJ6ElkN136q5y39ZRDN4zq6UWLUs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW
5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" - } - }, - { - "ID": "0355971107342253", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3482" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:35 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UryZM2k28xNlFIDnXCH6SJ8RA2lDVNIx-5_eYRExB4-D1-dg_3fWvU3AREwoow4DvlYN4eyzA1AthWs5y0tYZEzvamBoZufkrXPmD64aT8kzAatns8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" - } - }, - { - "ID": "8f8c7cba5307c918", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3448" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upnljw7UWMuKFeJUJp_gu-lyfzDRVTwgInddze_9zF5waX4glKKpcRrGpe50cvS8dV_xbbKLQa-CqAMVxlEp7qIHsmTdUR1amjrq0w9U_qrn4gYwSw" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuOTIwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLz
E1NTY4MzU4NzUwNTg2ODAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" - } - }, - { - "ID": "fc766ddbbfcd9d8a", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3505" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Etag": [ - "CPjnucrx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqlTuEeQHkdcuHKu_aWtuYjopXaORKyG9TF597i1ybzIPgJHE_oiKH-xYwG_D2txhF_Sv6Jzfy44Dfga1bINCpPLseDEeD_Ez4rvxx8baTX1uJ7lJo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzYuMjI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNj
MzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJyME5Hcmc9PSIsImV0YWciOiJDUGpudWNyeC9lRUNFQU09IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" - } - }, - { - "ID": "fb1e502029272329", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "277" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urm5pF1ghApp-RiVg_TAipd-LdTbF-hcDPcVRYbmj7KUIQxnsMUF5WOcrMA1t7ZltdvPZhyfsELISuH6DGJlUUedaCNH-B5j580GjBYLUKCwmAADeM" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" - } - }, - { - "ID": "c151ba4fab0c6499", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Language": [ - "en" - ], - "Content-Length": [ - "11" - ], - "Content-Type": [ - 
"text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Etag": [ - "\"-CPjnucrx/eECEAM=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:35 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:35 GMT" - ], - "X-Goog-Generation": [ - "1556835875058680" - ], - "X-Goog-Hash": [ - "crc32c=r0NGrg==", - "md5=xwWNFa0VdXPmlAwrlcAJcg==" - ], - "X-Goog-Metageneration": [ - "3" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "11" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur68mIGQ8VAtTRZs4F6ixZZYKcqe1oTqWHWqmp7gNF-81XKf2xX6eS4PcegbKx7bYnb4i6dzTCoLlbaJRNHwwLVzNbYMkGNy_bTHbWYTgtlBSKVtWs" - ] - }, - "Body": "dG9wIHNlY3JldC4=" - } - }, - { - "ID": "e09c6326cc60ad82", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12563" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:36 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqldLQvGKuu2fihIdwh7CvU7W0yE3vUXu8jqefTPSMhlGgvR1EfeYFqjk_Zxj7fEpv7AjugvHnn1DmkJxJWhrZyM8b5gS4uJID92D_018h2YHH1r30" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" - } - }, - { - "ID": "6c83b36cc6fc482c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3527" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Expires": [ - 
"Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGOUzu40Jq-_GztkQxoFN64MHcSoZjph7ivV8tqrp6ENxFIB0c82xqEdKKfcpqbJ2YXtXQgwflZgX-uiVOgGY_1c8SLoV7geQMfErTz1Q0NDM4YDM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NzEwODYxNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNy4xMDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzcuMTA4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM3LjEwOFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzcxMDg2MTQmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzcxMDg2MTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0c
HM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifX0=" - } - }, - { - "ID": "e499f3b7077fb222", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Language": [ - "en" - ], - "Content-Length": [ - "11" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Etag": [ - "\"c7058d15ad157573e6940c2b95c00972\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:37 GMT" - ], - "X-Goog-Generation": [ - "1556835877108614" - ], - "X-Goog-Hash": [ - "crc32c=r0NGrg==", - "md5=xwWNFa0VdXPmlAwrlcAJcg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "11" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpfEf1WUM8E5dSmOe1sOfgheagnlrcFUolvMMBiP6jDvPqoS7Sx0IwklM-DwnuAbBJPQ_EHhX-42njQu9vUjXqamH1U6np0Vmi0BngqFYOTc9MVP3M" - ] - }, - "Body": "dG9wIHNlY3JldC4=" - } - }, - { - "ID": "9e67dfa901c4e619", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12563" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:37 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoYPZ-j39Sr1Whn6F3PYeVaxqIIjS_fBgSNkBO7f7B8RMWih6iCCLf1cPDXB3Br-0XBkdm6i9N9fGeK84_laVYWuJc0uJwSCXydk0bSWezw0qec5yE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" - } - }, - { - "ID": "670f74ac2a0b734e", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, 
must-revalidate" - ], - "Content-Length": [ - "3640" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoWwxnT2DHS1ymBhZJmnwxQiajAVYbOH62dyhX86syLNW_TlPnzYaM_7kACpN8ar5EeAHQ9fsMSHP4CqMcb6ZWg5KY3OkbPvfitdLalAA9MXxMFqoo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODE4MzI1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC4xODJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguMTgyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4LjE4MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzgxODMyNTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTlBDK012eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIi
wiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzgxODMyNTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTlBDK012eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkZuQnZmUTFkRHN5UzhrSEQrYUI2SEhJZ2xEb1E1SW03V1lEbTNYWVRHclE9In19fQ==" - } - }, - { - "ID": "9c6e7ad2d8e2c68a", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "277" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoSrQtDkljyQ-aUJoC8m6nY20wZiHLTwh4znEZ5hyKxLQUrZIi7PnU416twfTwfPXWvd4cPGZC3NVdpJWg332KnUKSEdFPY2BQwLJ-_9S5eVdwMrf8" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" - } - }, - { - "ID": "b5fbec9f26148f95", - "Request": { - 
"Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Language": [ - "en" - ], - "Content-Length": [ - "11" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Etag": [ - "\"-CNPC+Mvx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:38 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:38 GMT" - ], - "X-Goog-Generation": [ - "1556835878183251" - ], - "X-Goog-Hash": [ - "crc32c=r0NGrg==", - "md5=xwWNFa0VdXPmlAwrlcAJcg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "11" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpTr8Qws5nL-el_ied6M7FYtryiLpFfhGNN-TxakIl1FCAOBJwMeq6_KBQ0l4UmT8VePQQ9h_S8r55NolBB1Ex12-iKvooCkTsNmvQ4rE4hOqQGbLc" - ] - }, - "Body": "dG9wIHNlY3JldC4=" - } - }, - { - "ID": "616de25561ebbffa", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3640" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:39 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" 
- ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqyD8XYw6tgOBbcxdUIVAw_vXrrryw0bgh-L-jRW0hyJt0lCHU9K26isn6tykgml7UgX52dgw65xD_ht4yxbHkY_VFl6MkEY6H-mWRx0_52xj49RW8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODk5ODUxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC45OThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguOTk4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4Ljk5OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4Nzg5OTg1MTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4Nzg5OTg1MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24
tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkgrTG1uWGhSb2VJNlRNVzVic1Y2SHlVazZweUdjMklNYnFZYkFYQmNwczA9In19fQ==" - } - }, - { - "ID": "2ddf1402c6efc3a8", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "160" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13334" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:39 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrFitgTFIY6yt8kk3zprPGtWncWc7Ad6oUFVXVdE0O3QqK0qlYGB7RSw-dwN1AvMk4Sndi8gR22cF8qs88ZlmdAxB-1zYdUNqwArmpP2xni66xX-L4" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object 
(go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" - } - }, - { - "ID": "eea75f5f6c3c7e89", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "160" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "911" - ], - "Content-Type": [ - "application/json; 
charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:39 GMT" - ], - "Etag": [ - "CPDp3szx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uotyvz71ZfU1J_hl3NsDz9ldw92nWlIp9muxpBUsdv0nTyfJEqz-yQFyEwE2UM11wv7tkpLQfffGlKhlx7CVK0S1UYJnS2XQu2P0Uiz5bbamHxE520" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTMvMTU1NjgzNTg3OTg1OTQ0MCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMyIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3OTg1OTQ0MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOS44NTlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzkuODU5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM5Ljg1OVoiLCJzaXplIjoiMjIiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0zP2dlbmVyYXRpb249MTU1NjgzNTg3OTg1OTQ0MCZhbHQ9bWVkaWEiLCJjcmMzMmMiOiI1ajF5cGc9PSIsImNvbXBvbmVudENvdW50IjoyLCJldGFnIjoiQ1BEcDNzengvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" - } - }, - { - "ID": "b19fb82ecf450b51", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "277" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:40 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:40 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urcj3TJ6B5h4Q0DT7G_ioI_Icu9iv65m57OlRH6h9mKD9zZZl320H3QD6A7fNd1UPBSGGDZ9I0TG4_lSFa8JgkJQEVRGVRvVQv36b7ArCuGmg0loV0" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" - } - }, - { - "ID": "1f0a12bd00a88965", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", - "Header": { - 
"Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "22" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:24:40 GMT" - ], - "Etag": [ - "\"-CPDp3szx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:39 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Component-Count": [ - "2" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:24:39 GMT" - ], - "X-Goog-Generation": [ - "1556835879859440" - ], - "X-Goog-Hash": [ - "crc32c=5j1ypg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "22" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur7DoEkjdrkGGrYpsCDbd2Y6c_Mu8sEA7cN_k06l4WjY25UJsijDFWSlSIA9SCi-ouLuI3if4Rh33a5n5vKcN3oSYjzK92eSPVCczEpQdILKFWyQh4" - ] - }, - "Body": "dG9wIHNlY3JldC50b3Agc2VjcmV0Lg==" - } - }, - { - "ID": "f649a81672a92823", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3527" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:40 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrfhqrYGhW8ew5dxbfg_DcMhaJh6k2oY_JqMwPRSDtS3Ef5kljeo8oTONrxRIAP8I0ScqxFCy7okNejkqOeO4Vr1TBZ2mc4MDjwiJA52ERSgZMUyvM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MDcxNzUwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MC43MTdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDAuNzE3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQwLjcxN1oiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4ODA3MTc1MDYmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4ODA3MTc1MDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5Nj
AxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifX0=" - } - }, - { - "ID": "61763fd57e906039", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "129" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiJ9XX0K" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13444" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:41 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur7TRBQyfrSgWnsPL0CVfQCd5s_QJN9pRUHB1YttIoObh1YkzCEtuRxrwyKYtccpyodUCMmTVRyD6d0oDy-c2_Q7GK-_6OC5h_EF_1e9-t6E8VZCQ8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceNotEncryptedWithCustomerEncryptionKey","message":"The target object is not encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.encryptionKey, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=entity.encryptionKey, message=The target object is not encrypted by a customer-supplied encryption key., reason=resourceNotEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is not encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is not encrypted by a customer-supplied encryption key."}}" - } - }, - { - "ID": "b45cb452c78d7b50", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:41 GMT" - ], - "Etag": [ - "CAo=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:41 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uq2YnotZtU1JMnJw10vtgVDeukiWK_4DXx0QFWA91CaCYLPXLDKzCzY8xqb6EGkVxvw731As27REu_hYOqZTwSfJJ6SeHemliLV9JgQYjfngxL0HFA" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOC44ODJaIiwibWV0YWdlbmVyYXRpb24iOiIxMCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBbz0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FvPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQW89In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQW89In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7Im5ldyI6Im5ldyIsImwxIjoidjIifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJld
GFnIjoiQ0FvPSJ9" - } - }, - { - "ID": "6831287fd0edbcd4", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYyJ9Cg==", - "Zm9v" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3128" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:42 GMT" - ], - "Etag": [ - "CJek2c3x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrXGhYaqbrodjXeCKtZGAt0hM0GrM8GBcZBkGkLJ7550-Iqaxp681SkhDDDOp0V3-xMmoq3rjWOC8A4XvJfAzgxGsO2VekCUJgUBMalL8hEAur49q8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDA
zNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" - } - }, - { - "ID": "7588cdc84e3976f4", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3128" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:42 GMT" - ], - "Etag": [ - "CJek2c3x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpGBHQWnSHLUYlW5nck2w_3QN4x5uYmOM-GIatNzD1tqhzK0JOf7rBe1BKILpdyEUPlSmrLwrukcqJwHgfezE50OQL7ha9n6_fc6qUMoVwkEGawOgA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" - } - }, - { - "ID": "974ed1b42608e806", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "34" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:42 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrKCtuc_GsAj6sf3zXpqT2_KZrdL0SEwsunv07k0DaaEHMj8gkX4kRCcm5r8AUrD9RNYjoKeceiCyY4pDN8nrHljshmEIXF2P29Oyd0KSuqDjsNeps" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMyIsIm9iamVjdFNpemUiOiIzIiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgyNzYwNjA3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYyIsIm5hbWUiOiJwb3NjIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDIuNzYwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQyLjc2MFoiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Mi43NjBaIiwic2l6ZSI6IjMiLCJtZDVIYXNoIjoickwwWTIwekMrRnp0NzJWUHpNU2syQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2M/Z2VuZXJhdGlvbj0xNTU2ODM1ODgyNzYwNjA3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODI3NjA2MDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9fQ==" - } - }, - { - "ID": "a2be30bf1e3cb539", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYzIiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==", - "eHh4" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3150" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:43 GMT" - ], - "Etag": [ - "CILrp87x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqziYHpTkcbyMFRtHko_e04Rze8FHLbALw476U2bB9k_SqPwILBzxKRLzFvRN4q3RLt4xR1mpR2lgJ1Zq0BNYCefLOZsOeq4JbzJYLxGL5V799xkQ8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIiLCJuYW1lIjoicG9zYzIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My4xNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuMTUyWiIsInN0b3JhZ2VDbGFzcyI6Ik1VTFRJX1JFR0lPTkFMIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjE1MloiLCJzaXplIjoiMyIsIm1kNUhhc2giOiI5V0dxOXU4TDhVMUNDTHRHcE15enJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzI/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzMTUyNzcwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS
9vL3Bvc2MyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMTdxQUJRPT0iLCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "d570e13411ade628", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMifQo=", - "Zm9v" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3336" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:43 GMT" - ], - "Etag": [ - "CPCDx87x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrltsLp6xgl5GQBA_ZoqyMKcRlP-MvWyo0epRXUAbOAkOUUpAOgezp4fFQRP5wEM1fq771AWUgtYvQ3HLXCnwxmDaCqJxEhICiYhxDp8RpvCnn3xuI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My42NjNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuNjYzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjY2M1oiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnM/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzNjYzODU2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRJbkNvcHlBdHRycy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NT
YvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "284a06aa5d3f4c0f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "62" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifQo=" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "2972" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:43 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqEuzT5vUHzBcQTT84B5lbxQRZ8Wazbvf7pARGdim0OKOWWM6MdR9KrH11f9w9bxrybc2YHzoveHnAwpEgFzwUcXlLN8xDttCt9pwOnPMX3My8utXg" - ] - }, - "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkuZGVzdGluYXRpb25fcmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LmRlc3RpbmF0aW9uX3Jlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW
5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" - } - }, - { - "ID": "631a4dc25c1479df", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBK3c9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3321" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:44 GMT" - ], - "Etag": [ - "CIes587x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up5pzFgsk-trM6xfNGHCutAafrBhKStla4toQDjvEsTPe4TTesYnwc0KLg9WK95RXOKqdm_KUngd4hv6Tucfns_MALlJyx1s6A2cZR3vco6jKPTW00" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC4xOTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuMTkyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjE5MloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0MTkzMjg3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "e60d42eeeafea205", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBL0E9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "3301" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:44 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZ9QOOcNdGntWr097PRNnaDFbt2xarczbyHRwkwxSiwRZIhV26iZGbh5vHIpvl3Z5Dxc7hjtWuutHTMn8jpCTEowLJNAre2A6mZxVNtV52Vh6XDX4" - ] - }, - "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W2NIK0EvQT09XX0sIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHJlYXNvbj1pbnZhbGlkLCBycGNDb2RlPTQwMH0gUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi5cblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG
5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJQcm92aWRlZCBDUkMzMkMgXCJjSCtBL0E9PVwiIGRvZXNuJ3QgbWF0Y2ggY2FsY3VsYXRlZCBDUkMzMkMgXCJjSCtBK3c9PVwiLiJ9fQ==" - } - }, - { - "ID": "5a293e6162b30ef3", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiaGFzaGVzT25VcGxvYWQtMSJ9Cg==", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3321" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:44 GMT" - ], - "Etag": [ - "CJuDg8/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqgpXYBQOYHYXTQAVb7IGna0BRqHaB7oBdZ19frcgAVGErLbEHe3bESKZ7zKSO6r0AtvqwfCEuJty95EeD5MkwqIsXbr88YcTG0j7M-g4yTKkejukI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjY0NloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0NjQ2ODExJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "19f19648f1196c10", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3321" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:45 GMT" - ], - "Etag": [ - "CKDem8/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqAmKuvPxUsjTHDB5nMuNG92dU0gql0Yol2zlvPDWEDuXHQRLhOJzYBAH207Ot-_IB22pH5QSfTaTayqEWBUfkobCR9YGVa79W0LG4fv9fnAjxk5Cs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NS4wNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDUuMDUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ1LjA1MVoiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg1MDUxNjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "880416f9891fc25f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", - "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "3515" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:45 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoFC5_DRlr9xktR_eWpfuFqiZRbenVOATtUEg3tfJC8JbCqagKQzn0_sYLvxvG52ordbl2Nwo0L6Y0AbwuenLAo1uoX4uzHnro-JiY8MkdQEsO3uSA" - ] - }, - "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5tZDVfaGFzaF9iYXNlNjQsIG1lc3NhZ2U9UHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W292WmpHbGNYUEppR09BZktGYkpsMVE9PV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UubWQ1X2hhc2hfYmFzZTY0LCBtZXNzYWdlPVByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IFByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci
5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4ifX0=" - } - }, - { - "ID": "e1a7bef7c2f0f7b9", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "341" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:45 GMT" - ], - "Etag": [ - "CAo=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:45 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrDioJ0fktjtyI_mABE4-oH9KqDlurM5mWiYDhoDp_LRJYSPvDxjuoaYzNhjoTHdHPALySTzCxScTstm27R-praR27EAm4Gx9k3wHAAwKJUptqRnFE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfV0sImV0YWciOiJDQW89In0=" - } - }, - { - "ID": "df5371a2b0cd8c3b", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "317" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - 
"eyJiaW5kaW5ncyI6W3sibWVtYmVycyI6WyJwcm9qZWN0RWRpdG9yOmRla2xlcmstc2FuZGJveCIsInByb2plY3RPd25lcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0T3duZXIifSx7Im1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIn0seyJtZW1iZXJzIjpbInByb2plY3RWaWV3ZXI6ZGVrbGVyay1zYW5kYm94Il0sInJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciJ9XSwiZXRhZyI6IkNBbz0ifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "423" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Etag": [ - "CAs=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrAVgetrpv1xjtKv2B7KFBdnbg20V-btiRn3sD4kQF7shWIQ1-FqJMRsbYfcYJyTMQHv_BhW_eVzvcc56z0LOcYfE-wbCRZpqYjen6c-bVcMzPet2A" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" - } - }, - { - "ID": "feaa4b1a4dda0450", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "423" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Etag": [ - "CAs=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqwMVZVQQ8s-ZBUJDLKyC-qMMTANncoBhRQWlMCITVUXQrTnFGGxu4GdKyVcuudrSa-DWy5w5iLE-i4a407GopUNma3xIq1eNiyVAziPD7jQ4YluzA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" - } - }, - { - "ID": "23d94c8a09c0c334", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam/testPermissions?alt=json\u0026permissions=storage.buckets.get\u0026permissions=storage.buckets.delete\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "108" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:46 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoR-XKWu9nOWmFZve2-NeDyDvZDo5rYpPD3avVEmZkZ-lODvHCSR0D7zdPeCa61L5EtOIYJBqmC0D9Xc229A1GDS5d91vgAGuzRoxRnNa9DjBw68xM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSN0ZXN0SWFtUGVybWlzc2lvbnNSZXNwb25zZSIsInBlcm1pc3Npb25zIjpbInN0b3JhZ2UuYnVja2V0cy5nZXQiLCJzdG9yYWdlLmJ1Y2tldHMuZGVsZXRlIl19" - } - }, - { - "ID": "3d0a13fe961ed15c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "93" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "518" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:47 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoIRjhruHo5rweyX35Zt-Mzm5UDD5vr4319gSNjtnJHD2RWtVLvQdjuZ0jv-XKT3s1xcJO7CvoU-qAehCknukI5ssvv2LgwazyhnkcuFws3isqM9uo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ni44MDNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImJpbGxpbmciOnsicmVxdWVzdGVyUGF5cyI6dHJ1ZX0sImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "c1f385994f4db8e3", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/user-integration%40gcloud-golang-firestore-tests.iam.gserviceaccount.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "159" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "589" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UozEPu-BKQnmBUxTwsOWB7mmIPC5lppmUt6lA150OiXgQttSVRetVysUp299p7PbjI08qchUQl2idgMbfBCScDL5SuoHi_u9ani0DXYX2OV9QXAovQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "ccb3141a79c55333", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - 
"Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3054" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoG9XpooOFPlHtC8fg7llzYYETQUMEm0PvG4TbxBF5KdfCdB_sxNi0Yy9KrAmMDPBVyq0Iuaq7mzQOhQe9lXe5PBOc8-CGTXZiEMkMMJe3OYxjExIM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlv
bkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "2d4244737318f2d5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3054" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:48 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqn_UdEXhLLXE1QsYjwtyqQlzX3nDvpAO1WNpRkHhyV39RJKkxMBrcd8f6Xo3VZFDps7iKuQHnW49yRqjh42062lkisOH7otDXrdAKAih4Iz9fzERM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcn
MifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "b63c3951975a766e", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12183" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq-LP6kQLHNpjT5jYVkPzNXyfr838sGr4jGMVPq-FOq01ayCWvyWVAsvUbxp0NSdOYJcW-z0i2NkQLuSKGQYrX_3z8LW1Vb4uL2jxfC8kSiuz28hkA" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "0764f2598b67c664", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "3054" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrGddSv5GSaZB7qBmb0PigbMbJmWs91JB0W99oOTcbMeUk9QeBf-7QF7W_BpAn3wNxqVD5cBDPgGuKrZCfYv77SgCGxcPW-jrXsUY5h50q8eFL0_3o" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6I
nByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "bdb0ff507e4df251", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13039" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:49 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up8t6bOWiJucB8KpVlIp8WjWLSUKssQaynZNguVy_oBM2ggBePUFnoteMcbw_L9aSHoD4hwi91dMaBC4aVc6VuaMSBSVuCe0L_0IymxhBIx3SRj_As" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "d2e66ae2cc0d11bc", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3133" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:50 GMT" - ], - "Etag": [ - "COq52NHx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" 
- ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqQ_qlqWPPh6t8-gwPlD5zRB96zsmFgzyez4LG3XN4uErivnyHcCeeHje_i95VLJcWYXn7_-knEn0faPBCCAIStf4fXGs_Lz_VzHK6CLBZjU-oBxcU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0NiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDI0MDc0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC4yNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuMjQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjI0MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDI0MDc0NiZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTAyNDA3NDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIj
oiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "89b2bc9232a865a4", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3133" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:50 GMT" - ], - "Etag": [ - "CJTz9tHx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrymnpCwS6vZ0kM33ZD4nBupWs9hAuaOGpwiNcS4GJctgaLPe9CQ0Ada06yQV1aEfh9FtX6lgYh9jHwbsT6qdgOYOYe9yGDdAF9f91aE4y4VUHQHts" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDczOTYwNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC43MzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuNzM5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjczOVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDczOTYwNCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTA3Mzk2MDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2
VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "a70634faba29225b", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqsIKUUc6WyIHUlDvAIDlXVeOofVG4sVV3Rus8ktfPMTUTI7e4PH4657AoDnNOKKy9TOgt8yUtUvCNvDFQC1xwVApFofRhWT3kcjsEYRgZPCDDsUqM" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "5539336327f2e0ba", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - 
"no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrH7pn9r8O1Zu1Nc2aNUO_CO75MyvOJyONGw27TUkfWRSSmFIWJUZ6F3e3OprVF5jKtAkQ73puZbyNCK6mozyHrmPRRa0mtyuBPhkYg4DzPGkXJOSU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0Iiw
idGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "029ec3d54709e7ce", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqcpItVCw5LaIWvDckOjfoChIifJGtuURBAmgtpzzna_iVOfDSaQOhxiMDVfYkV3mKG7__z0WTNRdVpa2Zs2IPcIAKwqwSeIMaLtz--MluSCHv9kc8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "bc0d0ed808bc8e68", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "5" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Etag": [ - "\"5d41402abc4b2a76b9719d911017c592\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1556835891548001" - ], - "X-Goog-Hash": [ - "crc32c=mnG7TA==", - "md5=XUFAKrxLKna5cZ2REBfFkg==" - ], - 
"X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "5" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpljfpVGJewuMZUyJZ8DU7j2k6JmfPqXXgkeeu8vGdK5j-C_DUgpubS4nFEp1z8EsN-fUdceCxfiy-FGIJiFpME38hWnztW_WIn6zeSh6LCbwIK9oM" - ] - }, - "Body": "aGVsbG8=" - } - }, - { - "ID": "b695481e00d2d6d9", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-User-Project": [ - "deklerk-sandbox" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "5" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Etag": [ - "\"5d41402abc4b2a76b9719d911017c592\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1556835891548001" - ], - "X-Goog-Hash": [ - "crc32c=mnG7TA==", - "md5=XUFAKrxLKna5cZ2REBfFkg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "5" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrmYf7b7ig2ase_O8qHEsgSAXifAlxVdDy_Zh5Qaqx1IlL1wkTvy1BSblI6ZDz3M2X-Y6KrdJp1IaP6nDU1F_Yhyt84Y7dOG80r2g-x4E7NAyyjV0o" - ] - }, - "Body": "aGVsbG8=" - } - }, - { - "ID": "2fe3b6cc96ffa451", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "266" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur-S2G_BUo7MzXUj-7vUDuVFWfBIA5GJYfTdaeruyelzRGFimnhov8wRB_ozWC2cGPQ-a2xQk8bw_cVV_D2Q7c7rdR1ujaTM3oIjr8vjsJBZXa1VeA" - ] - }, - "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RNaXNzaW5nPC9Db2RlPjxNZXNzYWdlPkJ1Y2tldCBpcyBhIHJlcXVlc3RlciBwYXlzIGJ1Y2tldCBidXQgbm8gdXNlciBwcm9qZWN0IHByb3ZpZGVkLjwvTWVzc2FnZT48RGV0YWlscz5CdWNrZXQgaXMgUmVxdWVzdGVyIFBheXMgYnVja2V0IGJ1dCBubyBiaWxsaW5nIHByb2plY3QgaWQgcHJvdmlkZWQgZm9yIG5vbi1vd25lci48L0RldGFpbHM+PC9FcnJvcj4=" - } - }, - { - "ID": "59bdcad91d723d0e", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-User-Project": [ - "gcloud-golang-firestore-tests" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "5" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Etag": [ - "\"5d41402abc4b2a76b9719d911017c592\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:24:51 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1556835891548001" - ], - "X-Goog-Hash": [ - "crc32c=mnG7TA==", - "md5=XUFAKrxLKna5cZ2REBfFkg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "5" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up3lrNfk5bvup2RRbcsT6CeDACQitaiF3BTMyQ7B23ACtTqwWweq9ooOfZn3CX7tsbzljhlY_009nsTXXQwm7V_xNNd8FZwT44CzSpul-SZIRODC2w" - ] - }, - "Body": "aGVsbG8=" - } - }, - { - "ID": "7b7b6a79cd2a53e3", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ], - "X-Goog-User-Project": [ - "veener-jba" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "342" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:52 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpT7Ge_QpcDrqajg6a4kZrmoA55ZSma9IM3pyz0Ow1bu5nLURc0V7cAThbvikg47_O-ox0uMdF6zBbtVtKL7olyMUGd9wqCQ9ubELVoJXiNMDehvXY" - ] - }, - "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RBY2Nlc3NEZW5pZWQ8L0NvZGU+PE1lc3NhZ2U+UmVxdWVzdGVyIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBwZXJtaXNzaW9ucyBvbiB1c2VyIHByb2plY3QuPC9NZXNzYWdlPjxEZXRhaWxzPmludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBhY2Nlc3MgdG8gcHJvamVjdCA2NDIwODA5MTgxMDEuPC9EZXRhaWxzPjwvRXJyb3I+" - } - }, - { - "ID": "914bca19df35cbed", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:53 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpM7lBsoxgNb-rh81Lfe0tWFGTkXQYfEB12ofkKZ8SK8kK5bPRUsQMVhm3aYB2N9e5_QSBZA25my-t5LpgR8TVW1lNTGd97OoD7bdBhP_CGo3xMbos" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "164b4ffce0c7e233", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:53 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqbRkPvid4Zo9mQsbMXlTlOQ1womp_CnHUJcNWRamG_lp4ABhgVPVOdL8lE11J3tmvVkYhzcEPpIJuA_RTvuAbssXFsNo9Fu14cQ44HwhSRYk7r_dU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "d04d3cb2ac9d6376", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12183" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:53 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:53 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrcH6PdfDpm0th9SnZgYwMYIiYGYz2VvNs-Nb0VYFbZdA4QXfOJiRFg-xKqmDq09HRt7j8OwTDO7UMm2EiqwjPz7j_JPIvfcCzfCnVVoF3XiZFomuQ" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "8b3aeb1f423d76c5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:54 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - 
"success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoX_zzYo9AYFb44YmIz49k5z8v8ITzEQsDrSxEE50-7AC-ndg18Yo5DFAZ9ZFCDtT5P7kch3c0-YlkhX-5oYCUIm-mmPmoTg0CKWVI0TJ9la9z9-vE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmly
ZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "f32edc9a9263c0a1", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13039" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:54 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:54 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uok5lyOpE3YUZDwRz_bsA-E-xzWeqaApyadumOOFoA_YmOi_xnvFaJr6gfp9Gaktbpud9mv8qDCIvq1Yu7CyAVY0ScanSfNCke3naR_G7kPKYgrywY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to 
project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "da73739950dbfa82", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3216" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:54 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - 
"Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrdpofRnchjkCQz5LLoghzg-RHoGATH3xbgJVW0LKGBkH025w5EB5RlGmHHnmXuPtK_5Ffyl1bxjmoc_h7_vGvjcxWbHv9-NKe9fLO_oNYRmw-ffvU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTQuNzM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoi
Q09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9" - } - }, - { - "ID": "94e697431c9323b9", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3216" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:55 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqhr0guIL00afW_qpaRvxEGRSJEWTtfrZP2Tv8Vqt2xas1ivvvMHVbSpT12ltU7FjytFhno9SFXTS8pW1S0S8qJdFctUCa83tkcQRXgN1uPUwhX1ws" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuMTMwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNT
AyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9" - } - }, - { - "ID": "3c5d694690a75054", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12375" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:55 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UogsU3nh3dfRYWq9YqTmJc7CfCX1U0b-IrWpDCM7M4YwfuUWgzzzcydWdHLrF0UPqRxNFQj3eMHBbDuHYcF0xuUNah4g5J_nt26feHRp-moh7uFXNw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat 
io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "e050063f79f64820", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3216" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:55 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - 
"X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq4hs_SY8_2HHPHu8NfVIkOdwmZz7h6XH0nWRSTQJSqzQZQGvv1p83Z6W209h3bXsbN9_ESiU3OIyLRwmEldfBP94hRhN5t1JnagcKPcMEOv8FtvHs" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuODI0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9
LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9" - } - }, - { - "ID": "f7edef926cdb7d75", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13231" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:56 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqyQ9DLrBDGkx7uOkrIKrzMMYA4YygdFsd1si16n2Lr4I5Rfp1lxwadC3jThiJbE-cDyuZATHtvM2bzpsRDPJzbmKgtxYydHykiB-oCsSFqfwy50gw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "2745d195a40b86f1", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "377" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:57 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" 
- ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpGeWJRYTNsu57knuRFhwHuoeUMIJhUtuQj4PLhGDoFcITB2Y--7JgaOCVhepJ6a5RRKvU9d3UrAyrtbGNOVHyW2WAbN_KpfkiFhYq8l7HPERpLDjg" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" - } - }, - { - "ID": "5258a70437064146", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "377" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:57 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqImoOCe_iDYCV8XHpaaEXPn-jeAf0M1MyixGju1tNCSTBIMEKDzh_wxVxapMmXWPnOes7YX0T0Fr663x7eJCl85B0EGXggUgp8bqeDV3cJ-mqeDrY" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" - } - }, - { - "ID": "5cef2c464b7356c8", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - 
"StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:58 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpH5br-AVWN2g1nkkM_FJWZIjgOgh2NGLuiyYUS7QD2DSs6B-c0IQHgC2NUyy0eZq4McQhwxBGhUJ05-WeD_bX35i7skN7Jn_q3ELAaDwpi6_x3Z8c" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "11cd5140a264e423", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "377" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:58 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", 
- "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo2_tsxxuZC3Ni-A0zL0E9w8OVne0sWfLi61Cv48eTWj2R4F6G0AmXZU7J8L-WnDJf4s-6GpZV0SyQqrjBA_Mp75pJfWb6cl9_7oimTy8HPnxAtMwc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" - } - }, - { - "ID": "dac6629e58d45469", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:58 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrTXUzCa6WOvWD6NqoHhkdwRtG0Z4h0QurHOuKU1gyAWzXOt8lc1NVsLqe-6m8siE76k7l8g0EY3Le273_4GFB81gHMhM4qBOOgbwSBgLl4zKHZspU" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have 
serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "1c8663192ed534e8", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2370" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": 
[ - "AEnB2Uqfc2TzyVBAwGuVpWWQt7OYr3IQEmVj5R7LIr5LSRAhwFernkoD6TFVnJno_ria4LL8P9tSQgzCF7uJpStJTpYgTOiW5FqTxQ-AgbOjFmAMjah3o6U" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" - } - }, - { - "ID": "51858ae7ea77845a", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - 
"Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2370" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqc5V7vaPL-d4aMLJi5QXyxZYWWtGBx0QuvfeB8rFlhwPGPFLQUABxm_XXkR0AOeIqdZ-intSu7I5srWdFKX85aY8r-z4oRTuB-CjGUe5uMiXQzXU4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aW
NlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" - } - }, - { - "ID": "8248d1314da9a068", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12203" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:24:59 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqrg4CYWnZtCkMXT5KHCcQXY-aSGc6JuuEoVkTtKgObB9qr-dE-5sMKCrbTfpFudhid3ulDpkNuOUmRmBy4k7QQoRxqgues_a8dB85cOybZKZTM864" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "8c1628782bcf7eaf", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2370" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:00 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:00 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uqmer5wTHG4zziRJUR_QqUbX_xgytHZkMg2KFntbVz8pn55RFN7idAyYcqz3AhqthLD-bHH2Lggdg4MuIdjHVk_kKHxvDdqI0p814avmZPYAhJdvsE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" - } - }, - { - "ID": "6813d64060667ba0", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - 
"Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13059" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:00 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:00 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpsvYC9PE_CO5H9MncFfA0uYzQqFBOqu0_zdexcxDTJ22CO_vv1LiRY6ZSC49_FMeUZBA3KdMfOnS7DsSxBpqrLC9ZM6GAs2sFolmZKhp0RWJjvOzs" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "76ac0290041d5a26", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:01 GMT" - ], - "Etag": [ - "CAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" 
- ], - "X-Guploader-Uploadid": [ - "AEnB2Ur-ejQ-G_PN4CMllngdiEh1SqHQNy8TXBA-htpazVSOadp1oThRsYsQuflX2kwkDQHFpeo1UgDPXqi5BRq8qZUBsM2koJIMOp-APWnB8T_qsTHlY30" - ] - }, - "Body": "" - } - }, - { - "ID": "0de0d173f3f5c2cc", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:01 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:01 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqLOtWcLW6yZk8n72e3qkdl4ft2-s7FJFYyRv_5REkk9Q6d7qk6Pcpcy_HMCJFgKTV5RZzzIU3k5hceknIV1XNGVWK7gOCFimyeb8sFjNZC7y7rypI" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "eb2af5bce5b9e1df", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:02 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqgaldc1OJvgmfZxcZs4i5d6EXMyFK93ZUJLzSK8Vo-qPYKRwhvPzK8GFv3gqrdyjBbvdc2Uz57nCwy10LhC5AD2pscln8VbaQIOAHqxvkwsqV27Rw" - ] - }, - "Body": 
"{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "ad55ad01147a8c62", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:02 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UoQpkkPNbb1CGY8lAzUfymkmZ7PNlRK6hK3deBeoMip13ARTb212DIgf4lgUDM8PVQsAqcnKwzNulbZICoGzHwrsBZfyi7p5TGX2yJHLXSAGEY0MBE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "1ef9ea9b9bc398a6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:03 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:03 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Ur5WrZwygTHM0dDH6L1kypZzaAQtCwxt9wA7VXvgnjwrfwdRnTw0D7K3VLYLRTPDcf8mRFn5CetkcimFxfqUDoWLMF7bH1TLiBr38K6aBU74aRWUd4" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "5c1cadcffc6c95e0", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "119" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:04 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - 
"UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo_n__u-lZKgZqveqge9YBw7wQMldCd4t0io7l_L_nlcF4DSPBPvZd9jGPCstE1EYuQS9L-Z3Y53Q15FlqM1IcmRDv6bYEJQWqkpuzCRRa_TLh9P7A" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" - } - }, - { - "ID": "4a615256ab8c55b4", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "119" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:04 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpwGIlBhvugAIHxiObS0RPpJ40OY_R3exJfPh5IHLLB3SrdYtGMpD01QLdxc3E0IPGkBiTrwM-Z2X1gMcxuOqJWJUrhV4xTFe_n5G8bBxzievz39F4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" - } - }, - { - "ID": "63c0acd95aa571a1", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UqNu5DFt-lpr-_4YX8LRIhoi3KVRYVgFAlG7WOPsY7IGJ2NLZSF3SQhmkvfL_IKZ0FWIwx1nGT6HBBe9t2RYXNE1kXis-9KgEaAWhrEByTud5QGx5c" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "3c6b1be88134f9d9", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "119" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": 
[ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqzTVWCTbjlM7VCFWFbOojrtt8ZMY9bRy0eXj0Uxj3gaYalvwqR0WBrtbX0iGQ-aN5pv3-Acc7EWIoHBa37YbMpx9fuoJUpTswmdpiCOH3wbMN7tIk" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" - } - }, - { - "ID": "60cde48b740e9a63", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrLyiKgPsp1RDgH3Q2SGlQAwTvGJwVDvb9U9FfvTysIFo6sO-5AoXrGPQfZpr80aMOEKojmPksURjhsteWSV9gdPsOlPIJMorxvQMHeomMCh-Bgmuo" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "95b915570e7c2375", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "684" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UrOZjNI5VbLPTLuBwhSAdcgxvqgcFpH-ED4XyEgIbnndmtC_zJ0lkLfudymSRpdRE4NBxn1Vp-AmZnb1ONsP2I1GBnLYfpYRbzFQpmihPrgBkDnaB4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" - } - }, - { - "ID": "e4170c637b705043", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "684" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqXQonYrNQZxcXoDllGz19Ruu-Eq1_3FVjMndISB0h1O1ojodwFr2Xor2nZHBZgv3NN_YOSrunnT_KYTfBFNdqtVvqVC2L19Qhwk-cxse_FqrMVbIM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" - } - }, - { - "ID": "a3918deffd2f8f0f", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false", - "Header": 
{ - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12203" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpCCE2tr_4iWFlwmQkFVvzfskSWvZdvohQYV52oXF-tWUfie6Brp5TYrvUYx7jH8zsTg9qqk_kv0th8pOA1WsHahBUbDYuaqAmW0EpSJI6_xjBPdZ8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "42a941620757dac5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "684" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Etag": [ - "CAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uoqov2civPKG8oSKmpVgUSoOD4W3rev4av_O6jgxkc5tSpPMgtxZeG3NznVRuFJLgClao2zPH8BvZIxTAAPUi_R-H0zgPz6ou8k9SMzLrcgW_HW100" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" - } - }, - { - "ID": "ae79f2e22d58681a", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13059" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:06 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqP_EhJin_8uqVkiSwAv0jBAI8A0o7nSwNAQ-Z_EdAnq2PItIGJZl5NOE_xcDTFce_ncHfsf6LvV4NMvuxiOsl32BOddZj_6x5dC36QqrKvhXKaQJ0" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat 
com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: 
integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "99becaf4890b3c21", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Etag": [ - "CAY=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": 
[ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpWMu40YNkuE3QcNByTmrKw_cnq4a8DVtcknBJ7gTnLdH1BC6CqoFOStPpa6y4PMmdpLpquozlOzvI4NWjgZYp1d9F_wBEyaq-wquTQAWGXNy3qktY" - ] - }, - "Body": "" - } - }, - { - "ID": "586ccc26d5a22712", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpsRO9GedosQ-kg2lCQpx2S6C0-HH592CdB3SU9yGMwEEGKX9md7KGeSDhecrm_6Ug6tyYtEMlsMq08YBAKGprXfcN9130mRolu_HVey3CcrXK1nZ0" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "fad4bdab588e0f3a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:08 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoUefKoF0asUvxzmAtX5z5m_5FRy9UfliKkjQN8cYyrHAotGwiQA72QN0lYfA0HXbe7gFRokwBAY8R5OpHcEdRq5K9sw8AwHuNtruFtBMxjN0D3hR4" - ] - }, - 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "23bb9ce53da2f525", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Upa2KE5JJOXq2FOn7RHasJpOeeHqWU2kk3BC4cQcGQ9jGzh6XIksN6Z6u1NVoCbxW7-3zGiZMdAop-oa_EuyPAauD__JgzNPdngeNeXO7lsGaVI6i8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "46482e47e655836d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Urze42blCAS2Dskbf5jkbCkdo5IhfOjcTlcufV1ooZd5x_QD0zK1gRoXLXfvLjKHEKaJ7cuDeSRmNjuqgEoxJFMmXUoPOIetWLYbH0G-lJcM6tydew" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "856a1fb5ffdb000f", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "463" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:09 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": 
[ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoFSnYuPlv_JejZ0nEjoWRXeLwj0MS-ddTlIaSOjaK5BudyIcIOeVDEeyFYWsF-GCZzMC0F7v-UEw0_lbsYBXCMZcLp6WZu_s3OPDREhpz2s3Txw-s" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" - } - }, - { - "ID": "a74c3e1b541a9ffc", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "463" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:10 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ury3MiMZho3xaSHmFgwHNvKIHTa0_gZzfKvX8FW6iVGNkdSbXvmVypIUuIoKXDjd4rOXpFghdoJlES9A_iXUvZyfWAZLvWqR1w6sDRymrIARVd0dPE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" - } - }, - { - "ID": "913da4fa24ea5f1f", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": 
[ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:10 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoKAxObyNBS_Mew44t3k1u80cLBGQW_I2ohrF96rIjZRxFscjGFrvsOHfLxw78rTRnpHmfA0uXnNz8T2FPc_Op16Lnn1YkfyOezSEhDBx11qZbinEE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "5dbdf24cae75e565", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "463" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:10 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - 
"Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpoJgRZ665WjiUif7SnQ-Vg4B0kJl-UQ_kIk5XgmcQIL5zmCBp3NCxjksybkvNKpGcQnIJSLevfyWd2fugRv5y5vxmEBgqFffIy8ibTIyBDH4b7R74" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" - } - }, - { - "ID": "f41892a142fe6d2b", - "Request": { - "Method": "PUT", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "107" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpThInLTHhtepE7J4k-Rg6OoLRyOqnjPz4sZmfIH6wp5TSIyLrZQhExKAP52XbwUwMYp7x-xypSMY5kfR66I-dmpRWaFkfs0Lhy-Z6kqb5JUXSppqk" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat 
com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "5eb0710be2571dab", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2800" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UpbetUCGwYg4rhIR-DP3ntPqbkFwSqutNltVR55I4qedP-rZ5hC7ospMy6hZ4aQaNOhNv0frmst8MQZ0j6Mp8BcTC6qy7mU8bhEAiMGLycR5XxUWfI" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzd
C0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" - } - }, - { - "ID": "56aaafb0a24b0644", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2800" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrqQczDm-dzjgTlLFcJ2lXQrMTpQ_8UP-1hNbGj7eZEgZcbxWA-oyXqEe4tJXq3gYdZ3mEIcAzaDtNeBHWidpIev7kdXWank04_JSWbQtU2UomJqeU" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJi
dWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" - } - }, - { - "ID": "3bbc40d778939612", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12203" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur5kJWpLwxz76BKPll75-nOsLv_uoh7X-TW0SbNHCYEzkGq8VZ-W8a5tKGUF6rKea4ejr4MhcqJVr1Zu7O-zNAfQWJMsHC9CTkOrPa5Iu1s0KBJDdY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "a2868e9e376e356b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2800" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAU=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UoyZfAJB3TroGxzYv0gy7dHUwpU0jtIeU_EyQSFQKzHlHbyRa33r_a-B1aHw5tHaoawuDvv7skisdx10yhmm9ZRrPw0ibwpMkX5Cwhf0OK85DF84jM" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzM
jAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" - } - }, - { - "ID": "ffb216dc74ccc27e", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13059" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:12 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqBumxgw_FTJlt7J3xLsBDxljAcEyC2G5aQqhEibawMttB-ogPabqDfz8ZN65WtOikIggZQ8OqW8zigV4Q5vfQkog7NMasAuivcMjhAsgqv_6qWv_E" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "183157f40c6c1bcd", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Etag": [ - "COGeqNLx/eECEAY=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrC6xxVQeIIjitNwDv5PhDUIBQbtGdHJDi_AmLs8vTbGzkby-5hguMIwI_UeHlCuGF1u5jBoVwCOgSqJBiJJ5fxh8oCzBn-nHGcGAdIOeQt7gTL0RQ" - ] - }, - "Body": "" - } - }, - { - "ID": "4b6b925053570053", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:13 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpiIBwh04pJH1-X9d1oJluF_2ZCJEFLzF4E7SI-easf2noIolxavCbpH7NP9GMmNXnVK_ZJm4kW8coV5ArXDr-Rwodjnd9Bn2hFiJ_puNeWWldJ6mA" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was 
not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "9b4f9c448170e89d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:14 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:14 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqHc1VVBdvXFq9aDPAprpXNP5xPt-ZtPdYXwCQPHX_s4gkCMom31AtHeDocV_hker5qryWoYnRD8PPu5c7JqDNQ75v7GgE1vO5xAt7AqWPoLU5MO5A" - ] - }, - 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "692d41cf9ea91eb2", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11631" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:14 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:14 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UpuFB_mdJg9Fd7Sj06Xdcrj7Up2xXVc_P-SgISEEtiv4v9doPXzNbzBz92Q2UfEchYWSQnikHhS34dUAOefj9J4vaBYqjc9RjFmrn8YcPNyS8aWKgI" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "db035a3cc51de007", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:14 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:14 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrGb7BqO44vNpZjBnLrSI6_jVCVRFevSqm5eux2DIE2zmUY3sH0T0st7OOsLPcBjvLSbDfijMcdJwsgpM1fQGcerB9bS8Ffd4Qgzdjth0I8FreV4Vs" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "e1d95794cb6be989", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3273" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:15 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - 
"apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVcH2L0r9wGab--fnRuKptLRoUoHCUHfRegh3rKNQExHVsYLnt1NFwOKFa_6hRbeYXO1aY8-F-KiD6IY3sWykIKXr5iqelEJHTWEMQ8wHuzIcWLO8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTUyNjYyMTQiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuMjY1WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1LjI2NVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS4yNjVaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1MjY2MjE0JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTI2NjIxNC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW
0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In19" - } - }, - { - "ID": "297f13e741cceb04", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3273" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:15 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UroLXQs6945-UOUyN7qOhICUNnGSzCVrNCv5DD9uMyvgD08zpuufBZj8WM8Fxe0DuOuQowqbPEn1UdJdbFTW7puyeY4zX8fM6UgT7SZIVB0Xi-B0OM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTU4NDE5OTciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuODQxWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1Ljg0MVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS44NDFaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1ODQxOTk3JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTg0MTk5Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaW
FtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In19" - } - }, - { - "ID": "9617f04d2dc95a7c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12683" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:16 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqeJiJeDLLZgn9vlRyNk1plY-7dcPXaz47gPNAhMqmSx5ZMvg6pqWj18PVUR65UGuWE7Amtua_zqsv0dVyDZE2FFceMeIQcQ2n6noklvRS9kxPXcEU" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "2309e069a65c894f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3333" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:16 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - 
"X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpxhvVZmlJMvR93BoI0JsL0TYbDN-kmlYHQOOlRfb-Nnqpc8iUImRtJT8Qvx0XWrOsALmLv4Oub_aeCCWkHPvqaF28oAazlhqRZYoxrPlaQlW0N1Qs" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTY2MzcxNjUiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTYuNjM2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE2LjYzNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNi42MzZaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE2NjM3MTY1JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNjYzNzE2NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYw
MTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In19" - } - }, - { - "ID": "d2fe101ea1da654d", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13539" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:16 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqjFigs6aYtqXW_7VucOIgJY2MQZwuLv9B12s2ffhWgWDxtJyjLinG4A8U9gKBCpiTt9jUvdHKtaG20qZfyfN9Q1eVAkUQh_zv7ESbqnWhsiheI7zw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "6620f07aff04b6fd", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "742" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:17 GMT" - ], - "Etag": [ - "CP/B0t7x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - 
"no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpyWMaddvNz0IWnQ6KIl-tv6fJZCvrqTlGuaoHvKqXhxb3O1EKUIEirKSVcaedFZyxtjLhPD7Nj6qf1CRGWG1VnVigo5Wf3sYFDuuLvctxVpoHT0rY" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTc0MDY0NjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNzQwNjQ2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNy40MDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTcuNDA2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE3LjQwNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTc0MDY0NjMmYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNQL0IwdDd4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "7f88847ad9c49799", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "742" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:18 GMT" - ], - "Etag": [ - "CKav+N7x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urtn69BB0ytbCZpuCbus4w__tiLeBpqeNzD2gKl5KN2LgP9ZJOIjudGKpDOJRhyW_WQjVOCJBC1CSB_AgE0MdPzVDAKWt13VfQ8aKe3pMWh4y1kOlI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTgwMjY2NjIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODAyNjY2MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC4wMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguMDI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4LjAyNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTgwMjY2NjImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLYXYrTjd4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "a21f3ddca57ed424", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12267" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:18 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrE9jrLJ0W1PUHWT3ldYhVsL9nNhAeFPU2K8g2rCMimty07QlN95esxj7dBAySQ-nzvK17L3WA7vjmxjCpeQADa2bztj6ke8NZjb8fuWkQHsnnzKXA" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "08fda3fa173e6e11", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "742" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:19 GMT" - ], - "Etag": [ - "CKy2sd/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - 
"UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqfGXVJ2-MQaZWJ-JS6runpSycA3GDKIXYHDGf3VBOXnADbUzL2YhZVmlQcW_95jXY0eMkK4Z5tSRk3cxGP_84T_1t3YDYQS2p37ZouBk7GfX-4xxw" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTg5NjE0NTIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODk2MTQ1MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4Ljk2MVoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTg5NjE0NTImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLeTJzZC94L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "0c9e19bd4ec37de2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "127" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13123" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:19 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGREJ2AZDcrZiJzPX5ltwlB28vRYhZ_xv4LMIO3La2IgFWk4wd5IVrqBs3z2epgtg7wkPpZm4qPH9zTzSIsXNxdLLHcNMMYr2azidefxtJVgXqgDY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "84daee26f7d989ce", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:19 GMT" - ], - "Etag": [ - "CLmF4d/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": 
[ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoLuw-z5wwTKy-X92_i7pyTrimolUNRUGrElhep6rUH_mmpgu_Vxp-1ncr8x8XNXHLs8NFRW5S7WVUsaDRRTrC5KBPea4EoFLANK8MFY0FpTkXr7_g" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxOTc0MTYyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOS43NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTkuNzQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE5Ljc0MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkxOTc0MTYyNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MTk3NDE2MjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjd
EFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "8457a5701f4ced08", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:20 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpKF1M6PpAT354ON3809wMCor-B9KITRuEV34MQczP3OD4Co0sJYtNSub2FUzA8qanTDrtANRK0RuxPTz314o3HQro2HLOoyKZBow5CZ4sfsunKHv0" - ] - }, - "Body": "" - } - }, - { - "ID": "1c2ba8a7670e2218", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:20 GMT" - ], - "Etag": [ - "CP+SgODx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoq9hgjjwBbSH1zXN2UlNtWO31Qf5uk4ijjGCQlETB9_5H_CouvU3q4tyHAKEbIBlaW1_mNk3Fsp6LgXqsPPKvFzs3-WHeADAgc2-GpD-0inoYUHMs" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDI1MTI2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC4yNTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuMjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjI1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDI1MTI2MyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjAyNTEyNjMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5
kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "7e6d2f97bd881584", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:20 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UohQH2nTuRg_FtGhlfr9zC26C6lKFA-aZTf4GbiV5kWZCJ9E4YLHfIUKLpCLIgEfX9TAvi4oPNWfEBpbY-orlescLxrxYcB1U5fLmzzHQR2R0teR-s" - ] - }, - "Body": "" - } - }, - { - "ID": "48e189abff98ea93", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:21 GMT" - ], - "Etag": [ - "CM3Aq+Dx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uq_NscG9s3iq-8NZq8vQhzdc9FJMaAfq_0GborrX84b8gG2zpbUNtEVnrjFjLyOXqBeJwthPgPUPcsGbIcD597LaPlxt-VNaetM2BLtMtYry8pPeQ8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDk2MTYxMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjk2MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDk2MTYxMyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjA5NjE2MTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nb
GVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "02d6922c26c06cbd", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 400, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "12243" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:21 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:21 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrmjSJHhErkbzi8feJ_-8XOMqsIzGrEe6frQzWXQ7dPl0D8MW1mMHClKdwb2zBOTvpx7mXQWy2STZYSqx2o7O1Tr2DB1RZfwCZ3xp1jgw3NCICi7Xw" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" - } - }, - { - "ID": "3fee93fca0ef933c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:21 GMT" - ], - "Etag": [ - "CLfH1eDx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - 
"apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqLO91a0iSaTsK7EA2iN621UX-atvJ-8m8BeqeR8_o_CQLB8Pco5FMXznwQY7DZ8u0GPd1BvLE98s5S7T1Z6yL66OKL0CFFUntLBqNAKlji2Dcme6g" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMTY1MDYxNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMS42NTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjEuNjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIxLjY1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMTY1MDYxNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjE2NTA2MTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA
1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "5c26cf93373a296a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:22 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urx3fvI3sQc5JiIazK1OQw5W12poVdjRyBKxIBM6N98jEJli9F0O33KVHHcHaP-QybHub31QmLNngZpzcVKJq33DSaVMW2Vpi5BUaVAAUL2jMoyOxs" - ] - }, - "Body": "" - } - }, - { - "ID": "f8b5020ef71e4047", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3112" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:22 GMT" - ], - "Etag": [ - "CIungeHx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ 
- "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq8QEz45d-uPa7qZ6Nl_SMqw2iepQXoM4aAkTx4YCqArXnSKI1UO3ZhJ5-_PDg3Uq6PPa2JDBTbxckiZCRMQvf0Cv6E_8Cwk3jKXGkneWFJYSeMAts" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMjM2NzM3MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMi4zNjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjIuMzY2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIyLjM2NloiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMjM2NzM3MSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjIzNjczNzEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bn
QuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "7cfadd8c1e7c0bd8", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13099" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:22 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:22 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpjVnPaklKmbO8qV2lapwEIAwPjvf4HSJs3kNHHO_xZH4bV65pVPLKCb2qjMPCYSwEKRZzH6NpYjm6WlxsBdJnfBvpyRpIR017q9iJDyBcVPNUoDn8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" - } - }, - { - "ID": "b1381b9995cf35f5", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:23 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrD1hmfKK3yAwFXKqIwvCsEVMb_e7MWysJc7zbyVjZ_sbD9P41lzc0gYSef9yl8CRa6B0RtPq14V5MBSdy6C0i_cAkgANDN1da3O5QKELvACU11moU" - ] - }, - "Body": "" - } - }, - { - "ID": "5e06ccad06bbd110", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:23 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur2FRcf4QGZH2KhkP3kATb9eOy4S9lgrA3NQXYqhc1KBZDbcZN5NHF4MB6WiTxOraNE0HhkW3SOg8sp1gS4kAcWRZ2G0EGWmLgL6RUcwr3a2O-WY10" - ] - }, - "Body": "" - } - }, - { - "ID": "b637ccaa36faa55b", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:23 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGzSAdUK8pPs53JjaSJVQJObMJRsfQ5FIpSL0D4tNfft68cfE8Xb5fjSsAQG_PfmKZyX1Ind-s6dW6FgwREeHzu_OnEeOwCOaT8EZi2N4vNeWLvac" - ] - }, - "Body": "" - } - }, - { - "ID": "15055c361faad5c1", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:24 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" 
- ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqwmXVRTmXX3P5iId4-EjuA1oviIQD-Vbdr7M5rgd7d5IxNvmPVkZz_zFyMBNoHd_DzXnPMN5R7grAe1yDdOzbyafIBwRcFsLh4r0a7ghp4LkNSLZk" - ] - }, - "Body": "" - } - }, - { - "ID": "307f8a44d4ce6e9f", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "32" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:24 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:24 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpaHmcEJBKGI7CtW9fK8-j5Z-oRk_dTwMS40wVaoNH1PN7k6x6d_9aD3w3ce6WGZJF5OYxkdoifp6L2vVz5lIGjzcmmIZ4Wxp1vktBOv8hLa417Mp4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" - } - }, - { - "ID": "f8df88961018be68", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "121" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "297" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:24 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqWiAA6LvnU4rExe2rVTwwFd51SI93o_d2tOj9OY1jk_qe1Yd1eiTXIa2eNecJQsmgwuFr4BUQoWy8ndutrCqGucmVIRL0jA6-i-vaGGVRF8uEe0AM" - ] - }, - "Body": 
"eyJpZCI6IjExIiwidG9waWMiOiIvL3B1YnN1Yi5nb29nbGVhcGlzLmNvbS9wcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvdG9waWNzL2dvLXN0b3JhZ2Utbm90aWZpY2F0aW9uLXRlc3QiLCJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJldGFnIjoiMTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm90aWZpY2F0aW9uQ29uZmlncy8xMSIsImtpbmQiOiJzdG9yYWdlI25vdGlmaWNhdGlvbiJ9" - } - }, - { - "ID": "45f44fa6c3668273", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "340" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoFWgB9V-ilyDIBP7KEF3SY7uTg-rikYE1ooEgWYkiXb2cW6oOwwtH1W7oe24WU2A9oyJIGOSskz3_icHqmMn7CfPoZQ-Lu5QdtZzv_5062_-UzAt0" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIiwiaXRlbXMiOlt7ImlkIjoiMTEiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCIsInBheWxvYWRfZm9ybWF0IjoiTk9ORSIsImV0YWciOiIxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9ub3RpZmljYXRpb25Db25maWdzLzExIiwia2luZCI6InN0b3JhZ2Ujbm90aWZpY2F0aW9uIn1dfQ==" - } - }, - { - "ID": "08c2546e2c262ee7", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs/11?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoTmXr5TZZPRnABz_XbbZpjwU3iPz2e802RkcmNFr3sLzUx205tdmAb6vVWMNlYMUGZ5tGoddlqQ_oPnz0Xdvpz8CiD2eDLMDyrMbxVFjc3BinwBo0" - ] - }, - "Body": "" - } - }, - { - "ID": "efc8b67f9c2f4b40", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "32" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UraMBYT56qFvKLe_OPJfpJRhTy2qSpQyRjP6CScOnnijgaQ88eQtVahktX_Vsvy-G7J11d7GNT64FnpqZrewu1sUmuNNLiEtmkqVFILdNcWed6i1w4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" - } - }, - { - "ID": "6dd92f9ce3b15a57", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:25 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:25 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpedRCcxIOhXCWdipyV2E0R3z00CUlOK6rPlof1gpKuQbLeJmvMoPFn28o8zqmmeVJ5rbX41bB6Hp116-_ISgEXl4Htmc1VS0Aq41lJQiN_mIvozbY" - ] - }, - "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" - } - }, - { - "ID": "914cb60452456134", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2F\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "12632" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpLE4EijIrGqoHRLrCqmprZtxBU2JtHLLsZ-nIakblnuZX8s6FZA1SEaczyybdjB32loN1-gVCf83PFM4x06S24ATKp9-xRgQ9QNC9fwVH_ec4e9JQ" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF/1475599144579000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF","bucket":"gcp-public-data-landsat","generation":"1475599144579000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:04.545Z","updated":"2016-10-04T16:39:04.545Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:04.545Z","size":"74721736","md5Hash":"835L6B5frB0zCB6s22r2Sw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF?generation=1475599144579000&alt=media","crc32c":"934Brg==","etag":"CLjf35bLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF/1475599310042000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF","bucket":"gcp-public-data-landsat","generation":"1475599310042000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:50.002Z","updated":"2016-10-04T16:41:50.002Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:50.002Z","size":"58681228","md5Hash":"BW623xHg15IhV24mbrL+Aw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF?generation=1475599310042000&alt=media","crc32c":"xzV2fg==","etag":"CJDn0uXLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF/1475599319188000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/
LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF","bucket":"gcp-public-data-landsat","generation":"1475599319188000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:59.149Z","updated":"2016-10-04T16:41:59.149Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:59.149Z","size":"56796439","md5Hash":"FOxiyxJXqAflRT8lFnSdOg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF?generation=1475599319188000&alt=media","crc32c":"p/HFVw==","etag":"CKCEgerLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF/1475599161224000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF","bucket":"gcp-public-data-landsat","generation":"1475599161224000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:21.160Z","updated":"2016-10-04T16:39:21.160Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:21.160Z","size":"77149771","md5Hash":"MP22zjOo2Ns0iY4MTPJRwA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF?generation=1475599161224000&alt=media","crc32c":"rI8YRg==","etag":"CMDW157Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF/1475599178435000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF","bucket":"gcp-public-data-landsat","generation":"1475599178435000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:38.376Z","updated":"2016-10-04T16:39:38.376Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:38.376Z","size":"80293687","md5Hash":"vQMiGeDuBg6cr3XsfIEjoQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF?generation=1475599178435000&alt=media","crc32c":"uZBrnA==","etag":"CLiT8qbLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF/1475599194268000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B4.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF","bucket":"gcp-public-data-landsat","generation":"1475599194268000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:54.211Z","updated":"2016-10-04T16:39:54.211Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:54.211Z","size":"84494375","md5Hash":"FWeVA01ZO0+mA+ERFczuhA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC804403420162
59LGN00_B4.TIF?generation=1475599194268000&alt=media","crc32c":"Wes5oQ==","etag":"CODCuK7Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF/1475599202979000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF","bucket":"gcp-public-data-landsat","generation":"1475599202979000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:02.937Z","updated":"2016-10-04T16:40:02.937Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:02.937Z","size":"89318467","md5Hash":"p4oyKHAGo5Ky3Kg1TK1ZQw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF?generation=1475599202979000&alt=media","crc32c":"pTYuuw==","etag":"CLiZzLLLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF/1475599233481000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF","bucket":"gcp-public-data-landsat","generation":"1475599233481000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:33.349Z","updated":"2016-10-04T16:40:33.349Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:33.349Z","size":"89465767","md5Hash":"2Z72GUOKtlgzT9VRSGYXjA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF?generation=1475599233481000&alt=media","crc32c":"INXHbQ==","etag":"CKjykcHLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF/1475599241055000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF","bucket":"gcp-public-data-landsat","generation":"1475599241055000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:41.021Z","updated":"2016-10-04T16:40:41.021Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:41.021Z","size":"86462614","md5Hash":"8gPNQ7QZoF2CNZZ9Emrlog==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF?generation=1475599241055000&alt=media","crc32c":"uwCD+A==","etag":"CJiW4MTLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF/1475599281338000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF","bucket":"gcp-public-data-landsat","generation":"1475599281338000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:21.300Z","upda
ted":"2016-10-04T16:41:21.300Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:21.300Z","size":"318887774","md5Hash":"y795LrUzBwk2tL6PM01cEA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF?generation=1475599281338000&alt=media","crc32c":"Z3+ZhQ==","etag":"CJDt+tfLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF/1475599291425000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF","bucket":"gcp-public-data-landsat","generation":"1475599291425000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:31.361Z","updated":"2016-10-04T16:41:31.361Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:31.361Z","size":"44308205","md5Hash":"5B41E2DBbY52pYPUGVh95g==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF?generation=1475599291425000&alt=media","crc32c":"a0ODQw==","etag":"COjB4tzLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF/1475599327222000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF","bucket":"gcp-public-data-landsat","generation":"1475599327222000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.159Z","updated":"2016-10-04T16:42:07.159Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.159Z","size":"3354719","md5Hash":"zqigvl5Envmi/GLc8yH51A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF?generation=1475599327222000&alt=media","crc32c":"WOBgKA==","etag":"CPCx6+3Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt/1475599327662000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt","bucket":"gcp-public-data-landsat","generation":"1475599327662000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.618Z","updated":"2016-10-04T16:42:07.618Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.618Z","size":"7903","md5Hash":"el/UdDvWR0hfiElvrbBcUQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt?generation=1475599327662000&alt=media","crc32c":"PWBt8g==","etag":"CLCfhu7Lwc8CEAE="}]}" - } - }, - { - "ID": "3f3f45cd2b718e17", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/noauth", - "Header": { - "Accept-Encoding": [ - "gzip" - ], 
- "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "247" - ], - "Content-Type": [ - "application/xml; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqsZlmkTyf2_fqITeaIiM8s2MLUvz5qFiP_rzA4Mf6Q9LMxsiQeP-GBRwHON_XvnG2qef3XL1EzgTLA_GxtLKZRjdzOBndJPf9XbJa9KjJCIPlducQ" - ] - }, - "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+QWNjZXNzRGVuaWVkPC9Db2RlPjxNZXNzYWdlPkFjY2VzcyBkZW5pZWQuPC9NZXNzYWdlPjxEZXRhaWxzPkFub255bW91cyBjYWxsZXIgZG9lcyBub3QgaGF2ZSBzdG9yYWdlLm9iamVjdHMuZ2V0IGFjY2VzcyB0byBnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm9hdXRoLjwvRGV0YWlscz48L0Vycm9yPg==" - } - }, - { - "ID": "110cdd2daefc6162", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoibm9hdXRoIn0K", - "Yg==" - ] - }, - "Response": { - "StatusCode": 401, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "30405" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:26 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "Www-Authenticate": [ - "Bearer realm=\"https://accounts.google.com/\"" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrGgfg36ZmqdKbc0NqBivwXULVnF2NuBAD31rJqzC5Rj9xgHFbRnwxJCbdxCuUdlAhGW_-H88mBadNGmqch0fmAyAd80HPANBkUmIgkolsL4HK7XAU" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth."}}" - } - }, - { - "ID": "848c7013eb1665b1", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqbrgn51bDFbwZ1c2_BxCHhog7If9w6ooKAtb2YCerQcObpiFJZqT3-Jn7zTXEEPVuysmxKw4PmvEOmkbCAJVkmbEVZm6z877JKFhrXTrkWqWYooq8" - ] - }, - "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" - } - }, - { - "ID": "a5f818071a0f22da", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrozB-0-kEtkjITLFZA2uQpw77J_Zc6GErrYrIyxkPTWeUHJNBRLw4JXhyIEFG8szc7bhqblQVKoKYiZ4myOcfL5zIM9KYGIbCyAP9e4sEEdx2pBe0" - ] - }, - "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" - } - }, - { - "ID": "55eeb942e5603431", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Range": [ - "bytes=1-" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "7902" - ], - "Content-Range": [ - "bytes 1-7902/7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqlAytrsQn43Os8JxSOx4C8v9ApqqtqwEE7kZ-voKAcmcYe32lG7ANHxzNrwkqN8bbLLohoAHd88brZDVaC3U6Q01dhBBoeDFnlkCzHKUJjA8ZWrgM" - ] - }, - "Body": "ROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" - } - }, - { - "ID": "320dc27adc377057", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", - "Header": { - "Range": [ - "bytes=0-17" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "18" - ], - "Content-Range": [ - "bytes 0-17/7903" - ], - "Content-Type": [ - "application/octet-stream" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"7a5fd4743bd647485f88496fadb05c51\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 04 Oct 2016 16:42:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1475599327662000" - ], - "X-Goog-Hash": [ - "crc32c=PWBt8g==", - "md5=el/UdDvWR0hfiElvrbBcUQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "7903" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UovperPufphUvaf55r54Wd-USAbL2ZQVTseICyulStqI633iJcFBLryyqecsHQcoU2cXp4MsKgB8uQu979IXcnv-aGNm6viDydIrqmPqA7SmPElPGI" - ] - }, - "Body": "R1JPVVAgPSBMMV9NRVRBREFU" - } - }, - { - "ID": "a20a4e3b35dfd271", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Encoding": [ - "gzip" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"c6117833aa4d1510d09ef69144d56790\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 14 Nov 2017 13:07:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "X-Goog-Generation": [ - "1510664852486988" - ], - "X-Goog-Hash": [ - "crc32c=T1s5RQ==", - "md5=xhF4M6pNFRDQnvaRRNVnkA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "31" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq7491k1eCc7fe_JApsP1zDcSyo759KmHvh-9YHm9ekpOGaG8v1bZNPjaMEkikJSDYt_LkVMHrb9HTDx9vvDGy3Zm1kPrlxS4933Sw-Wdh35lDomi4" - ] - }, - "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" - } - }, - { - "ID": "d1941f2e08f3bc52", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - 
"User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Encoding": [ - "gzip" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"c6117833aa4d1510d09ef69144d56790\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 14 Nov 2017 13:07:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "X-Goog-Generation": [ - "1510664852486988" - ], - "X-Goog-Hash": [ - "crc32c=T1s5RQ==", - "md5=xhF4M6pNFRDQnvaRRNVnkA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "31" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoYIJ-m0VbRdjw7ZGI_0TiIfj6fcuhJkomWPdQGApFxH2_LnekHIdv7igEpJAM3a-zrTOzR20bzvc-JBunTe_-f_Hsyxz_VPxJNrQY7PSrQ3OMfEhQ" - ] - }, - "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" - } - }, - { - "ID": "2451a87df39e1241", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", - "Header": { - "Range": [ - "bytes=1-8" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "W/\"c6117833aa4d1510d09ef69144d56790\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 14 Nov 2017 13:07:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "Warning": [ - "214 UploadServer gunzipped" - ], - "X-Goog-Generation": [ - "1510664852486988" - ], - "X-Goog-Hash": [ - "crc32c=T1s5RQ==", - "md5=xhF4M6pNFRDQnvaRRNVnkA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "31" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Response-Body-Transformations": [ - "gunzipped" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoNfvi75DBdUL5KpenQbqbsYr5A8YSQp2lGAPIhe4FlijJP95WctTUrdrLIyq3riprP50HzntCuw7zh5ycZfqbJBkFbibeIwEp5bVDj7yVpJbqctiI" - ] - }, - "Body": "aGVsbG8gd29ybGQ=" - } - }, - { - "ID": "f0e47a86731e8924", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Range": [ - "bytes=1-8" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 206, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - 
"Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Encoding": [ - "gzip" - ], - "Content-Range": [ - "bytes 1-8/31" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:25:27 GMT" - ], - "Etag": [ - "\"c6117833aa4d1510d09ef69144d56790\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:25:27 GMT" - ], - "Last-Modified": [ - "Tue, 14 Nov 2017 13:07:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Accept-Encoding" - ], - "X-Goog-Generation": [ - "1510664852486988" - ], - "X-Goog-Hash": [ - "crc32c=T1s5RQ==", - "md5=xhF4M6pNFRDQnvaRRNVnkA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "gzip" - ], - "X-Goog-Stored-Content-Length": [ - "31" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upzmwe6UNntf6SxdYLZTz0aZiFJ7UANU6y7I2YKJbADGSVoCRe63OoxcC4uh-9n2JEnkvQgq0dwCHCbQ3qmYG4cWFEovG_fIMKFx6O11iPQUhLmeFw" - ] - }, - "Body": "iwgAAAAAAAA=" - } - }, - { - "ID": "2a14c414736e344d", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "168" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "593" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:28 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrgiftU7BDllYqI1jhKj6RbZME3LoakRb653ThaSSD_kzX1O5odabBcDFfG-oswR6YOcDZTW174qxbUaa2G1EvajUt_wJAD3ViGgoMwT_YjoTvObkc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC4wMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "b03cb69547c1a6de", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "99" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiR0VUIl0sIm9yaWdpbiI6WyIqIl0sInJlc3BvbnNlSGVhZGVyIjpbInNvbWUtaGVhZGVyIl19XX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2528" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:28 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urw36CL-_RvF58GoVX1bJlTZ1YUUdWXJKQDIac7eTOYj5s65KYFsk2ndonwA5ro_9nGPfo4qRIYyt7J1Xxb01TiJUQPRXzH-z9T_S5UQE6fxOssN0g" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "735da673f622341d", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2528" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:29 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:29 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urb-FeCpjdYOEj6HdkFXRMfVXffmMB8xjLqLwPvjgs3FghXs7r94UrXEpX0rW_CgEpjG6v6iHCWCkwxib31kFkLPMZMMZxHTy66dPT2AdBDsIE8Y8k" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "97ce23a7211781ed", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "168" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "593" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:30 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrqrjU-mu7dGgY6-CuNd9AwgGuKtQ_O589el9eyWSAb54cDYnQN74dl8HxRVzIUayStgIinEuuux2afAlvkUGm9lhrefX8nugBXNL2lGluNcfQKfRo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOS43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "a8f5b1c42d7a3c5f", - "Request": { - "Method": "PATCH", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "12" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbXX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:30 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZ4q-BeojZ4WgF5ZHNzJG7sIvdOxmprH5GqoQ3DM-QllSzHWOwAfhVdJZH7WmVK5Dfs1TQH-pKgneNLb_9uYM-rDFb_5Hh9vs89o4pOPT9ob9ezTE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2
OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "cab9472f01ed4bd5", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:31 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:31 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo0__TFFuzUsEi6Eck0G1lblGy_1abmyPHERWBavjN2GHncyWWnKQ6yYaFDpDVQ-kfBS15M6H9NoxXUDS0hZ9VX8S9hK4rUGAAFWXznHjOntSK5ldY" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "7a196e6b35872d48", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "168" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "593" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:31 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpJhdZl7X4YXxzKdcr7SJ3C6Xadtlo0ixoS8DNO1jaDHs5MeGKu9nx2pHWPpnUWd13HZ8inxG0pMh3un84TucDfyLu4Z-5Gp7Ka3HBfPnt_w_VTiuE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "24b6a3b712ccab17", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2539" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:31 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqe_Q9NPVf3WTO_It8-A42wh0FD3KyF4kopi6posCEX6l28cnowG8O7WSYwETklwvpp0Uk1fCXiVmgwqLlWCSc3ez-nYllKp5m_UVtxup1E1UNadGg" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "ebed90fad884e1cd", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2539" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:32 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:32 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrjLjhrtR5T2TdsRM6X3HPy_6vnZjhUfpjytCivwknDLKhiMkND-f1qeZ4te6AnGrv_qtnGI4OGajQJaydHJvQZGWpnBQN9VOXHqYjSBTCxnNbcbC4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "31ee7f558375c66a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:32 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up70C3kZ6tTqgyRHbTyjXA5pLWMohouKPHCPhexU8UuWw0cAmLxjHcwaa2xBT-Soq-CLyVxoWeEkxFMhRqqK87xaM38Q2hSjGkeXEhmhoZ3iy5s6D8" - ] - }, - "Body": "" - } - }, - { - "ID": "72458186a1839a5a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:32 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpsiYUGF50kaeS-bMsAR51mlDVCwH9HwmqU1hCFSwzrMDgCmGF46ZuQGfqWx4SJprlBWIxaNwT_EQMKQrTQARvFyee5ZOJq21xZUsVEc1iBW0KkiVk" - ] - }, - 
"Body": "" - } - }, - { - "ID": "20fcdf5cb9502a64", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:33 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up8ALydoTkq1rLUaKiwrh6YEhczTwI3VX8iKuCNoqr6N2BSBvtxD2Qc9De99Svk_4EX82rynlWctsS2F9Ffr4WuDby30_W1Fl7TMWFZGWfo5Wc_nfI" - ] - }, - "Body": "" - } - }, - { - "ID": "ace99589753f3389", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:33 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UotVHCMZG4hcbTx20gUPBV1LsUxIAVuDym9d21c2In0VVLzBxOM_DAULuLEd9LTMmpNg6A7yW4yqEsnRlHhAaMbpe7MwAWbadF2gJJ2M7uGdA-rcLQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "466aeae01d2d2933", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVuGt1j5ranQuT0nS5bzOIcVM5M42RwIHcF2mKK_H1ctJEqC_djWDMm00OBFXY4yxsTFdQ1ZYjWVnFq8GhUAWUsvsj4_B9Ur8MzJQcbAzG9vHgIjs" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "34d8505e483be050", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "31" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZWZhdWx0RXZlbnRCYXNlZEhvbGQiOnRydWV9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2460" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo5A5V7O5JcpElASqEqUZ_UiEjPa-39PCu3SwJk8EE9cHfyeVL5DWQkAss8k9iRx7YjD4ZC7WnzE_ENz95PuKdccjgzeC2GLFAUzA-sR2iEf-lNil4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "728568d994e275c7", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - 
"gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2460" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:34 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrOVH8W8OREkTrF0UKlK3dOp_AAbRF7Rsx16Zbjdl5caBObFjXETVORm7o-d-YYcPIjcrx1mKcQwxHCIXpdE5P6baH6WeY3Wc3JVF7UcQnKna-a_U4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ
vd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "b7230d8a6b23fd8b", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "35" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2493" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:35 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uouwpmid1Y6D0uXWBb_WrmlZVHKYnw0RGbreddchrmPzQUE1H_DeclZnbl6Yb2346L4YNt44ZeZWEM2u46h7Zx45lw9rUFxZDQIvlXRn4vT6fW4WQQ" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" - } - }, - { - "ID": "ec9da9e78b750503", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2493" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:35 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:35 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq25oUSnJMHsIEARHohSGtkz5vw7RCXuQupRBliEjCXMeivgUIK0y9C3U4yWEJ-_182SvGUQLvo5rXxymcoyahwAEhfe21FJU_M2IijdiWsSv83eow" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" - } - }, - { - "ID": "ad49d37f07d631a5", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq02fbr_iko_zBCbJljbMwyW7mOaz1cqR6AWHm2ac3m9BcqrUXNkmRqxk0R2rZW6SAUC_KzbOze-1wUGY2E0g6wbYLUqcb8IOdqN2-ROvYO49uxzjE" - ] - }, - "Body": "" - } - }, - { - "ID": "7b04c14c4e323488", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:36 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrJrrmhbKyKRTvA3_mNqzRsaaMOAAlvn8UMwBiOR77-6X6m5InqoXMb4qoollPzgpcFOmchQ54jFNic0xJf-tiGu0tlrzSdbjh5Hzw8Rl3RnJy_jPw" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM2LjUwMVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNi41MDFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "ae3e9c648aaff158", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJuYW1lIjoic29tZS1vYmoifQo=", - "X7Xb+/Xtxt2fLTn7y+yBvw==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:37 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrJv7oOcHCzf9atnIPo70j3xSldrZtXTb5CmV1iuM4mMPU4hiBgpDqCAdUDI3i9hlR2qRie9qT30AJNshzO5CSsuzoTQ88W255VO8VVumaCSZ0KXZs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "e7a12fdbeb636933", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:37 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo-mQ6Bnw1qbAyHbMdumiv5YlTBrFQbHo-9tXn02VufGPTp3_Q8nXXp9VIcdRmzH2CJbhaVLu5fUFYZ6VKgcitBwcE-dCwxWrzaljgyT6zmM7x01ls" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "142eb6c42bc6f90e", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "84" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3215" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:37 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrBTG1gAXfMDmjm_clsDlv04E6VRtZUsUKnaVr4bC3AIRXILH2f0UebFf1_SmXPSf6Go8sw7TXti9yO112PAG5QNin3LwaSjRuFDSH_unnMZJwRceM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" - } - }, - { - "ID": "12487e4f20538041", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3215" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:38 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqxyG_OCKjwh7TR9zL9nbOGPP1ttxni9cP68b18nTZ7wZ8WqVE0MAGh1XFphNYZEYr-2kdgNgmqQrOvFiNbg6hJi-RfjCWYg0KHPLKfzyWc-gejszc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" - } - }, - { - "ID": "e3a625bb7418cea9", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "82" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:38 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVf0XeyD5sC9KvHEILX2e7x7VyJ9d9xoyrP3NqGFlsMgvK3zgeOEOXDvSpftUSy4opndA74-LNRkmAi8Q_6wc91iC8OWV7X3VR8kENWwclNK30V_U" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" - } - }, - { - "ID": "2290ac6bb34b705b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:38 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoKagfJuJDLVLgIcrGtlV_UXlXLSqfEFj2-knllMBWWLktTv22ZfKoeSo8lY6gSfE1LKlvn87WiyJv75QWpcWdXzvablAMBOAZ_XMe-1D8dKZ5ipHs" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" - } - }, - { - "ID": "572d50dc9b5ce8a4", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "85" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6ZmFsc2V9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3194" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:39 GMT" - ], - "Etag": [ - "CJbmhujx/eECEAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrWVb2L8OGfosYWTjk_F-zmyNwltYI93f2tqyP4EkOx7hIRpsSG2iGDFXB7SP7DKn-teczhb9tiBKRM7rYG5vcJ5jVinideFoX44khld5BhtRaQsUc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC45MTlaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSIsImV2ZW50QmFzZWRIb2xkIjpmYWxzZX0=" - } - }, - { - "ID": "bda340f0117c1fb4", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:39 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UotG32AgmOwDD_bLLd_W64JCOZQ2-UgeY_cOZwomlkWb6Vx5JSE54tdsuCPvawuyJ8eNuCf8EM0qXlIjtAyg6QQiuN9t72vHrQLsrSs6WtJBDGZziU" - ] - }, - "Body": "" - } - }, - { - "ID": "9a0b7379af024a3a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:39 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur7S22mcdvPtDq_Z9iUbo4v0ApOQXNFgXFNZuacd8yZUq2FhZKco01Z2T4vJSv8s1yZkGXzoUrAuUKnCBx0vZuE-MDGnmTF_YxhQoXobyvAyktswdE" - ] - }, - "Body": "" - } - }, - { - "ID": "affb380cf86f173c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5In0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:40 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrF4q2mbuFoVeV9q22libc-FjfB1eDXSMYhNGZNU6sbaMWEKdc-8eXu1gtMLC7Ia9bSRU0oxgpIRR9516KZRcg7o_VGdrU1lliVPuSY9rYa4OsJymE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjA4MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC4wODFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "88123486ecd710ed", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJuYW1lIjoic29tZS1vYmoifQo=", - "cGCusp668ZMwY59j94Srfg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:41 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrixhGvSHn6UzWmshHjh8Ny2NE4LW-gcPPW4dCm9dZTDfBwtENYMJra88jZlphQFcyabnQwTegZW9_6bL8KYNOsyYQYgdtEDvFFMQeJ9B_IXQbrsgI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "042e51e8b073dc93", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:41 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqTH44ERv_v2A5OmhnLVBFOkQ_bPdeDI-MNir5xjrZt1ybrczlk9GwseHZ-kt566XrMnuYGTaCkr4ImF__dpbt98CqMpmYkiJaR1vKcI1DC7o3Dbl8" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "ed898d656784527a", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "83" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3214" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:41 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urh-jh9w3q8AC6TmG1NF_6JgV-Js74mV6EkB4ccrlZ1ZCQ9TlA4AhJ-J6rkqtJ9fKiy4YjT_l-TN6dQhnCwZvTUqoaJC-W-z6QQZywuQ1uX9a90PhU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" - } - }, - { - "ID": "9ddea19a0c44c449", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3214" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:41 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoZVnUhVRVPu1unMjxNHQzZrlqTsVtPMZHc_YPTkvy18AAD3d_TOg9qTNAEv5NXUC5n0XF1sPedifaWR19a5w7dCMltRYfuJUfGxaMzyrvpqKbGjx4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" - } - }, - { - "ID": "9b6c4955472bdb51", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "82" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3192" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:42 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoybPBMseeVzkBPlzTObhT-eKnJ9TbsC15btCmXKMDvqgD5Uz4JclrgL9lBqMq09UjP8GL2_3zCDkfA0gJ8SHCDZbHEi1XYcuAUT7hmXazBL3IIMqc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" - } - }, - { - "ID": "0227238a512229dc", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3192" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:42 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up6PS1r6NaUKQ9158DD3cKWHqyParXLdcW8k7sToMskeTU6BjpajWENp3qNHqLMFvX9NrK78TIw31h_ANYkltj3vAQoYq2RG5C6ZB2LCvUIch96SMo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" - } - }, - { - "ID": "7a81946424736215", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "84" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjpmYWxzZX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3193" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:42 GMT" - ], - "Etag": [ - "CNC/3Onx/eECEAQ=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo1klBHpEl6qb-hfnVYr2KJLmo_3_1fgqlMYMBpBt23b-U9pF7bPcNEiJaMtrGEhZPQb9-QnqHiU8qPfmpNpSdy9KR41FwZm-89gyu3EtE5gq3J9-0" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi43MjFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSIsInRlbXBvcmFyeUhvbGQiOmZhbHNlfQ==" - } - }, - { - "ID": "7865e37b40b9ec66", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:43 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo2nfd81k4cBaY8ZFRGIu8G6DHyGUVAdIJCB65E8ZqqO9ADJIV4K22sQaOA8fqGL3jyi3tIszjBVsXHOFrCgca1vLXSpA1-3s2cn-MVhTKpzZ5tzY4" - ] - }, - "Body": "" - } - }, - { - "ID": "fd9104ab049c0173", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:43 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqMqIYvjYuUxhnSLyoY-p5BYUiuZIX7cs418asOvABObfrQc8eXZpo-V4LSZVWWgt4CNoviMvFwcdC9sCTKCZU1H-9Ifcuf8lptTMZpiWMJjZfD-DY" - ] - }, - "Body": "" - } - }, - { - "ID": "e735515a80a67931", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "105" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjM2MDAifX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "573" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:44 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqxx-CzGd9qn-QCqeE_iYOkeO93A0IpWx1voocTxhTrdkkBJonYBIWXeaEq3g-LNThw7j85IKVotWyBNZlHzqYSgBchb0DeXXetMqS8WBkY603Sa60" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0My44MDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "94bfaea89e1f47c2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJuYW1lIjoic29tZS1vYmoifQo=", - "29UJUEl2/QvM9FnDBFnPRA==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3245" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:44 GMT" - ], - "Etag": [ - "CPXIv+vx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqLSIzBtq8Qn_3f51_hlfoIkcpamafGwB3U8er3K_mzANwxXyscBHWZhaxb2-3M2wOT6GZER6-zLAtdeeSNtSlUMHkHX77gocrZGYj103HA-AGjVSM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" - } - }, - { - "ID": "7db6d4640ce21307", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3245" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:44 GMT" - ], - "Etag": [ - "CPXIv+vx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoUK3qzYL0xUcMKfWD6mmDZqRfnd8q7O14gBrKng7OYalNXZR3ZegpD1VyqjzW6bgQqFvLrtFjWXwPoJ4E78_nThYByR2n3Hm33ms2fvCdaRSOy1Qc" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" - } - }, - { - "ID": "cec800ec28205dab", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "25" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:45 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqAm9jtf7nMSv2Bbn_pFGaLCmlq2CtqUdGeEOyYrkllqiADBk9_xsJS3BufmzfGIdARRHN7jVwbeHKev2CHMyzWoGA8UxNGNIldPCC4oZ1dMPARKHk" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NS4yMjZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "c787f6d5b23123a3", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:45 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrgP5ghUeBxvXks4XlillwzLwfwyRGxt5y-FImGHSwZ-Meht6M0-keE0YTUG2Qy5hH5i1gpWPyGHXweFDyrk8a3QbDPnG5shDEoxzNOlMEsX6V8-4I" - ] - }, - "Body": "" - } - }, - { - "ID": "144726e64e401a23", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:45 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpxuxvNJ3APW11H4y-qweAzn3ChIZ0eRjSetoSHkGwVvO7nMR17h5Hbj8I8zgOGjR7HPA201lDCw47ejD6kj1wCz3ZTIrUz-r-qjdm0vGGszQYFidQ" - ] - }, - "Body": "" - } - }, - { - "ID": "eebfbd37f51a5cc6", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:46 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2Uox0foiLUu9KuEF-KVU8lIN_yUKAJsYEuwVqgSXvzFRGmzJtSZvFeNvMDS8W0aQ7c8YoI89qS_MxU8zBSBqcljYpIZe1NPIvkEFEG5K22RJkAuwmh8" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "c7ddc04b5d29113a", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "47" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:46 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpD4zsQLTflaR7g-z0FszOHvw5oXvxqdiyjlAnMwMcBcUEjidrpIGgpvmrz1EWi2AyWkdDCSY0l8ItQxYnWwSZt24jOxXBlzq-gCNrCCE6j0fJVGKw" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "e08a38dca4dda51f", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:47 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:47 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrsphQdkHT0ctCFLyPV1JRkq7cyQ7w1tvYmAWv-6tYsuThaMN-vkUJoTNz2gPKGy6jV0yqdbXaAlRlD_5n6GiQa-ULOyKLi-C0q8y25lfk7L6zROE4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "fe586a68284be237", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:47 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpAjkPxULicnoY5wPnfrg-D7yzTbTzTZcUShEN8JSsqVMN758lI4DlkvFibNK9URhYVBH0xclTheAOO_CEDoVb5egvcF-WJqJ3SDisU2YPaZsohRro" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "732c3421adfb99d8", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": 
[ - "47" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UohfaTh8JFEjT7TTMwV7NkrcAsgkdpDyovvbjHJLNbXalzOWX3_HwhowBimaqZXhkuvBx1D38zsUZ5dEzigoyoXRJtgHfy7ClUrshBL_yMfPkZ4JE4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMt
NDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "aebe46d0a31a6e95", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2519" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:48 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:48 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uop7c4AxllIcwFmLfqhIIaGP3Q9uHfmQoAm9LttwjQIvzHiJvu4fg_Q3UfNP8YbDuP97jLsG78JbCK68eSQv8ZHPp8kpzfdJcvOER0cs75wQ9gC7NA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "bea48b3667650bdd", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - 
"Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:49 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo1p1nEuvoUAqYHYa8wuU6qF64gN2JPCxLqqFEUiKXLgo9rwPH_btm63155v5hbuS6sow8cYEvsXVJthsJ1Zn8qfb05USodp1EvTTrt_m1rpJnIxdo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "a951cae4048b1267", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "25" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:49 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZAHhwxqSSY5MYFK1fdLLirRH41rc7W2YWSnn4CQZkbdR_98fRYjddtqZVfpEizqsZ1BNa5A7ENxQz7BwQSamASc8XAC5gyZp_pwi-Yu8l0XLSCpU" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "e7f0506408261db3", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:49 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:49 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UotOOFJlGACBcheGxpS8qxLlGsDT0xOhT_Cg1M_6DxUI16IiOu-YHaPvwzxilOKOrTvL8kRzcVUiNuFeS6rQExTmdK70I1IOH2d2BLOrMm1L5Int1g" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "b2c4589e9b9b5ad2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:50 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoFdLKiYfqS-J3PXjvHGxpXtJwVnUB7Li8XrR-DTYkmyed-ZlUmUcmZFgGyIayuACtUWVYBcRbhYOKvgkfl8srE0OdJbc15OHWBttYqenraJ0MdXj8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "836baafb4e98da80", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:51 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UpNuySlLo4SanUdFJK3SGoCidNgWEW4mUuFDGMLLSzY-UUA3TJtN5_FLoLq7et7d1YSUDlOoGZY4a_GdewGQtsUwGdbHLz-bopObpzPy160Wm3UMpg" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "281ffb73e66efd1f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:52 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Uo8PK9JysFiApLjDSbbo9GloKqKjLO8vr85RWGkeC_Q0iEQf7zu2mwQVeiuJ81pfdJCUja0HrhNG13sczI2XoflqlsWNFvdxHGjCBrvDObr-s6Y32U" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "515ecb8616d48a7f", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:55 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - 
], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqF93J_Bpwg-_yefVpKWteSyWBSx9FqNm8MP5eZKqVfvDWpSF6EPZsISss7hNOkKMM5OuV8stlJIW2UAkzsSgkYOHx5fCwOwylbtqAyb8OlYJCHz6Q" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "771e55d617425f26", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "25" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:55 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up6Slob_akPo2UWNmzCe-LD4cb-9DVTq_FPlNxA7mKVF6fBFks6gFnZ83BSajEU2qD8g14Y-pX20kbfoKf8XGR9pIW2xXH_oHCxsjspHZ2ARGePTMA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "9526aab2be8b214c", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:56 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:56 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UopNhdzHbHv4sXyk0r1owz7bkkPQwYIGbb3gRingbq7vgESKltWLIr9PDDIUNREfumEvZDfpxMineWYNzZ0qdOaI1QNYqXjnuxO6sCUDjBYEhr76uk" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "0aabff8aa1cc1e9c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "103" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:56 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpTj6zYv1NMHDI_ktA1d-p7HKq4YnJMxxI9nXubPfCMyIspOgya8HYibmvOMFwLtCPagnzRHe0HbmTCSqJpYMay0xYZstGSE_IUWRqI9lUhzq0Iqe4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "05147b5b5fdd7882", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - 
"Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2517" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:56 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoshhz3RuOTlNgZ3gOLis_dSFBsqr7khyJCdWUD8BGk0_j-AHxA5etttVHuHTrUcDksF4LdtfJLhlVrAJe9PrGiYZ-Wy57j6tb7L6vc0qHoTvolIRg" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "7de8d497e7e1a76c", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2517" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:57 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:25:57 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up3mjbvLxwRDDhjg8GTGBNcJM7EraDydPsmD77oQcyYYGVlQnUqg0NQbezf4KQOoOHoEnC9jMR052e0olW3qi9KdAi4-qJEiKGI1rGivb5hnJnmU5o" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "3876040740117531", - "Request": { - "Method": "DELETE", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:57 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqGdmh-XzrU5d5VwRugwWr452hENxpWf1sEtNCDaGksLJ1taCgnEjL_0oAlELqirqyXLlw-frE8LYo1V37Pk1G3qvWGk9E1iSZV7uECOjsdE3JagK8" - ] - }, - "Body": "" - } - }, - { - "ID": "39fb456e528c7081", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:57 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqKcAQsTdqHfiXj71c5kivJTwCNhY6xUQpZtaxywsmr1kNG5_MdJBsOvuLVbDSHKRIsG9tDxk_9Gz-7zaqP_pqke1aqKOFg9NeCU4ZqfjD0SBy0Q5Q" - ] - }, - "Body": "" - } - }, - { - "ID": "3fdacef865a6fc39", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:58 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Ur0PIkQTUvOl8-6tC08mwiS3nBPydo1OYUeD6Q46ZrU6VkmpRwxc9MikGqJlsejuGZkyY_aaxUVKYGREjOF363tfkbII3of6pGFuQ8rINOf8uFV5ts" - ] - }, - "Body": "" - } - }, - { - "ID": "2da48fa864dd9efe", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:58 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoWkbW_2n8KvRkdk7Lh_5eOVBjW6JvoW0i9LNQUHM-Df2H1QIbUYIwFnUGLt-bPfFa46Ur-v8Q76mU24KpjW7WIC_HVq4Q5H96pRYk7I6ggUsckTs0" - ] - }, - "Body": "" - } - }, - { - "ID": "3ef26f9548e0aec4", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:25:59 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqZfOfnhYy6nk2TxHz3h1eMu_0KlczCaOb-xcAIBRoRgxk7Q67BqsT5fvb-vAky2phYzQWNHLTk-0T8V2tpPY4Hg23Ub9evuo6_YEndEVdiBRhTcFI" - ] - }, - "Body": "" - } - }, - { - "ID": "8f3abbeca307cc5a", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "574" - ], - "Content-Type": [ - "application/json; 
charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:25:59 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqdSQK1TuFGAgs143LV6RLNjzfrsAl5vt-Jb_RQ4dcVGDzK8I29sKKrPmcY0rLIJuVIQar3mdRGhNLhqXp2xCKli28TFq4rtsIgiA4DxCtdVbajIhY" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "bb92a1c4ecd9e884", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", - "aGVsbG8gd29ybGQ=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3315" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:00 GMT" - ], - "Etag": [ - "COup9/Lx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrRoCa6t1nTWVUrdfIXfyxZBI6PKBkzO24svoJRDEYMEsduIrzBpdDiX8mIUWSxcL8-pSRZgcDvy9_iHKpoVbiUNt4QhlG7zosGpa3WOk--YlBxjeM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk1OTk1MjYxOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS45NTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NTkuOTUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5Ljk1MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoiWHJZN3UrQWU3dENUeXlLN2oxck53dz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk1OTk1MjYxOSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvc29tZS1vYmplY3QvMTU1NjgzNTk1OTk1MjYxOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InlaUmxxZz09IiwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0iLCJyZXRlbnRpb25FeHBpcmF0aW9uVGltZSI6IjIwMTktMDUtMDNUMjM6MjU6NTkuOTUyWiJ9" - } - }, - { - "ID": "8f2ac73db961f1d6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "13884" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:00 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:00 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uok63dRFSkGv0Fum7yPhHf_AtYTWceX5_zkck648-jhWkyym1ezg2Og-LqKJ6nnZCW_gPZuU01OkZErKAQbL7mvQroCq8nEtNTEAosMxk9fJRKpYlc" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: 
com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, unnamedArguments=[]}, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, reason=forbidden, rpcCode=403} Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00"}}" - } - }, - { - "ID": "5a7a0988789dfb17", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "25" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:00 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - 
], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrZIBPbBTYIUYYaBFrcXzNnp7uuC8DoPry45bX57PyfL7aM5a83NLxYnvWsXD62w3LrRVksNhJbrLnSL5e8ZS6b0Cto8ufh-V00gNrGI-CaTc4achc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMC43NTZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQ
U5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "8410bda61aa0b6fd", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:01 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo2MOVFr2ZwvkwYm32qtqndRBhJnBc28nVBEBowKjIIxpOxiYcSef4wtz9sLTJ23PbXr1F8L_TfGwyShv5j1eTsQ-91OXU8Q8JEZtui9VVyK96j2D4" - ] - }, - "Body": "" - } - }, - { - "ID": "7b024f33e16d850f", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:01 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpDkcsqcqPLSySgf13o9jWNafbKdVjSRqGNu3ZedvF7TlL9TBswdMMpcl0zQOHtvXWIrhrcZODr7RjRhHuvS8gJdMbnYeQDHrBCbiH6x9WrnaZ5uIQ" - ] - }, - "Body": "" - } - }, - { - "ID": "33b0efc1b91cfb42", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "574" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:02 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": 
[ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpItvsNld0b1FGsjQgaVCDxW22UEra8vRGfkWtbDS7R2kMbHRPr0I6iPjzaQBOaa9-xUJxGO-JV6RhMlWwH2NY-FXY2-0lrkZiAzIL-tdFwkCwlF-Q" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "632b9a0387d9cff1", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2520" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:02 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:02 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpEVtrO8ed8TEHdpzyw857ZykloGKTT56atylmGnu682Y0VxI_T6SWN0JLFupJ_2Cu80ntiZttFSCBqwqFGfxVrRqsNnf0KBDyzqOH8eI-BaWPBXvE" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "a32a5924f876577b", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=1\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "0" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "639" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:03 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpsYL8RxAKClLw1KRdSSIORQHYKKf_HMml2vtwDOFrh0kTYDzhGEgX2RJAPQKOP2-70q7ZZ2VfkkGz12tmTiRgSuwB2dM3xxZxZucASROVeoit9Shg" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjY6MDEuOTA4WiIsImlzTG9ja2VkIjp0cnVlfSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "718775f2f8320cfd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2536" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:03 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:03 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UprHupyIuGJCYeUCPKigfkARZRkCc2Jz051KFrXK7YS0FbdKxNe3r20jon1ejkooHsAtcXQOi1tDV41Ob9dJ3Zoyj7stDImJ0eQriMO0WLLQ__-V54" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJpc0xvY2tlZCI6dHJ1ZX0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" - } - }, - { - "ID": "2f4b1104fa94954b", - "Request": { - "Method": "PATCH", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "47" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" - ] - }, - "Response": { - "StatusCode": 403, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "13774" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrafLpm39Fg6hBYYi9lSEizIWUdMduEDQykOeT9Bch6uf9BNQUO9sOAnWpTklSpNac1yW_kSNj-JR8LMmVLirC2kqG2DcQdE-Hg5dtVBkVZ4Xhjlc8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 19 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., unnamedArguments=[]}, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., reason=forbidden, rpcCode=403} Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'."}}" - } - }, - { - "ID": "a8964565736a4805", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:04 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - 
"X-Guploader-Uploadid": [ - "AEnB2UrNbiNCNtu8DhS-gNXfoe4R9DFKEVzknZ0g6YoI0JdvwH6P199oVJ2oaJwh9WXMte6frhDr_vJo1_PlxLhezMnUTJzMYZgPlPWdU1QN9imk3hXgawE" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "49648eb4e4e2ddda", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:05 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UplUHmGCg4wOKtpKVz2KH0cn-th8s-KXQR62bOkAF1pV1zIZJvsoI_tURcFttkyHks8g05Hln5fT4Ej_O-Lx6JCTovYYwhhHIK7AAcialHMj5fPHk4" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "42cf12714cf13289", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:07 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UrcjWbo0pc_i_GQTU2WE6YR0XhWYMGRPJutu1MQXo6XbRKJAYhzNK4p2-_gCqZ6BvNGncUP1UZNoTrXsFFGt1RRTRR3DS9XNiws0gt9YQdp_I8dqqM" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "4f0fdb3eb607f33b", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 429, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12201" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:11 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2Up0EsbUXQesAgz95sFt6OzWRDj5WAXqEpYaqHgGvCClYnBGvjX8eQmor80UQOCIDzT6v9ZAtOPVhFIqkJG6rV0-CDXiLAnr4gwDrPkqgezvm8PQTaY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" - } - }, - { - "ID": "a5a8b8af13be21cf", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "106" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "574" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:15 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" 
- ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpF7Lqxvm1dNC6q_v81ZB1Ui9c2ZLJ3GXLX6Y6UNZN1nbkF78c0J3NwXavHF1cMFZ5oP8ufCnmCLg9vQHM6tfs2ih8YxQSfE4D3AxGsvBaGNP3VUMo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE1LjA5N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "e1dbaeed8c6d1ffb", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=0\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "0" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 412, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Content-Length": [ - "12155" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:16 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur4dRXLSTXYf9VXr3eOCMfuLBwDLvNQMXwIYjwEuSFQO_p-SxZQXQJ7I_VA0T5Hn8gnnEZIqLV3vLCnurXr0cyHDEMpCdxWOiIiqsXlgkbcLKf78to" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" - } - }, - { - "ID": "814dffcceacb3bd8", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026kmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoia21zIn0K", - "bXkgc2VjcmV0" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3234" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Etag": [ - "CMGB+Prx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - 
"Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upis-8CwJYvr7glFAMnec_3T3YDojfTz3O_uICK5tQQmfJF2_rFtNnbqopcxBL5L3aKyoYe4zOLoqN4_MFFrhYKDm6Tmrl3rytuh0ymlCV15VZKyrQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI
29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" - } - }, - { - "ID": "9b05110f2b59351b", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/kms", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "9" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Etag": [ - "\"-CMGB+Prx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:16 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Kms-Key-Name": [ - "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:26:16 GMT" - ], - "X-Goog-Generation": [ - "1556835976741057" - ], - "X-Goog-Hash": [ - "crc32c=UI785A==", - "md5=AAPQS46TrnMYnqiKAbagtQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "9" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpZ6aSiGPoeZKANPoMPgXhL1QD5nbILvzv8v3sOMId95Y65wOPssqesNA1MXzp86JA4Qa5gAMxxx5cnY4z3-tCUnvsKoIHn3NXOK2U6ZSRLzYNeQeU" - ] - }, - "Body": "bXkgc2VjcmV0" - } - }, - { - "ID": "011b81453e7060ac", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3234" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Etag": [ - "CMGB+Prx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uq5t8aexqBqqxnLJFK9mWmGdVrFfd3gaCGOKfGJMO1phB3ATnxt67-cLVpELRy1gNnjNOK7-zMzWCR6yV1v5PQcv3lccINA--Q92RoECpvBON-QKtc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvc
HJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" - } - }, - { - "ID": "0dbd0a37c735be56", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur_RD6un7NLXQpzwRBFEfZdp982qhfEzyvw8f_P1Lx1wKv8sUid99lQ7Nba5Rs73IJfv3qHXjQfUDHjWclvCAcS91nMboiexn-FwiMrUuKqCQ_d__4" - ] - }, - "Body": "" - } - }, - { - "ID": "1b81fc72e201f2a2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Encryption-Key-Sha256": [ - "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - 
"eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY3NlayJ9Cg==", - "bXkgc2VjcmV0" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3262" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:17 GMT" - ], - "Etag": [ - "CKCxsfvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqRTb3RtqG1gIL-ENbVZC8nWKRHapQP78uVzv4yyL2t7N8cA5jEBWa014iJALicWO06KthvAMPLctgZCJ2UvPqo72IO4_LheEfTr13-_h43M-roUyc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jc2VrLzE1NTY4MzU5Nzc2ODEwNTYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrIiwibmFtZSI6ImNzZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNy42ODBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTcuNjgwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE3LjY4MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlaz9nZW5lcmF0aW9uPTE1NTY4MzU5Nzc2ODEwNTYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NzZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1
iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NzZWsvMTU1NjgzNTk3NzY4MTA1Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJJbzRsbk9QVStFVGhPMFgwbnE3bU5FWEIxcld4WnNCSTRMMzdwQm15ZkRjPSJ9fQ==" - } - }, - { - "ID": "6da27052d0dabd22", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026destinationKmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ], - "X-Goog-Copy-Source-Encryption-Algorithm": [ - "AES256" - ], - "X-Goog-Copy-Source-Encryption-Key": [ - "CLEARED" - ], - "X-Goog-Copy-Source-Encryption-Key-Sha256": [ - "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3372" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpKmOV156VWN4edUkrNip1urAj8WTpDcgGTS520rVGilFT71KrZLZQxUMKloxtxKx5R6V9pUvIqYym68P8lQ8RSc5rEE_D1KlH-NyPgybiPERbxYZ4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiOSIsIm9iamVjdFNpemUiOiI5IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21layIsIm5hbWUiOiJjbWVrIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwic2l6ZSI6IjkiLCJtZDVIYXNoIjoiQUFQUVM0NlRybk1ZbnFpS0FiYWd0UT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWs/Z2VuZXJhdGlvbj0xNTU2ODM1OTc4MTI2NDc3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN
0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiVUk3ODVBPT0iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSIsImttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MS9jcnlwdG9LZXlWZXJzaW9ucy8xIn19" - } - }, - { - "ID": "6ff8a4be1ed6bda3", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/cmek", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "9" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Etag": [ - "\"-CI3JzPvx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Kms-Key-Name": [ - "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:26:18 GMT" - ], - "X-Goog-Generation": [ - "1556835978126477" - ], - "X-Goog-Hash": [ - "crc32c=UI785A==", - "md5=AAPQS46TrnMYnqiKAbagtQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "9" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqDR7RHLf_CQraIBM5Z3vCUxSJndE0E6FBOLFBqmYvMntnrQyVIfxwuTE6BtHL4b8oQszU6rIycc72JMUjiGRM_XoGdjsk-WhsbBmL_3c3x5H1s4bs" - ] - }, - "Body": "bXkgc2VjcmV0" - } - }, - { - "ID": "51f1995364e644ed", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3271" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Etag": [ - "CI3JzPvx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpM1hbkOXC-oSxvoqjP8s2MpxREJEpzVp2TTYj-6B8467MLYSA-yrWs9UoGwRGQUCJTR76e-qAmQnbzGsFGT31Kc3qjevb8SM0MynLHL9GhOMkm72w" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrIiwibmFtZSI6ImNtZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21laz9nZW5lcmF0aW9uPTE1NTY4MzU5NzgxMjY0NzcmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwM
i04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDSTNKelB2eC9lRUNFQUU9Iiwia21zS2V5TmFtZSI6InByb2plY3RzL2Rla2xlcmstc2FuZGJveC9sb2NhdGlvbnMvZ2xvYmFsL2tleVJpbmdzL2dvLWludGVncmF0aW9uLXRlc3QvY3J5cHRvS2V5cy9rZXkxL2NyeXB0b0tleVZlcnNpb25zLzEifQ==" - } - }, - { - "ID": "8972a840dc3d95a5", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoTmNCzrFh_7cFaryet35nLbKqM6dyo7nvsyZApHIj6fLKcMDb9ZA-_TRNjGOLNFjjoD0IZ1YOz9P-ftNchBFkjWDTaFzBjbHmryCc9JInU3wd_DLw" - ] - }, - "Body": "" - } - }, - { - "ID": "1e28b278fe3e0a66", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ 
- "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:18 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqrX5OILXadm6r5e8fLyuLEhQncM5jrmmDaOesSqraquvQbXXKhTqNJ63RYU3qj90M3FLjwnAOz8oOBTQQDOq2sRqUkbpBbJzYj82Rp3Rd9VLpNns0" - ] - }, - "Body": "" - } - }, - { - "ID": "d1ac5d123ee4bae6", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "200" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwibG9jYXRpb24iOiJVUyIsIm5hbWUiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "609" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:19 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpbajYTiCBBelYuwcqOs7_O-SyJbcV6WGwYhC4JCMmeImLlFLb93JyaiYrSp-201iYZTIdQ2VViFR_Emxe2Z7b4jV_G-0o2xzkqnkSnz0-Qcw_q0Wc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImVuY3J5cHRpb24iOnsiZGVmYXVsdEttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MSJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" - } - }, - { - "ID": "d035cdb991d0fabb", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2555" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:19 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:19 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoi5SKd4TdRXg-INW-U3pKcPA2ioBWIc2zdC5W3J4efS9iZ0EKFNqprjQ-NGEK_3wSDGezk4Al3Dd4JovGxXSnH73rIw22S_R__XRrpTQmYpTiNFGQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3
RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "672cb2bcc1ed4ee2", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJuYW1lIjoia21zIn0K", - "bXkgc2VjcmV0" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3234" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:20 GMT" - ], - "Etag": [ - "CLHMt/zx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upyst1aH69iaN9J4055oX7H2yReN5TADIX11Vul8KakNuYZbJ7yXm1GBdvCDVB-IPTvbhzELJWgfurnIHxGpzKJszLO70n58xMnVbSLsHX1J3W02Lw" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbG
Vyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" - } - }, - { - "ID": "c037ecafa6249395", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0019/kms", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "9" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:20 GMT" - ], - "Etag": [ - "\"-CLHMt/zx/eECEAE=\"" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:19 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Encryption-Kms-Key-Name": [ - "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" - ], - "X-Goog-Generation": [ - "1556835979879985" - ], - "X-Goog-Hash": [ - "crc32c=UI785A==", - "md5=AAPQS46TrnMYnqiKAbagtQ==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "9" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrrQOFSgCkxAaKVPAQhz0ohMN6irPryraHLjWDEQgV3dNEwc5cxqcDDRChzx6_HR1Z_NPlm3vmjFCJcNK-2rAz-tmvVS8w36Q5U9Lc2wAGUgL_joX4" - ] - }, - "Body": "bXkgc2VjcmV0" - } - }, - { - "ID": "fc3eda648104f042", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3234" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:20 GMT" - ], - "Etag": [ - "CLHMt/zx/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - 
"X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqHPSbutJRXX-6i05QnH_4UizrPft0fsGc3_ISDNmXQ4C3RPbEAnNfS1Ren0j3k2z8zxk9AV1WZQCWiKG9D5A8gZAZESFxbSZyjbBH8jkLEsEX54NQ" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" - } - }, - { - "ID": "13283bc1ba20f019", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:20 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo2b3OtHWqpsRWxurpcGgrEtM10ZDdsrIa2z0nFsO-P4G3RAGYMTVMR0MCtwLDePrKyhfnsK-d9T5AIcRpeaIe8Z40Qm7gkZP4Fdi9jXd6ceYe5Iwc" - ] - }, - "Body": "" - } - }, - { - "ID": "ca2255586aa4aeab", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "126" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2555" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:21 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - 
"X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur05CMGbsLFprbwWYFSeE3I5dKFdFUzO--ImyAzkLt6ueNGpMDviyX6c1S4F6t1bqXTSnyQXXQzgugu8JoecrL3-1eJOPsqnN-OBMubntJCblkyWqE" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9i
YWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "e1c336910a43425f", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2555" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:21 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:21 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Urcwbg5Z9C0rFvSTWYj4FxF27ooV0028Cm-yme-BrfmvK0dWsRkfRQEMWylWwokcmM2kIvLKEiucJlvDLHvnbJPB_HjH3zF2Lm8xCwV_9gPN42QDeo" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y
2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" - } - }, - { - "ID": "581a002b653f595d", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "20" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJlbmNyeXB0aW9uIjpudWxsfQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:22 GMT" - ], - "Etag": [ - "CAM=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upv2LawTqc_eDVGR_hv2jSCyEA77H33uMkPevVxUUDKj1u4a9IWOH2f1oMXpU52-49Jg17SUSoJUJai1Ueff3ahN58GC9FCxwJt5v5xOHI3SytFjpo" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMi4xMjRaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBTT0ifQ==" - } - }, - { - "ID": "a0db97c641f3f17c", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:22 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo15eoN1OrQFM6eqxJcuNFl7Wfr6x_VvJ0ENBlhhC_Hedf5al2OplGnNslWnRQT9RVs92RVLevZ_F_ohutgnNsUbK1PVsR04UnQ_A6ea3C0ZLy8FkI" - ] - }, - "Body": "" - } - }, - { - "ID": "ca84fc3a71a55ea8", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026predefinedAcl=authenticatedRead\u0026predefinedDefaultObjectAcl=publicRead\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1468" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:23 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrnCCRKNh19fdBAL3pyCzbtLnG7z20pSwSZKl9uTerD6o6-B5mB79mqZRIbXNcUmcpTdpxTreKjvsHK3Y9EKq_5fkzBPInDj8YkjbGViIpd1ldSots" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "ed54d82686b4d390", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "1468" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:23 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:23 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpXJh1_aAMuDVjYxLZJdwqAt7OwZdmTkc0_5wf_LshhNm_So0q87o7gJ31T58vp1b0_CucSecAyyyOLUbYTDrWDDTZGMkZBpfx1iBArjRtBcud1IgI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "224aee5425ffb86e", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026predefinedAcl=private\u0026predefinedDefaultObjectAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "33" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJhY2wiOltdLCJkZWZhdWx0T2JqZWN0QWNsIjpbXX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1113" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:24 GMT" - ], - "Etag": [ - "CAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoknWPZdEFnvKZGCGO_Z26ZaKeXNdBzc6k6qqlbGdQXVEtPvLPR2oYtKCXsqDgFBjMw4wfWVXwDfmcc5u5kBAxshpuzKGgCp8UOxv-i_Rml56mzrOI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC40MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6ImFsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" - } - }, - { - "ID": "b6a80796e20baf03", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o?alt=json\u0026predefinedAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJuYW1lIjoicHJpdmF0ZSJ9Cg==", - "aGVsbG8=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1995" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:25 GMT" - ], - "Etag": [ - "CNDq5v7x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqMfGdFW2xElqOJcP4q5XeDjVehJr4FmjcFPij-Y31f_168_Jgb6zibcWxCOJzrV1aUYYTXZW9DMX-BcvfJi6YXoBCPpN23EhrGQyY7tHSb0SVurGY" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjQuODQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL3ByaXZhdGUvMTU1NjgzNTk4NDg0ODIwOC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJwcml2YXRlIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODQ4NDgyMDgiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNORHE1djd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9" - } - }, - { - "ID": "4812f1e0e907256e", - "Request": { - "Method": "PATCH", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026predefinedAcl=private\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "62" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifQo=" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1529" - ], - "Content-Type": [ - 
"application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:25 GMT" - ], - "Etag": [ - "CNDq5v7x/eECEAI=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqTjsMMIlet4CWUmGoB3TuGzhTHXlDwuUe1SAQO8H2vmbTo2QYYV_WozfDO2NyZpsA9LCtgwwthwxeEeEG182WQTTe3xLWnsVjqPZx-iho8JAgI66U" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuNDIzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFJPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTkRxNXY3eC9lRUNFQUk9In0=" - } - }, - { - "ID": "969763dc18755620", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private/rewriteTo/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026destinationPredefinedAcl=publicRead\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "3" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "e30K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "2017" - ], - "Content-Type": [ - 
"application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:25 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoNk9xKdyuh-I4RkIXqQxMF4A82DZ9b2HYZOdVK7ELOsnMYBqOmrqsY5hzZJsLXMZCZ6zOSzCeefQZuTgYHwyd-jAttJ4QoA1lDk0HTwZqigIfpjCc" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9kc3QiLCJuYW1lIjoiZHN0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODU4NTAyNjkiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW47IGNoYXJzZXQ9dXRmLTgiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuODQ5WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI1Ljg0OVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNS44NDlaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdD9nZW5lcmF0aW9uPTE1NTY4MzU5ODU4NTAyNjkmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2RzdC8xNTU2ODM1OTg1ODUwMjY5L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vZHN0L2FjbC9hbGxVc2VycyIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKMy9vLy94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9fQ==" - } - }, - { - "ID": "8db7e2a6b620e794", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp/compose?alt=json\u0026destinationPredefinedAcl=authenticatedRead\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "130" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6InByaXZhdGUifSx7Im5hbWUiOiJkc3QifV19Cg==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "1906" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:26 GMT" - ], - "Etag": [ - "CJGwwP/x/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpqjCYxNFNTu3V1H_kV5WJxkbEYE9I3H5fYjfGMPO7n6C9NJstYZGcIh6xVA5RAxMCwP1PdzXITgTI1m2vm5xP5nhzLVRHKhder2qzYFGsAooH-kG0" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9jb21wLzE1NTY4MzU5ODYzMTUyODEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wIiwibmFtZSI6ImNvbXAiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NjMxNTI4MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNi4zMTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjYuMzE0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI2LjMxNFoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vY29tcD9nZW5lcmF0aW9uPTE1NTY4MzU5ODYzMTUyODEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvY29tcC8xNTU2ODM1OTg2MzE1MjgxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoiY29tcCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg2MzE1MjgxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0pHd3dQL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2NvbXAvMTU1NjgzNTk4NjMxNTI4MS9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzN
DAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJjb21wIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODYzMTUyODEiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "72a0ce58c513f6d5", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:26 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpWOQtHF0cKNME8sTEm9vbS9KKKLLK9afxka1b6TTVGxDjCQ4qAdnA9zshsfVFVKK5hNS10aABAQog_gjYCZTXbbsF4bQNBM0i4R7bK2r7saocPtFo" - ] - }, - "Body": "" - } - }, - { - "ID": "111b5a296b726631", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:26 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqPsYXABoYOnGueMXiIvH9AntIPEf4xc6i18Nnr_XEYBxy-3LSK9klIcNyfSB7we4lgbctsiYA2e2WWBJlfqk1GnK6YA2fj-e3IiPbKwBwTSLjr1Jk" - ] - }, - "Body": "" - } - }, - { - "ID": "27e4065cd2000d43", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - 
"quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:27 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpMPK_b_vcRkSIj1t37L47as9idfpbrNWqzirdNBLwoaGDMPlbjatMzjLThnYRNwk9CqTNQ5H8eoKFbunpe5TMktUo_FHyI7rryoUhbCpkbytRrTeM" - ] - }, - "Body": "" - } - }, - { - "ID": "cccb109f5fba2a3a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:27 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoVAiJOaePqfycNhiwbkPOptOqgpe9Tvo0jo1aW2nviozj4-QcjTtnGGyhcdrZ8QTxT-KHPo-saYS9ESC_6osYhChJOCi8b9nXtuqH0BqlvywXs9FE" - ] - }, - "Body": "" - } - }, - { - "ID": "cfab6af30981f62b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/projects/deklerk-sandbox/serviceAccount?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "116" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrDBTBzZwm7GH6Jklv6hzuaZaO3JdgliJk8zLZkVNDqwDO7tYP-c06rzrTj03llkWuV_l1k9w2tbUcGG2Rtv-1MdozY6pktTutDMLfyhyHe1DaEIys" - ] - }, - "Body": "eyJlbWFpbF9hZGRyZXNzIjoic2VydmljZS00OTYxNjk2MDE3MTRAZ3MtcHJvamVjdC1hY2NvdW50cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImtpbmQiOiJzdG9yYWdlI3NlcnZpY2VBY2NvdW50In0=" - } - }, - { - "ID": "677e783b9aea8c15", - "Request": { - "Method": "POST", - "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", - "vcVTjdWWssp9XFk0D3Nk5w==" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3262" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Etag": [ - "CPryt4Dy/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrdWRt6oMzDM98VpA0VDmWRuNvox2ZBw6ZpjbyOFDKCFKgJ92jsLkF8v4M4UUxaTUVpmr8iElUTld7Q2g5_MZSbRatDE2zVgaY-769Ppq7Nx6f0HAg" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZ
WxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "ff3c05fe02cd38ac", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/some-object", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=60" - ], - "Content-Length": [ - "16" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Etag": [ - "\"b4769c7d229f0720beffe7251bbabe8f\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:27:28 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:26:28 GMT" - ], - "X-Goog-Generation": [ - "1556835988273530" - ], - "X-Goog-Hash": [ - "crc32c=Sm1gKw==", - "md5=tHacfSKfByC+/+clG7q+jw==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - 
"X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "16" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqn6PsKCJsyZAhI_JtSOcVKs7SWuqYsoprQTUagoZXqu1CHWyBc1TzWfUyF9iv5VBS4iZPP5qz0Lf3b51p8O36Sud2-QS2zDg9FKhtm1uBFKLpcTOs" - ] - }, - "Body": "vcVTjdWWssp9XFk0D3Nk5w==" - } - }, - { - "ID": "d1c7cc7c0e1d2829", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3262" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Etag": [ - "CPryt4Dy/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqluzdkA1sUAJQJZodwWmM7SP7tNoX2JJjDiqHTy3XtjEqiVDNpI0whWTFJJH4caozRYjJPjTb6_ywm82dnAiYEo-YCknhlvkWexfTq1BM9MRWwQeg" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" - } - }, - { - "ID": "951f74cbe6b30b67", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Etag": [ - "CA0=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:28 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoMRpk7F0qXPvUV4uNPdV7VhfUO2Mm-qPBbx3VV92y1wRyElPF1kbRNEGJ1KqbIW5w2XozD7IzjxBx4KevqcPxZr0_E1iU1_NASedJ4lZj4EFoP9UI" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" - } - }, - { - "ID": "aadc7916a3fc24ad", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11805" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqhoAeZtt8ffiWaPr6J0LEaVR3vjerqtJ7gMM9BObOkz_6MtctDuS_s8-DZRxtTZsGfMDtekIKG2v8p2_sb3MxvTgeLhgO4vqvJNIszTaMjjyQcEdY" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=null, unnamedArguments=[]}, location=entity.resource_id.name, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "03b45b7eac939177", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/Caf%C3%A9", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "20" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Etag": [ - "\"ade43306cb39336d630e101af5fb51b4\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:26:29 GMT" - ], - "Last-Modified": [ - "Fri, 24 Mar 2017 20:04:38 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1490385878535828" - ], - "X-Goog-Hash": [ - "crc32c=fN3yZg==", - "md5=reQzBss5M21jDhAa9ftRtA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "20" - ], - 
"X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpFU2KyS5OA-bAy_MovHXHg5zJZ2kznc6tgTTrSjilrNHiw0NimPHZApcRXOS_ejrSSy8nIn02xipxUWxvHM3ru5IYhJRTeBZxhlq6_XQVsATkvink" - ] - }, - "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEM=" - } - }, - { - "ID": "725883eeccd4b42c", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "multipart/related", - "BodyParts": [ - "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVybyJ9Cg==", - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "3128" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Etag": [ - "CKyp9oDy/eECEAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_single_post_uploads" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UruPh9tBADr1LSQWFlj1C4tjlS6uwlcy_MVRYaTGD4e_qEUlq0iXJx6ns3JHbH7I9olH7aSJpDW6VQcV5EdLwjQMecgA7qKhxzLoYFW4BZdEXbUrro" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLzE1NTY4MzU5ODkyOTYzMDAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvIiwibmFtZSI6Inplcm8iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOS4yOTVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjkuMjk1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI5LjI5NVoiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVybz9nZW5lcmF0aW9uPTE1NTY4MzU5ODkyOTYzMDAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8vMTU1NjgzNTk4OTI5NjMwMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJBQUFBQUE9PSIsImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0=" - } - }, - { - "ID": "036f06773f6d6306", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 404, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "11821" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "agent_rejected" - ], - "X-Guploader-Upload-Result": [ - "agent_rejected" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqs9uKpqdPwYRv420tAk0KmBT6DS2POPsIj39ROT-r3ymrd2SB-oOJEXuaHGOfih1ckOqp-YrvHHAh7l7A-6rwlFjoBSb4qCtWzXGPjekN0pUBIok8" - ] - }, - "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.bucket, message=null, unnamedArguments=[]}, location=entity.bucket, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" - } - }, - { - "ID": "6b9dc540a02ed56f", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/storage-library-test-bucket/Cafe%CC%81", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "public, max-age=3600" - ], - "Content-Length": [ - "20" - ], - "Content-Type": [ - "text/plain" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Etag": [ - "\"df597679bac7c6150429ad80a1a05680\"" - ], - "Expires": [ - "Thu, 02 May 2019 23:26:29 GMT" - ], - "Last-Modified": [ - "Fri, 24 Mar 2017 20:04:37 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Generation": [ - "1490385877705600" - ], - "X-Goog-Hash": [ - "crc32c=qBeWjQ==", - "md5=31l2ebrHxhUEKa2AoaBWgA==" - ], - "X-Goog-Metageneration": [ - "2" - ], - "X-Goog-Storage-Class": [ - "MULTI_REGIONAL" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "20" - ], - 
"X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrK69XFh4eDvl0RvSevMYIwMVs-nlDelMhoacS-4faSvuEateXMEqzdMvHClWdzkyqcEk7IQ1N0JMcv5uEPt6V98chYK-p9otm8puF4Uf846EBUEmg" - ] - }, - "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEQ=" - } - }, - { - "ID": "09f71762da168c2f", - "Request": { - "Method": "GET", - "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/zero", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "Go-http-client/1.1" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Accept-Ranges": [ - "bytes" - ], - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "text/plain; charset=utf-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Etag": [ - "\"d41d8cd98f00b204e9800998ecf8427e\"" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Last-Modified": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Server": [ - "UploadServer" - ], - "X-Goog-Expiration": [ - "Sat, 01 Jun 2019 22:26:29 GMT" - ], - "X-Goog-Generation": [ - "1556835989296300" - ], - "X-Goog-Hash": [ - "crc32c=AAAAAA==", - "md5=1B2M2Y8AsgTpgAmY7PhCfg==" - ], - "X-Goog-Metageneration": [ - "1" - ], - "X-Goog-Storage-Class": [ - "STANDARD" - ], - "X-Goog-Stored-Content-Encoding": [ - "identity" - ], - "X-Goog-Stored-Content-Length": [ - "0" - ], - "X-Guploader-Customer": [ - "cloud-storage" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrL3cJhuZXF-arxiYeo4MSf1to_dAoiyMisikOum5rqnrSnFa0OZ25Sl8hUU7lcIeW6P3-dwXK6qv0yHGFYMFnUB0VYpI9BRT16gMo8ikQ_L3gBmew" - ] - }, - "Body": "" - } - }, - { - "ID": "681729281517b9c0", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:29 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrKCNWRWyBsgMgWxsrSjXZMKW22GWVttfUWHw_UNSF7X7hnnz4h-cD7vFzaLO7OVWvnuMyW9qpJAA4eJaERT9hSjGfv41XVw_9ouvBOeIK9ykq-DrE" - ] - }, - "Body": "" - } - }, - { - "ID": "39261fa7e6e9aede", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "60" - ], - "User-Agent": 
[ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIn0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "485" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:33 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpEt-jSVBZqHHBwfNkeP4NXxMdC1ISsFp2E_M6JiSw9rDj2gTnmTZhrvOn-bo_f9Lx_HMxMDgsXfO0ftWqvUjGwdmHAYNnYK5JSJozLfuosPNxv_S4" - ] - }, - "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "3e70965b0aa1111b", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2431" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrB-4r560VjwZdR64GS_7_mZDefc-iM5ma_H8KjYfrdNXt-IlkgvzShsy7iNLseN6coOnkJwWT5tLbP54_M7nMUjord_jmpb1i19SBRA_3RQ_64yys" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "b80f9a99661cd572", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - 
"google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur4ItFRDN2_vB0_Ej095pAE061aywdVAoU_00t3fazm21R5ZLxUHpQL4ifTJ58218EkOeDBSWNaOaWuuzTnTM0DhXTe4l2X3pytOdfcZdIeyLV1rWA" - ] - }, - "Body": "" - } - }, - { - "ID": "979ffb3f3450c3d5", - "Request": { - "Method": "POST", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "Content-Length": [ - "543" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "application/json", - "BodyParts": [ - "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "926" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqM_YYHcsYolLFJJwWhO9AUYmnc3Vp3J6C5AydYOy-0RVAmBxObG8QpwZOsn28vDVllo_OZ1-4bASi_RGi4wdply2CPhzUOCVjP11XNzVYZ4_EiFb4" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" - } - }, - { - "ID": "4931edd6fbd7e278", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2872" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Etag": [ - "CAE=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:34 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpxOq-rTQ-0CFxAKezuPpbibtEmD_yASKzhW5dOiUdwVORx6a7_e1dmDnNCdamDPwLsBOpLueQD8iDbwlracb2uxGwwZ6yz92QD-t-JSKM-jSNp2xA" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" - } - }, - { - "ID": "12c06b5419a1fa81", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uoc24sLEBwU00YKpSYT4V6iJu3WxME2LgbyRWaYj4kX8s6Gjatzl0AWcCwPRhj_Bq0BOoUhKQzS8L0GJlhHC36eyEWEIs2rk2BcUyo0aE3U8XbuHuM" - ] - }, - "Body": "" - } - }, - { - "ID": "bf24dffbe6cb8609", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "2571" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Etag": [ - "CA0=" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqVkn03LSqmt8RiOPdW3JO4dpsS9vRp7VK8Qc0HwhmS3L9KDOgmyZIqLnvAwuAyvoIz1VsbqeoLwMMtz9pCIX216P4K3OpEs-x_bf_VSwMiFehgZiM" - ] - }, - "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" - } - }, - { - "ID": "2e41eac5e693f3e1", - "Request": { - "Method": "GET", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "64746" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqgGJECa6hRJfdjE3ErHERxS3HPU_SALrzjf-HIDuv4lfxMvOwrkF2pfY2jUJ3b8TcIjWcnaa85Rs4LBucKffiSWfFXcfLZMF0EuiVRj9oH6Kp4tOg" - ] - }, - "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1","name":"acl1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864248170","metageneration":"2","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.247Z","updated":"2019-05-02T22:24:25.629Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.247Z","size":"16","md5Hash":"0E9tFNpZj0/WKOJ6fV9paw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?generation=1556835864248170&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COr+pcXx/eECEAI="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"FiDmVg==","etag":"COr+pcXx/eECEAI="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2","name":"acl2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864737946","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.737Z","updated":"2019-05-02T22:24:24.737Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.737Z","size":"16","md5Hash":"c9+O/rg24HTFBc+etWjefg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?generation=1556835864737946&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJrxw8Xx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AtNRtA==","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs","name":"bucketInCopyAttrs","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883663856","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.663Z","updated":"2019-05-02T22:24:43.663Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:43.663Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?generation=1556835883663856&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPCDx87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CPCDx87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object","name":"checksum-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835855962241","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:15.961Z","updated":"2019-05-02T22:24:15.961Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:15.961Z","size":"10","md5Hash":"/F4DjTilcDIIVEHn/nAQsA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?generation=1556835855962241&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CIGhrMHx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Vsu0gA==","etag":"CIGhrMHx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1","name":"composed1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835858962957","metageneration":"1","timeCreated":"2019-05-02T22:24:18.962Z","updated":"2019-05-02T22:24:18.962Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:18.962Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?ge
neration=1556835858962957&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CI2048Lx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CI2048Lx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2","name":"composed2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835859564799","metageneration":"1","contentType":"text/json","timeCreated":"2019-05-02T22:24:19.564Z","updated":"2019-05-02T22:24:19.564Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:19.564Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?generation=1556835859564799&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-8
0633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CP+RiMPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content","name":"content","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835874352207","metageneration":"1","contentType":"image/jpeg","timeCreated":"2019-05-02T22:24:34.351Z","updated":"2019-05-02T22:24:34.351Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:34.351Z","size":"54","md5Hash":"N8p8/s9FwdAAnlvr/lEAjQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?generation=1556835874352207&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CM/Yjsrx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"GoUbsQ==","etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption","name":"customer-encryption","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835875058680","metageneration":"3","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:35.058Z","updated":"2019-05-02T22:24:36.225Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:35.058Z","size":"11","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?generation=1556835875058680&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPjnucrx/eECEAM="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"etag":"CPjnucrx/eECEAM=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2","name":"customer-encryption-2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835880717506","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:40.717Z","updated":"2019-05-02T22:24:40.717Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:40.717Z","size":"11","md5Hash":"xwWNFa0VdXPmlAwrlcAJcg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?generation=1556835880717506&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CMKZk83x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"r0NGrg==","etag":"CMKZk83x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3","name":"customer-encryption-3","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835879859440","metageneration":"1","timeCreated":"2019-05-02T22:24:39.859Z","updated":"2019-05-02T22:24:39.859Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:39.859Z",
"size":"22","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?generation=1556835879859440&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPDp3szx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"componentCount":2,"etag":"CPDp3szx/eECEAE=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test","name":"gzip-test","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835860251867","metageneration":"1","contentType":"application/x-gzip","timeCreated":"2019-05-02T22:24:20.251Z","updated":"2019-05-02T22:24:20.251Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:20.251Z","size":"27","md5Hash":"OtCw+aRRIRqKGFAEOax+qw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integra
tion-test-20190502-80633403432013-0001/o/gzip-test?generation=1556835860251867&alt=media","contentEncoding":"gzip","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNuJssPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"9DhwBA==","etag":"CNuJssPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1","name":"hashesOnUpload-1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835885051680","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:45.051Z","updated":"2019-05-02T22:24:45.051Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:45.051Z","size":"27","md5Hash":"ofZjGlcXPJiGOAfKFbJl1Q==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?generation=1556835885051680&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CKDem8/x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"cH+A+w==","etag":"CKDem8/x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://ww
w.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"4","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:17.121Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/allUsers","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"allUsers","role":"READER","etag":"CLnS+bvx/eECEAQ="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc","name":"posc","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835882760607","metageneration":"1","timeCreated":"2019-05-02T22:24:42.760Z","updated":"2019-05-02T22:24:42.760Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:42.760Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?generation=1556835882760607&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-owners-496169601714","role":"OWN
ER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ/zj87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CJ/zj87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2","name":"posc2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883152770","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.152Z","updated":"2019-05-02T22:24:43.152Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:43.152Z","size":"3","md5Hash":"9WGq9u8L8U1CCLtGpMyzrQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?generation=1556835883152770&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CILrp87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"17qABQ==","etag":"CILrp87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL","name":"signedURL","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835861141206","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:21.140Z","updated":"2019-05-02T22:24:21.140Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:21.140Z","size":"29","md5Hash":"Jyxvgwm9n2MsrGTMPbMeYA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?generation=1556835861141206&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNat6MPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"ZTqALw==","etag":"CNat6MPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object","name":"some-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835988273530","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:26:28.273Z","updated":"2019-05-02T22:26:28.273Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:26:28.273Z","size":"16","md5Hash":"tHacfSKfByC+/+clG7q+jw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?generation=1556835988273530&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPryt4Dy/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Sm1gKw==","etag":"CPryt4Dy/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object","name":"zero-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835856537016","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:16.536Z","updated":"2019-05-02T22:24:16.536Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:16.536Z","size":"0","md5Hash":"1B2M2Y8AsgTpgAmY7PhCfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?generation=1556835856537016&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLirz8Hx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AAAAAA==","etag":"CLirz8Hx/eECEAE="}]}" - } - }, - { - "ID": "744cbe6970c9623b", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - 
"application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpfI-4jHQ7K_XJuo8cMIfOvPLKerfuQA1KlRDBi4Fq11Uc9i--oKNuFL_MbH0CCxK8PeI4r4fmZILF9hdSGAMMpMmyX0w67j7t84C4tRDx2LnbzG4I" - ] - }, - "Body": "" - } - }, - { - "ID": "a2bea52380600211", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:35 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrzTcVhZWJuBZITVX_K6haiVlOGb5fEYRdiiH8ODa4XsVSPl4FCDs7zPpKMTfoZA1veIMd4ccdcj8rUqWDMcRfDU5l4cOGvNhGZV-h9S2AVJfl6GdE" - ] - }, - "Body": "" - } - }, - { - "ID": "e187777c49170a6d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UouPwITheFghtKlzs2vxoAGnEm_V7OZZnYkk_0pLhV8g5YN6TPRS2n1h0iAtzkuFXYtjovp3ifqt_351LO6-CobCar6VgRnJ-_EQ9WgOWhIC9DzYTs" - ] - }, - "Body": "" - } - }, - { - "ID": "653fa745ed683364", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - 
"Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UoBLZLK-ePmvYxO2ByvMQ1dXb_4-5yhOR-1nYkxjkINZf5fY9ptO2H4RfRtbIrRQ-QQNZHqZGgCOGJakKLkNgVI2_ExobQLkJ-JaJ-ViDWL9GZCEgU" - ] - }, - "Body": "" - } - }, - { - "ID": "91230665f80c46f7", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upasg8dCwbL7dWntUy3O1t_wWVeDNNT3YCz1uVNL8QUNh4z75y0p9OfL57wFX4CUv3NV06Oi9f7a85m42BzgDZl-kqmv4com_INYmRYjKbwrTdFB8A" - ] - }, - "Body": "" - } - }, - { - "ID": "dc68e1f94de4fa2a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqXdUtFVSSFIrK72haTC8pKLogl9pB4lDLpARc8oDKdNt1yfbHj4VAQH0M_Doqj1mLY2LVmThFxvG_1mvtVBm-N4x_J-c5Izq46lZh_xx3wOQEYZKQ" - ] - }, - "Body": "" - } - }, - { - "ID": "db5b5c31ef8e503e", - "Request": { - "Method": "DELETE", - "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Ur2nVuDhUpL1-5n7rzvboeHuOBlvh-59eoy59Y5ZIyvUy6FXYj2eCphVTBFkaIPXK7XIS6DJXkPzZABjiFi4hMA7Ewdgp860sYu44qguzhTfOymXYY" - ] - }, - "Body": "" - } - }, - { - "ID": "f44103066254972d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqul73HMDr4p6XCqdfKz99RILwd68VT50DnynneOl5m5E_a7ky2mbMA8mdM6xyAWCQtiJm8dCxvIuKYtjLavA8JUlkGIqqCLGfLbB2EvWahd55Fw_A" - ] - }, - "Body": "" - } - }, - { - "ID": "99aa9c86d4998414", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:36 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - 
"X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uqfk5JfeEeFo80cy1Y-OxXgzSIbt_x7qDWML2yJopIrt9DsPmRq4cDVFqg6sUFStj6aMrjxP7iu_tbwZzS_3BtnwqfPrJZvHllZ7tdeFTXLmto742o" - ] - }, - "Body": "" - } - }, - { - "ID": "24bd829460fb4df6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UprF0LO3CObAApP8iwUO6IvC-Yh-5whtr0F_mMzaj0rltEl4C-AZjNuBMGuQUue-v9oBqb-nQU2ykIenSZ3UYnK4XVV7moFIS43VyP7pByYqsHPkMo" - ] - }, - "Body": "" - } - }, - { - "ID": "acdf7531eddfd72c", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqAEWwZD_lXrdbrjMg-PYbmLP1-zSzjESAs-osetXJrFTueJ6ufxVfA3xkMG5KjtEz-68YryfKl9gAGsgfseX6H5COwltvyW6pJSdxDdA-dTV_S8eU" - ] - }, - "Body": "" - } - }, - { - "ID": "8facd7ede0dc71a6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], 
- "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrInkaBVPO1ip6mFSpF0Nihd1wYE9HNfIz8HfFsZkiEVG1mX7eKMItsbmNwWMehWZ0qaKAf081imDi7DmKqs6mEy7vyTokMNXVUp5LrFoQ-yDfVRuc" - ] - }, - "Body": "" - } - }, - { - "ID": "6affac5f0a8dea5a", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqhbUYzZ-vUqGcyfzGFaQrr89Q05cXGCB0EbWA2Dy8dmQxB3uWJ7_FUwi6Ish-VMpOrZv78bT9qAHd5Tr5b3s8bTCX6QR9HlelvxkKdyrUv2G8QQuY" - ] - }, - "Body": "" - } - }, - { - "ID": "8258a6e1b5dc73aa", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Upola6CIBa9ONjl53PxMpFGQkHvYEau96KG2mgCmUDWDhxZKCw0T58QQRx8OIJW5sisH_acrPcg2o-LLdJgOxCtlVSw-t6wGisVL2-TSopEuezgmu4" - ] - }, - "Body": "" - } - }, - { - "ID": "12186a37ecd09333", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - 
"Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UpFxLaOTSsPe7hDbzBznGCoZvAUmeNgWBwS45X2j3rpLlk8_MIFFuCa-aBpAM83d23ixudKpklnhiqCcrQQotCUZhkrMLpWLHBJdmcM0d5rCKL6opY" - ] - }, - "Body": "" - } - }, - { - "ID": "e7a92beca78e4ec6", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:37 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrpITV4ogt1-_eCsp0KFz7tVeM2A4FzSevs8Q0BPZ36nEPrP9aAaB4nfWASzLZgi2hNM1l9eFkf5UbYYWDT5J-nQFCapVNcm_4Nr43yd94Ce95SVps" - ] - }, - "Body": "" - } - }, - { - "ID": "5bb0506f3ca9135d", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UqFBt86Wz7Oqelsb0pPpIskBYdMYLR3XuBmD2F5DwyXt84Q1AUyL0Q94YtqTJP1yzgohHCkJWSGibqUARgP8Ilv9v4lVipsdfjSdFHgIjFJ1H-vuU4" - ] - }, - "Body": "" - } - }, - { - "ID": "3ea05ac204d7a112", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?alt=json\u0026prettyPrint=false", - "Header": { - 
"Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Up2mLLaTTO-0iqVf4m8WXPEsP-mHlaS6hz_gavjGI5yzU0jCjBNDRw1gj6Wa_cQIBzfjytwP2XYhBIJeAglwa6Uqv87WhzeqgGXt4u820FuZKVgY-w" - ] - }, - "Body": "" - } - }, - { - "ID": "bed8580dc40e850e", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UodR7nshMeihBskPB6loBZoNDRFEUxojhFCKhQ0NpiEMRHY0ZHU8ncI4Sr7FHUmIDXyK8xVg_I7Qt5ESrqnGmRzW0h1eM8OGfEM-3weHyzMqN-VMik" - ] - }, - "Body": "" - } - }, - { - "ID": "57489f1d9de0ddb8", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - 
"AEnB2UqxQ_NSeD-rrAbZXERIaDr3_1wDC_RZ4MyuFZnOhdDqLkn4qub7rAIamqSQKDD8-jbn0c3XmxvowTghxhq4tZaxxHTmYJtB_FPvJxRZtCt3Leugx4g" - ] - }, - "Body": "" - } - }, - { - "ID": "006b3176688c0ac8", - "Request": { - "Method": "DELETE", - "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 204, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "no-cache, no-store, max-age=0, must-revalidate" - ], - "Content-Length": [ - "0" - ], - "Content-Type": [ - "application/json" - ], - "Date": [ - "Thu, 02 May 2019 22:26:38 GMT" - ], - "Expires": [ - "Mon, 01 Jan 1990 00:00:00 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2Uo4l8LfaJI78MH37sa7nOmY2A8EB4paZccrewnIFFud-pwGLeVf067ZiRaBAOAPsPYlaxxtsr6gtECA6UY0xiUBHPTLFOe4VNnNQpniNWYJEfZ1-I8" - ] - }, - "Body": "" - } - }, - { - "ID": "a853ce25e734dbfd", - "Request": { - "Method": "GET", - "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026pageToken=\u0026prefix=go-integration-test\u0026prettyPrint=false\u0026project=deklerk-sandbox\u0026projection=full", - "Header": { - "Accept-Encoding": [ - "gzip" - ], - "User-Agent": [ - "google-api-go-client/0.5" - ] - }, - "MediaType": "", - "BodyParts": [ - "" - ] - }, - "Response": { - "StatusCode": 200, - "Proto": "HTTP/1.1", - "ProtoMajor": 1, - "ProtoMinor": 1, - "Header": { - "Alt-Svc": [ - "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" - ], - "Cache-Control": [ - "private, max-age=0, must-revalidate, no-transform" - ], - "Content-Length": [ - "47300" - ], - "Content-Type": [ - "application/json; charset=UTF-8" - ], - "Date": [ - "Thu, 02 May 2019 22:26:39 GMT" - ], - "Expires": [ - "Thu, 02 May 2019 22:26:39 GMT" - ], - "Server": [ - "UploadServer" - ], - "Vary": [ - "Origin", - "X-Origin" - ], - "X-Guploader-Customer": [ - "apiary_cloudstorage_metadata" - ], - "X-Guploader-Request-Result": [ - "success" - ], - "X-Guploader-Upload-Result": [ - "success" - ], - "X-Guploader-Uploadid": [ - "AEnB2UrnXhEY-kWgvl-XqP1kXnVDpQphHD9znE-EyBBlFT-kHZtXEsAW5QG5PuzlaJPfQUwZqnTHP0wxu6YDbRCgDMB31UR760_lfEiX88PtVKIdTgCWm5A" - ] - }, - "Body": 
"{"kind":"storage#buckets","items":[{"kind":"storage#bucket","id":"go-integration-test-20190502-66597705133146-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66597705133146-0001","timeCreated":"2019-05-02T18:29:58.601Z","updated":"2019-05-02T18:29:58.601Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66611506907602-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66611506907602-0001","timeCreated":"2019-05-02T18:30:12.264Z","updated":"2019-05-02T18:30:12.264Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-editors-496169601714","bu
cket":"go-integration-test-20190502-66611506907602-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66633965420818-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66633965420818-0001","timeCreated":"2019-05-02T18:30:34.662Z","updated":"2019-05-02T18:30:34.662Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projec
tNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-78294113514519-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-78294113514519-0001","timeCreated":"2019-05-02T21:44:54.960Z","updated":"2019-05-02T21:44:54.960Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0017","timeCreated":"2019-05-02T22:05:34.375Z","updated":"2019-05-02T22:05:36.341Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79
385133382825-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:34.375Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0018","timeCreated":"2019-05-02T22:05:37.733Z","updated":"2019-05-02T22:05:37.733Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE=
"},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:37.733Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79911595924903-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79911595924903-0001","timeCreated":"2019-05-02T22:11:52.358Z","updated":"2019-05-02T22:11:52.358Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0001","timeCreated":"2019-05-02T22:12:09.706Z","updated":"2019-05-02T22:13:41.634Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-editors-496169601714","selfLink":"https:/
/www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"l1":"v2","new":"new"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0017","timeCreated":"2019-05-02T22:14:17.486Z","updated":"2019-05-02T22:14:18.905Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/ac
l/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:17.486Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0018","timeCreated":"2019-05-02T22:14:20.105Z","updated":"2019-05-02T22:14:20.105Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:20.105Z"},"storageClass":"STANDARD","etag":"CAE="},{
"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0001","timeCreated":"2019-05-02T22:18:47.398Z","updated":"2019-05-02T22:18:47.398Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0002","timeCreated":"2019-05-02T22:18:48.114Z","updated":"2019-05-02T22:18:48.114Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-8
0326589403446-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0001","timeCreated":"2019-05-02T22:18:54.701Z","updated":"2019-05-02T22:18:54.701Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewe
rs"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0002","timeCreated":"2019-05-02T22:18:55.293Z","updated":"2019-05-02T22:18:55.293Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0001","timeCreated":"2019-05-02T22:20:46.897Z","updated":"2019-05-02T22:22:07.429Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-owners
-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"new":"new","l1":"v2"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0017","timeCreated":"2019-05-02T22:22:50.428Z","updated":"2019-05-02T22:22:51.935Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="}
,{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:50.428Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0018","timeCreated":"2019-05-02T22:22:52.962Z","updated":"2019-05-02T22:22:52.962Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:52.962Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0017","timeCreated":"2019-05-02T22:26:01.908Z","updated":"2019-05-02T22:26:03.544Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","i
d":"go-integration-test-20190502-80633403432013-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:01.908Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0018","timeCreated":"2019-05-02T22:26:15.097Z","updated":"2019-05-02T22:26:15.097Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.
com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:15.097Z"},"storageClass":"STANDARD","etag":"CAE="}]}" - } - } - ] -} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go deleted file mode 100644 index aeb7ed418..000000000 --- a/vendor/cloud.google.com/go/storage/writer.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - "errors" - "fmt" - "io" - "sync" - "time" - "unicode/utf8" - - "cloud.google.com/go/internal/trace" -) - -// A Writer writes a Cloud Storage object. -type Writer struct { - // ObjectAttrs are optional attributes to set on the object. Any attributes - // must be initialized before the first Write call. Nil or zero-valued - // attributes are ignored. - ObjectAttrs - - // SendCRC32C specifies whether to transmit a CRC32C field. It should be set - // to true in addition to setting the Writer's CRC32C field, because zero - // is a valid CRC and normally a zero would not be transmitted. - // If a CRC32C is sent, and the data written does not match the checksum, - // the write will be rejected. - // - // Note: SendCRC32C must be set to true BEFORE the first call to - // Writer.Write() in order to send the checksum. If it is set after that - // point, the checksum will be ignored. - SendCRC32C bool - - // ChunkSize controls the maximum number of bytes of the object that the - // Writer will attempt to send to the server in a single request. Objects - // smaller than the size will be sent in a single request, while larger - // objects will be split over multiple requests. The value will be rounded up - // to the nearest multiple of 256K. The default ChunkSize is 16MiB. - // - // Each Writer will internally allocate a buffer of size ChunkSize. This is - // used to buffer input data and allow for the input to be sent again if a - // request must be retried. 
- // - // If you upload small objects (< 16MiB), you should set ChunkSize - // to a value slightly larger than the objects' sizes to avoid memory bloat. - // This is especially important if you are uploading many small objects - // concurrently. See - // https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size - // for more information about performance trade-offs related to ChunkSize. - // - // If ChunkSize is set to zero, chunking will be disabled and the object will - // be uploaded in a single request without the use of a buffer. This will - // further reduce memory used during uploads, but will also prevent the writer - // from retrying in case of a transient error from the server or resuming an - // upload that fails midway through, since the buffer is required in order to - // retry the failed request. - // - // ChunkSize must be set before the first Write call. - ChunkSize int - - // ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk - // resumable uploads. - // - // For uploads of larger files, the Writer will attempt to retry if the - // request to upload a particular chunk fails with a transient error. - // If a single chunk has been attempting to upload for longer than this - // deadline and the request fails, it will no longer be retried, and the error - // will be returned to the caller. This is only applicable for files which are - // large enough to require a multi-chunk resumable upload. The default value - // is 32s. Users may want to pick a longer deadline if they are using larger - // values for ChunkSize or if they expect to have a slow or unreliable - // internet connection. - // - // To set a deadline on the entire upload, use context timeout or - // cancellation. - ChunkRetryDeadline time.Duration - - // ProgressFunc can be used to monitor the progress of a large write - // operation. If ProgressFunc is not nil and writing requires multiple - // calls to the underlying service (see - // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), - // then ProgressFunc will be invoked after each call with the number of bytes of - // content copied so far. - // - // ProgressFunc should return quickly without blocking. - ProgressFunc func(int64) - - ctx context.Context - o *ObjectHandle - - opened bool - pw *io.PipeWriter - - donec chan struct{} // closed after err and obj are set. - obj *ObjectAttrs - - mu sync.Mutex - err error -} - -// Write appends to w. It implements the io.Writer interface. -// -// Since writes happen asynchronously, Write may return a nil -// error even though the write failed (or will fail). Always -// use the error returned from Writer.Close to determine if -// the upload was successful. -// -// Writes will be retried on transient errors from the server, unless -// Writer.ChunkSize has been set to zero. -func (w *Writer) Write(p []byte) (n int, err error) { - w.mu.Lock() - werr := w.err - w.mu.Unlock() - if werr != nil { - return 0, werr - } - if !w.opened { - if err := w.openWriter(); err != nil { - return 0, err - } - } - n, err = w.pw.Write(p) - if err != nil { - w.mu.Lock() - werr := w.err - w.mu.Unlock() - // Preserve existing functionality that when context is canceled, Write will return - // context.Canceled instead of "io: read/write on closed pipe". This hides the - // pipe implementation detail from users and makes Write seem as though it's an RPC. 
- if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) { - return n, werr - } - } - return n, err -} - -// Close completes the write operation and flushes any buffered data. -// If Close doesn't return an error, metadata about the written object -// can be retrieved by calling Attrs. -func (w *Writer) Close() error { - if !w.opened { - if err := w.openWriter(); err != nil { - return err - } - } - - // Closing either the read or write causes the entire pipe to close. - if err := w.pw.Close(); err != nil { - return err - } - - <-w.donec - w.mu.Lock() - defer w.mu.Unlock() - trace.EndSpan(w.ctx, w.err) - return w.err -} - -func (w *Writer) openWriter() (err error) { - if err := w.validateWriteAttrs(); err != nil { - return err - } - if w.o.gen != defaultGen { - return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen) - } - - isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) - opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) - params := &openWriterParams{ - ctx: w.ctx, - chunkSize: w.ChunkSize, - chunkRetryDeadline: w.ChunkRetryDeadline, - bucket: w.o.bucket, - attrs: &w.ObjectAttrs, - conds: w.o.conds, - encryptionKey: w.o.encryptionKey, - sendCRC32C: w.SendCRC32C, - donec: w.donec, - setError: w.error, - progress: w.progress, - setObj: func(o *ObjectAttrs) { w.obj = o }, - } - if err := w.ctx.Err(); err != nil { - return err // short-circuit - } - w.pw, err = w.o.c.tc.OpenWriter(params, opts...) - if err != nil { - return err - } - w.opened = true - go w.monitorCancel() - - return nil -} - -// monitorCancel is intended to be used as a background goroutine. It monitors the -// context, and when it observes that the context has been canceled, it manually -// closes things that do not take a context. -func (w *Writer) monitorCancel() { - select { - case <-w.ctx.Done(): - w.mu.Lock() - werr := w.ctx.Err() - w.err = werr - w.mu.Unlock() - - // Closing either the read or write causes the entire pipe to close. - w.CloseWithError(werr) - case <-w.donec: - } -} - -// CloseWithError aborts the write operation with the provided error. -// CloseWithError always returns nil. -// -// Deprecated: cancel the context passed to NewWriter instead. -func (w *Writer) CloseWithError(err error) error { - if !w.opened { - return nil - } - return w.pw.CloseWithError(err) -} - -// Attrs returns metadata about a successfully-written object. -// It's only valid to call it after Close returns nil. -func (w *Writer) Attrs() *ObjectAttrs { - return w.obj -} - -func (w *Writer) validateWriteAttrs() error { - attrs := w.ObjectAttrs - // Check the developer didn't change the object Name (this is unfortunate, but - // we don't want to store an object under the wrong name). - if attrs.Name != w.o.object { - return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) - } - if !utf8.ValidString(attrs.Name) { - return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) - } - if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { - return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") - } - if w.ChunkSize < 0 { - return errors.New("storage: Writer.ChunkSize must be non-negative") - } - return nil -} - -// progress is a convenience wrapper that reports write progress to the Writer -// ProgressFunc if it is set and progress is non-zero. 
-func (w *Writer) progress(p int64) { - if w.ProgressFunc != nil && p != 0 { - w.ProgressFunc(p) - } -} - -// error acquires the Writer's lock, sets the Writer's err to the given error, -// then relinquishes the lock. -func (w *Writer) error(err error) { - w.mu.Lock() - w.err = err - w.mu.Unlock() -} diff --git a/vendor/dario.cat/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml deleted file mode 100644 index a8bc979e0..000000000 --- a/vendor/dario.cat/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "dario.cat/mergo" \ No newline at end of file diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore deleted file mode 100644 index 529c3412b..000000000 --- a/vendor/dario.cat/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/vendor/dario.cat/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml deleted file mode 100644 index d324c43ba..000000000 --- a/vendor/dario.cat/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... -after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907..000000000 --- a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
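The doc comments in the removed vendor/cloud.google.com/go/storage/writer.go above explain how the Writer's ChunkSize, ChunkRetryDeadline, SendCRC32C and ProgressFunc fields interact, and why only the error returned by Close reflects whether an upload actually succeeded. As a rough illustrative sketch of that API (not part of this diff and not PBM code; the bucket and object names below are placeholders), a small upload might look like:

```go
package main

import (
	"context"
	"fmt"
	"hash/crc32"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// Uses Application Default Credentials; "example-bucket"/"example-object" are placeholders.
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	data := []byte("hello, world")

	w := client.Bucket("example-bucket").Object("example-object").NewWriter(ctx)
	// Per the removed doc comments: all of these must be set before the first Write call.
	w.ChunkSize = 256 << 10                 // small object, so keep the internal buffer small
	w.ChunkRetryDeadline = 60 * time.Second // per-chunk retry budget for resumable uploads
	w.SendCRC32C = true                     // the checksum is only transmitted when this is true
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	w.ProgressFunc = func(n int64) { log.Printf("uploaded %d bytes", n) }

	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	// Write may report success even if the upload later fails; Close's error is authoritative.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote object, size:", w.Attrs().Size)
}
```

Setting ChunkSize to zero would disable buffering and therefore retries and resumption, which the removed comments call out as the trade-off for lower memory use.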
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md deleted file mode 100644 index 0a1ff9f94..000000000 --- a/vendor/dario.cat/mergo/CONTRIBUTING.md +++ /dev/null @@ -1,112 +0,0 @@ - -# Contributing to mergo - -First off, thanks for taking the time to contribute! ❤️ - -All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 
🎉 - -> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: -> - Star the project -> - Tweet about it -> - Refer this project in your project's readme -> - Mention the project at local meetups and tell your friends/colleagues - - -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [I Have a Question](#i-have-a-question) -- [I Want To Contribute](#i-want-to-contribute) -- [Reporting Bugs](#reporting-bugs) -- [Suggesting Enhancements](#suggesting-enhancements) - -## Code of Conduct - -This project and everyone participating in it is governed by the -[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). -By participating, you are expected to uphold this code. Please report unacceptable behavior -to <>. - - -## I Have a Question - -> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). - -Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. - -If you then still feel the need to ask a question and need clarification, we recommend the following: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). -- Provide as much context as you can about what you're running into. -- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. - -We will then take care of the issue as soon as possible. - -## I Want To Contribute - -> ### Legal Notice -> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. - -### Reporting Bugs - - -#### Before Submitting a Bug Report - -A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. - -- Make sure that you are using the latest version. -- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). -- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). -- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. -- Collect information about the bug: -- Stack trace (Traceback) -- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) -- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. -- Possibly your input and the output -- Can you reliably reproduce the issue? And can you also reproduce it with older versions? 
- - -#### How Do I Submit a Good Bug Report? - -> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . - - -We use GitHub issues to track bugs and errors. If you run into an issue with the project: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) -- Explain the behavior you would expect and the actual behavior. -- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. -- Provide the information you collected in the previous section. - -Once it's filed: - -- The project team will label the issue accordingly. -- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. -- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. - -### Suggesting Enhancements - -This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. - - -#### Before Submitting an Enhancement - -- Make sure that you are using the latest version. -- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. -- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. -- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. - - -#### How Do I Submit a Good Enhancement Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). - -- Use a **clear and descriptive title** for the issue to identify the suggestion. -- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. -- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. -- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. 
-- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. - - -## Attribution -This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/dario.cat/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE deleted file mode 100644 index 686680298..000000000 --- a/vendor/dario.cat/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md deleted file mode 100644 index 7d0cf9f32..000000000 --- a/vendor/dario.cat/mergo/README.md +++ /dev/null @@ -1,248 +0,0 @@ -# Mergo - -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Test status][1]][2] -[![OpenSSF Scorecard][21]][22] -[![OpenSSF Best Practices][19]][20] -[![Coverage status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA status][13]][14] - -[![GoDoc][3]][4] -[![Become my sponsor][15]][16] -[![Tidelift][17]][18] - -[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master -[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://img.shields.io/github/sponsors/imdario -[16]: https://github.com/sponsors/imdario -[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo -[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo -[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge -[20]: https://bestpractices.coreinfrastructure.org/projects/7177 -[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge -[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important notes - -#### 1.0.0 - -In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. - -#### 0.3.9 - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. 
- -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: - -Buy Me a Coffee at ko-fi.com -Donate using Liberapay -Become my sponsor - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) - -## Install - - go get dario.cat/mergo - - // use in your .go code - import ( - "dario.cat/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. 
Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "dario.cat/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "dario.cat/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
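
The Usage section above notes that `Map()` also works in the opposite direction, from a struct into a `map[string]interface{}`, with keys taken from the exported field names in lower camel case. A minimal sketch of that direction follows; it reuses an illustrative `Foo` type like the earlier example and is not part of the upstream README:

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

type Foo struct {
	A string
	B int64
}

func main() {
	src := Foo{A: "one", B: 2}

	// For struct -> map, the destination must be exactly a
	// map[string]interface{} (see the Map() documentation); keys end up
	// as the field names in lower camel case.
	dst := map[string]interface{}{}
	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst)
	// Will print
	// map[a:one b:2]
}
```

As with `Merge()`, the destination argument has to be a pointer, and nested struct members are assigned as plain values rather than being expanded into nested maps.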
- -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md deleted file mode 100644 index a5de61f77..000000000 --- a/vendor/dario.cat/mergo/SECURITY.md +++ /dev/null @@ -1,14 +0,0 @@ -# Security Policy - -## Supported Versions - -| Version | Supported | -| ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | - -## Security contact information - -To report a security vulnerability, please use the -[Tidelift security contact](https://tidelift.com/security). -Tidelift will coordinate the fix and disclosure. diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go deleted file mode 100644 index 7d96ec054..000000000 --- a/vendor/dario.cat/mergo/doc.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -# Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -# Important notes - -1.0.0 - -In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`. - -0.3.9 - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -# Install - -Do your usual installation procedure: - - go get dario.cat/mergo - - // use in your .go code - import ( - "dario.cat/mergo" - ) - -# Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... 
- } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "dario.cat/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -# Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "dario.cat/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -# Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -# About - -Written by Dario Castañé: https://da.rio.hn - -# License - -BSD 3-Clause license, as Go language. -*/ -package mergo diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go deleted file mode 100644 index b50d5c2a4..000000000 --- a/vendor/dario.cat/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
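-	// Record this (address, type) pair so that recursive or self-referential values are visited only once and the walk terminates.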
- visited[h] = &visit{typ, seen, addr} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) 
-} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go deleted file mode 100644 index 0ef9b2138..000000000 --- a/vendor/dario.cat/mergo/merge.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Transformers Transformers - Overwrite bool - ShouldNotDereference bool - AppendSlice bool - TypeCheck bool - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
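-	// Mark this (address, type) pair as visited so cycles in the value graph short-circuit on the next encounter.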
- visited[h] = &visit{typ, seen, addr} - } - - if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite && dst.CanSet() { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - - if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { - continue - } - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { - continue - } - } 
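-				// Non-empty destination slices and map-to-map entries were already merged above; everything else falls through to the plain copy below, applied only when overwrite allows it or the destination entry is missing or empty.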
- - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - - // Ensure that all keys in dst are deleted if they are not in src. - if overwriteWithEmptySrc { - for _, key := range dst.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - dst.SetMapIndex(key, reflect.Value{}) - } - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if !config.ShouldNotDereference { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else { - if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { - dst.Set(src) - } - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. 
-// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty -// (i.e. a non-nil pointer is never considered empty). -func WithoutDereference(config *Config) { - config.ShouldNotDereference = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/vendor/dario.cat/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go deleted file mode 100644 index 0a721e2d8..000000000 --- a/vendor/dario.cat/mergo/mergo.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. 
-var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs, maps, and slices are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerArgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - typ reflect.Type - next *visit - ptr uintptr -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value, shouldDereference bool) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - if shouldDereference { - return isEmptyValue(v.Elem(), shouldDereference) - } - return false - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md deleted file mode 100644 index bf0c3e1aa..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ /dev/null @@ -1,762 +0,0 @@ -# Release History - -## 1.10.0 (2024-02-29) - -### Features Added - -* Added logging event `log.EventResponseError` that will contain the contents of `ResponseError.Error()` whenever an `azcore.ResponseError` is created. -* Added `runtime.NewResponseErrorWithErrorCode` for creating an `azcore.ResponseError` with a caller-supplied error code. -* Added type `MatchConditions` for use in conditional requests. - -### Bugs Fixed - -* Fixed a potential race condition between `NullValue` and `IsNullValue`. -* `runtime.EncodeQueryParams` will escape semicolons before calling `url.ParseQuery`. - -### Other Changes - -* Update dependencies. - -## 1.9.2 (2024-02-06) - -### Bugs Fixed - -* `runtime.MarshalAsByteArray` and `runtime.MarshalAsJSON` will preserve the preexisting value of the `Content-Type` header. - -### Other Changes - -* Update to latest version of `internal`. - -## 1.9.1 (2023-12-11) - -### Bugs Fixed - -* The `retry-after-ms` and `x-ms-retry-after-ms` headers weren't being checked during retries. - -### Other Changes - -* Update dependencies. 
- -## 1.9.0 (2023-11-06) - -### Breaking Changes -> These changes affect only code written against previous beta versions of `v1.7.0` and `v1.8.0` -* The function `NewTokenCredential` has been removed from the `fake` package. Use a literal `&fake.TokenCredential{}` instead. -* The field `TracingNamespace` in `runtime.PipelineOptions` has been replaced by `TracingOptions`. - -### Bugs Fixed - -* Fixed an issue that could cause some allowed HTTP header values to not show up in logs. -* Include error text instead of error type in traces when the transport returns an error. -* Fixed an issue that could cause an HTTP/2 request to hang when the TCP connection becomes unresponsive. -* Block key and SAS authentication for non TLS protected endpoints. -* Passing a `nil` credential value will no longer cause a panic. Instead, the authentication is skipped. -* Calling `Error` on a zero-value `azcore.ResponseError` will no longer panic. -* Fixed an issue in `fake.PagerResponder[T]` that would cause a trailing error to be omitted when iterating over pages. -* Context values created by `azcore` will no longer flow across disjoint HTTP requests. - -### Other Changes - -* Skip generating trace info for no-op tracers. -* The `clientName` paramater in client constructors has been renamed to `moduleName`. - -## 1.9.0-beta.1 (2023-10-05) - -### Other Changes - -* The beta features for tracing and fakes have been reinstated. - -## 1.8.0 (2023-10-05) - -### Features Added - -* This includes the following features from `v1.8.0-beta.N` releases. - * Claims and CAE for authentication. - * New `messaging` package. - * Various helpers in the `runtime` package. - * Deprecation of `runtime.With*` funcs and their replacements in the `policy` package. -* Added types `KeyCredential` and `SASCredential` to the `azcore` package. - * Includes their respective constructor functions. -* Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package. - * Includes their respective constructor functions and options types. - -### Breaking Changes -> These changes affect only code written against beta versions of `v1.8.0` -* The beta features for tracing and fakes have been omitted for this release. - -### Bugs Fixed - -* Fixed an issue that could cause some ARM RPs to not be automatically registered. -* Block bearer token authentication for non TLS protected endpoints. - -### Other Changes - -* Updated dependencies. - -## 1.8.0-beta.3 (2023-09-07) - -### Features Added - -* Added function `FetcherForNextLink` and `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL. - -### Bugs Fixed - -* Suppress creating spans for nested SDK API calls. The HTTP span will be a child of the outer API span. - -### Other Changes - -* The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated. - * `WithCaptureResponse` - * `WithHTTPHeader` - * `WithRetryOptions` - -## 1.7.2 (2023-09-06) - -### Bugs Fixed - -* Fix default HTTP transport to work in WASM modules. - -## 1.8.0-beta.2 (2023-08-14) - -### Features Added - -* Added function `SanitizePagerPollerPath` to the `server` package to centralize sanitization and formalize the contract. -* Added `TokenRequestOptions.EnableCAE` to indicate whether to request a CAE token. - -### Breaking Changes - -> This change affects only code written against beta version `v1.8.0-beta.1`. 
-* `messaging.CloudEvent` deserializes JSON objects as `[]byte`, instead of `json.RawMessage`. See the documentation for CloudEvent.Data for more information. - -> This change affects only code written against beta versions `v1.7.0-beta.2` and `v1.8.0-beta.1`. -* Removed parameter from method `Span.End()` and its type `tracing.SpanEndOptions`. This API GA'ed in `v1.2.0` so we cannot change it. - -### Bugs Fixed - -* Propagate any query parameters when constructing a fake poller and/or injecting next links. - -## 1.7.1 (2023-08-14) - -## Bugs Fixed - -* Enable TLS renegotiation in the default transport policy. - -## 1.8.0-beta.1 (2023-07-12) - -### Features Added - -- `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec) - -### Other Changes - -* The beta features for CAE, tracing, and fakes have been reinstated. - -## 1.7.0 (2023-07-12) - -### Features Added -* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing. - -### Breaking Changes -> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2 -* The beta features for CAE, tracing, and fakes have been omitted for this release. - -## 1.7.0-beta.2 (2023-06-06) - -### Breaking Changes -> These changes affect only code written against beta version v1.7.0-beta.1 -* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed. - * This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`. -* Method `AddError()` has been removed from type `tracing.Span`. -* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`. - -## 1.6.1 (2023-06-06) - -### Bugs Fixed -* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry. - -### Other Changes -* This version contains all bug fixes from `v1.7.0-beta.1` - -## 1.7.0-beta.1 (2023-05-24) - -### Features Added -* Restored CAE support for ARM clients. -* Added supporting features to enable distributed tracing. - * Added func `runtime.StartSpan()` for use by SDKs to start spans. - * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context. - * Added field `TracingNamespace` to `runtime.PipelineOptions`. - * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types. - * Added field `SpanFromContext` to `tracing.TracerOptions`. - * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`. - * Added supporting pipeline policies to include HTTP spans when creating clients. -* Added package `fake` to support generated fakes packages in SDKs. - * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations. - * Added an internal fake poller implementation. - -### Bugs Fixed -* Retry policy always clones the underlying `*http.Request` before invoking the next policy. -* Added some non-standard error codes to the list of error codes for unregistered resource providers. - -## 1.6.0 (2023-05-04) - -### Features Added -* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. -* Added `TenantID` field to `policy.TokenRequestOptions`. 
- -## 1.5.0 (2023-04-06) - -### Features Added -* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry. - -### Breaking Changes -> These changes affect only code written against a beta version such as v1.5.0-beta.1 -> These features will return in v1.6.0-beta.1. -* Removed `TokenRequestOptions.Claims` and `.TenantID` -* Removed ARM client support for CAE and cross-tenant auth. - -### Bugs Fixed -* Added non-conformant LRO terminal states `Cancelled` and `Completed`. - -### Other Changes -* Updated to latest `internal` module. - -## 1.5.0-beta.1 (2023-03-02) - -### Features Added -* This release includes the features added in v1.4.0-beta.1 - -## 1.4.0 (2023-03-02) -> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1. - -### Features Added -* Add `Clone()` method for `arm/policy.ClientOptions`. - -### Bugs Fixed -* ARM's RP registration policy will no longer swallow unrecognized errors. -* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`. -* Fixed wrong policy copy in `arm/runtime.NewPipeline()`. - -## 1.4.0-beta.1 (2023-02-02) - -### Features Added -* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. -* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`. -* ARM bearer token policy handles CAE challenges. - -## 1.3.1 (2023-02-02) - -### Other Changes -* Update dependencies to latest versions. - -## 1.3.0 (2023-01-06) - -### Features Added -* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy` - with custom authorization logic -* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively. - -### Other Changes -* Updated `internal` module to latest version. -* `policy/Request.SetBody()` allows replacing a request's body with an empty one - -## 1.2.0 (2022-11-04) - -### Features Added -* Added `ClientOptions.APIVersion` field, which overrides the default version a client - requests of the service, if the client supports this (all ARM clients do). -* Added package `tracing` that contains the building blocks for distributed tracing. -* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation. - -### Bugs Fixed -* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`. -* Fixed the MaxRetryDelay default to be 60s. -* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior. -* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost. - -### Other Changes -* Retain contents of read-only fields when sending requests. - -## 1.1.4 (2022-10-06) - -### Bugs Fixed -* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`. -* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string - -### Other Changes -* Removed logging URL from retry policy as it's redundant. -* Retry policy logs when it exits due to a non-retriable status code. - -## 1.1.3 (2022-09-01) - -### Bugs Fixed -* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines. - -## 1.1.2 (2022-08-09) - -### Other Changes -* Fixed various doc bugs. 
- -## 1.1.1 (2022-06-30) - -### Bugs Fixed -* Avoid polling when a RELO LRO synchronously terminates. - -## 1.1.0 (2022-06-03) - -### Other Changes -* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests. - -## 1.0.0 (2022-05-12) - -### Features Added -* Added interface `runtime.PollingHandler` to support custom poller implementations. - * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`. - -### Breaking Changes -* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` -* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` -* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` -* Removed `TokenRequestOptions.TenantID` -* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` -* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` -* Removed `arm/runtime.FinalStateVia` and related `const` values -* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` -* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. -* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` -* `TokenCredential.GetToken` now returns `AccessToken` by value. - -### Bugs Fixed -* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. -* The `Operation-Location` poller now properly handles `final-state-via` values. -* Improvements in `runtime.Poller[T]` - * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. - * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. - -### Other Changes -* Updated to latest `internal` module and absorbed breaking changes. - * Use `temporal.Resource` and deleted copy. -* The internal poller implementation has been refactored. - * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. - * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. - * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. -* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` -* Default User-Agent headers no longer include `azcore` version information - -## 0.23.1 (2022-04-14) - -### Bugs Fixed -* Include XML header when marshalling XML content. -* Handle XML namespaces when searching for error code. -* Handle `odata.error` when searching for error code. - -## 0.23.0 (2022-04-04) - -### Features Added -* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. -* Added `cloud` package with a new API for cloud configuration -* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. - -### Breaking Changes -* Removed the `Poller` type-alias to the internal poller implementation. -* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. -* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. 
-* Replaced `arm.Endpoint` with `cloud` API - * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` - * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` -* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. - * Removed the `pollerID` parameter as it's no longer required. - * Created optional parameter structs and moved optional parameters into them. -* Changed `FinalStateVia` field to a `const` type. - -### Other Changes -* Converted expiring resource and dependent types to use generics. - -## 0.22.0 (2022-03-03) - -### Features Added -* Added header `WWW-Authenticate` to the default allow-list of headers for logging. -* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. - * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). - -### Breaking Changes -* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. - -## 0.21.1 (2022-02-04) - -### Bugs Fixed -* Restore response body after reading in `Poller.FinalResponse()`. (#16911) -* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) - -### Other Changes -* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) - -## 0.21.0 (2022-01-11) - -### Features Added -* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. -* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. - -### Breaking Changes -* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` -* Renamed `arm/ClientOptions.Host` to `.Endpoint` -* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` -* Removed `azcore.HTTPResponse` interface type -* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter -* `runtime.NewResponseError()` no longer requires an `error` parameter - -## 0.20.0 (2021-10-22) - -### Breaking Changes -* Removed `arm.Connection` -* Removed `azcore.Credential` and `.NewAnonymousCredential()` - * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` -* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication -* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` -* Contents in the `log` package have been slightly renamed. -* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` -* Changed parameters for `NewBearerTokenPolicy()` -* Moved policy config options out of `arm/runtime` and into `arm/policy` - -### Features Added -* Updating Documentation -* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints -* `azcore.ClientOptions` contains common pipeline configuration settings -* Added support for multi-tenant authorization in `arm/runtime` -* Require one second minimum when calling `PollUntilDone()` - -### Bug Fixes -* Fixed a potential panic when creating the default Transporter. -* Close LRO initial response body when creating a poller. -* Fixed a panic when recursively cloning structs that contain time.Time. - -## 0.19.0 (2021-08-25) - -### Breaking Changes -* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). 
- * `azcore` has all core functionality. - * `log` contains facilities for configuring in-box logging. - * `policy` is used for configuring pipeline options and creating custom pipeline policies. - * `runtime` contains various helpers used by SDK authors and generated content. - * `streaming` has helpers for streaming IO operations. -* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. - * As a result, the `Request.Telemetry()` method has been removed. -* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. -* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. -* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. - -### Bug Fixes -* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. - -### Other Changes -* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. - * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. -* Poller logic has been consolidated across ARM and core implementations. - * This required some changes to the internal interfaces for core pollers. -* The core poller types have been improved, including more logging and test coverage. - -## 0.18.1 (2021-08-20) - -### Features Added -* Adds an `ETag` type for comparing etags and handling etags on requests -* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object - -### Bugs Fixed -* `JoinPaths` will preserve query parameters encoded in the `root` url. - -### Other Changes -* Bumps dependency on `internal` module to the latest version (v0.7.0) - -## 0.18.0 (2021-07-29) -### Features Added -* Replaces methods from Logger type with two package methods for interacting with the logging functionality. 
-* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` -* `azcore.SetListener` replaces `azcore.Logger().SetListener` - -### Breaking Changes -* Removes `Logger` type from `azcore` - - -## 0.17.0 (2021-07-27) -### Features Added -* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) -* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) - -### Breaking Changes -* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) -* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) -* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) - - -## 0.16.2 (2021-05-26) -### Features Added -* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) - - -## 0.16.1 (2021-05-19) -### Features Added -* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) - - -## 0.16.0 (2021-05-07) -### Features Added -* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) - - -## 0.15.1 (2021-05-06) -### Features Added -* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) - - -## 0.15.0 (2021-05-05) -### Features Added -* Add support for null map and slice -* Export `Response.Payload` method - -### Breaking Changes -* remove `Response.UnmarshalError` as it's no longer required - - -## 0.14.5 (2021-04-23) -### Features Added -* Add `UnmarshalError()` on `azcore.Response` - - -## 0.14.4 (2021-04-22) -### Features Added -* Support for basic LRO polling -* Added type `LROPoller` and supporting types for basic polling on long running operations. -* rename poller param and added doc comment - -### Bugs Fixed -* Fixed content type detection bug in logging. - - -## 0.14.3 (2021-03-29) -### Features Added -* Add support for multi-part form data -* Added method `WriteMultipartFormData()` to Request. - - -## 0.14.2 (2021-03-17) -### Features Added -* Add support for encoding JSON null values -* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. -* Documentation fixes - -### Bugs Fixed -* Fixed improper error wrapping - - -## 0.14.1 (2021-02-08) -### Features Added -* Add `Pager` and `Poller` interfaces to azcore - - -## 0.14.0 (2021-01-12) -### Features Added -* Accept zero-value options for default values -* Specify zero-value options structs to accept default values. -* Remove `DefaultXxxOptions()` methods. -* Do not silently change TryTimeout on negative values -* make per-try timeout opt-in - - -## 0.13.4 (2020-11-20) -### Features Added -* Include telemetry string in User Agent - - -## 0.13.3 (2020-11-20) -### Features Added -* Updating response body handling on `azcore.Response` - - -## 0.13.2 (2020-11-13) -### Features Added -* Remove implementation of stateless policies as first-class functions. - - -## 0.13.1 (2020-11-05) -### Features Added -* Add `Telemetry()` method to `azcore.Request()` - - -## 0.13.0 (2020-10-14) -### Features Added -* Rename `log` to `logger` to avoid name collision with the log package. 
-* Documentation improvements -* Simplified `DefaultHTTPClientTransport()` implementation - - -## 0.12.1 (2020-10-13) -### Features Added -* Update `internal` module dependence to `v0.5.0` - - -## 0.12.0 (2020-10-08) -### Features Added -* Removed storage specific content -* Removed internal content to prevent API clutter -* Refactored various policy options to conform with our options pattern - - -## 0.11.0 (2020-09-22) -### Features Added - -* Removed `LogError` and `LogSlowResponse`. -* Renamed `options` in `RequestLogOptions`. -* Updated `NewRequestLogPolicy()` to follow standard pattern for options. -* Refactored `requestLogPolicy.Do()` per above changes. -* Cleaned up/added logging in retry policy. -* Export `NewResponseError()` -* Fix `RequestLogOptions` comment - - -## 0.10.1 (2020-09-17) -### Features Added -* Add default console logger -* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. -* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. -* Add `LogLongRunningOperation` - - -## 0.10.0 (2020-09-10) -### Features Added -* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. -* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter. -* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. -* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. -* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. -* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. -* moved path concatenation into `JoinPaths()` func - - -## 0.9.6 (2020-08-18) -### Features Added -* Improvements to body download policy -* Always download the response body for error responses, i.e. HTTP status codes >= 400. -* Simplify variable declarations - - -## 0.9.5 (2020-08-11) -### Features Added -* Set the Content-Length header in `Request.SetBody` - - -## 0.9.4 (2020-08-03) -### Features Added -* Fix cancellation of per try timeout -* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time. -* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. -* Do not drain response body if there are no more retries -* Do not retry non-idempotent operations when body download fails - - -## 0.9.3 (2020-07-28) -### Features Added -* Add support for custom HTTP request headers -* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. -* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. -* Remove method specific to Go 1.14 - - -## 0.9.2 (2020-07-28) -### Features Added -* Omit read-only content from request payloads -* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. -* Verify no fields were dropped -* Handle embedded struct types -* Added test for cloning by value -* Add messages to failures - - -## 0.9.1 (2020-07-22) -### Features Added -* Updated dependency on internal module to fix race condition. 
- - -## 0.9.0 (2020-07-09) -### Features Added -* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. -* Updated `sdk/internal` dependency to latest version. -* Rename package alias - - -## 0.8.2 (2020-06-29) -### Features Added -* Added missing documentation comments - -### Bugs Fixed -* Fixed a bug in body download policy. - - -## 0.8.1 (2020-06-26) -### Features Added -* Miscellaneous clean-up reported by linters - - -## 0.8.0 (2020-06-01) -### Features Added -* Differentiate between standard and URL encoding. - - -## 0.7.1 (2020-05-27) -### Features Added -* Add support for for base64 encoding and decoding of payloads. - - -## 0.7.0 (2020-05-12) -### Features Added -* Change `RetryAfter()` to a function. - - -## 0.6.0 (2020-04-29) -### Features Added -* Updating `RetryAfter` to only return the detaion in the RetryAfter header - - -## 0.5.0 (2020-03-23) -### Features Added -* Export `TransportFunc` - -### Breaking Changes -* Removed `IterationDone` - - -## 0.4.1 (2020-02-25) -### Features Added -* Ensure per-try timeout is properly cancelled -* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy. -* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. -* `Logger.Should()` will return false if no listener is set. - - -## 0.4.0 (2020-02-18) -### Features Added -* Enable custom `RetryOptions` to be specified per API call -* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. -* Remove 429 from the list of default HTTP status codes for retry. -* Change StatusCodesForRetry to a slice so consumers can append to it. -* Added support for retry-after in HTTP-date format. -* Cleaned up some comments specific to storage. -* Remove `Request.SetQueryParam()` -* Renamed `MaxTries` to `MaxRetries` - -## 0.3.0 (2020-01-16) -### Features Added -* Added `DefaultRetryOptions` to create initialized default options. - -### Breaking Changes -* Removed `Response.CheckStatusCode()` - - -## 0.2.0 (2020-01-15) -### Features Added -* Add support for marshalling and unmarshalling JSON -* Removed `Response.Payload` field -* Exit early when unmarsahlling if there is no payload - - -## 0.1.0 (2020-01-10) -### Features Added -* Initial release diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt deleted file mode 100644 index 48ea6616b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) Microsoft Corporation. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md deleted file mode 100644 index 35a74e18d..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Azure Core Client Module for Go - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) -[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) -[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) - -The `azcore` module provides a set of common interfaces and types for Go SDK client modules. -These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). - -## Getting started - -This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. - -Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. -To add the latest version to your `go.mod` file, execute the following command. - -```bash -go get github.com/Azure/azure-sdk-for-go/sdk/azcore -``` - -General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). - -## Contributing -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. -For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). - -When you submit a pull request, a CLA-bot will automatically determine whether -you need to provide a CLA and decorate the PR appropriately (e.g., label, -comment). Simply follow the instructions provided by the bot. You will only -need to do this once across all repos using our CLA. - -This project has adopted the -[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information, see the -[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) -or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any -additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml deleted file mode 100644 index aab921853..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml +++ /dev/null @@ -1,29 +0,0 @@ -# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
-trigger: - branches: - include: - - main - - feature/* - - hotfix/* - - release/* - paths: - include: - - sdk/azcore/ - - eng/ - -pr: - branches: - include: - - main - - feature/* - - hotfix/* - - release/* - paths: - include: - - sdk/azcore/ - - eng/ - -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml - parameters: - ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go deleted file mode 100644 index 9d077a3e1..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package cloud - -var ( - // AzureChina contains configuration for Azure China. - AzureChina = Configuration{ - ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, - } - // AzureGovernment contains configuration for Azure Government. - AzureGovernment = Configuration{ - ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, - } - // AzurePublic contains configuration for Azure Public Cloud. - AzurePublic = Configuration{ - ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, - } -) - -// ServiceName identifies a cloud service. -type ServiceName string - -// ResourceManager is a global constant identifying Azure Resource Manager. -const ResourceManager ServiceName = "resourceManager" - -// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. -type ServiceConfiguration struct { - // Audience is the audience the client will request for its access tokens. - Audience string - // Endpoint is the service's base URL. - Endpoint string -} - -// Configuration configures a cloud. -type Configuration struct { - // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. - ActiveDirectoryAuthorityHost string - // Services contains configuration for the cloud's services. - Services map[ServiceName]ServiceConfiguration -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go deleted file mode 100644 index 985b1bde2..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -/* -Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. - -Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as -"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other -Azure Clouds to configure clients appropriately. - -This package contains predefined configuration for well-known sovereign clouds such as Azure Government and -Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. 
For -example, configuring a credential and ARM client for Azure Government: - - opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} - cred, err := azidentity.NewDefaultAzureCredential( - &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, - ) - handle(err) - - client, err := armsubscription.NewClient( - cred, &arm.ClientOptions{ClientOptions: opts}, - ) - handle(err) - -Applications deployed to a private cloud such as Azure Stack create a Configuration object with -appropriate values: - - c := cloud.Configuration{ - ActiveDirectoryAuthorityHost: "https://...", - Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ - cloud.ResourceManager: { - Audience: "...", - Endpoint: "https://...", - }, - }, - } - opts := azcore.ClientOptions{Cloud: c} - - cred, err := azidentity.NewDefaultAzureCredential( - &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, - ) - handle(err) - - client, err := armsubscription.NewClient( - cred, &arm.ClientOptions{ClientOptions: opts}, - ) - handle(err) -*/ -package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go deleted file mode 100644 index 9d1c2f0c0..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ /dev/null @@ -1,173 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azcore - -import ( - "reflect" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" -) - -// AccessToken represents an Azure service bearer access token with expiry information. -type AccessToken = exported.AccessToken - -// TokenCredential represents a credential capable of providing an OAuth token. -type TokenCredential = exported.TokenCredential - -// KeyCredential contains an authentication key used to authenticate to an Azure service. -type KeyCredential = exported.KeyCredential - -// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. -// - key is the authentication key -func NewKeyCredential(key string) *KeyCredential { - return exported.NewKeyCredential(key) -} - -// SASCredential contains a shared access signature used to authenticate to an Azure service. -type SASCredential = exported.SASCredential - -// NewSASCredential creates a new instance of [SASCredential] with the specified values. -// - sas is the shared access signature -func NewSASCredential(sas string) *SASCredential { - return exported.NewSASCredential(sas) -} - -// holds sentinel values used to send nulls -var nullables map[reflect.Type]any = map[reflect.Type]any{} -var nullablesMu sync.RWMutex - -// NullValue is used to send an explicit 'null' within a request. -// This is typically used in JSON-MERGE-PATCH operations to delete a value. 
-func NullValue[T any]() T { - t := shared.TypeOfT[T]() - - nullablesMu.RLock() - v, found := nullables[t] - nullablesMu.RUnlock() - - if found { - // return the sentinel object - return v.(T) - } - - // promote to exclusive lock and check again (double-checked locking pattern) - nullablesMu.Lock() - defer nullablesMu.Unlock() - v, found = nullables[t] - - if !found { - var o reflect.Value - if k := t.Kind(); k == reflect.Map { - o = reflect.MakeMap(t) - } else if k == reflect.Slice { - // empty slices appear to all point to the same data block - // which causes comparisons to become ambiguous. so we create - // a slice with len/cap of one which ensures a unique address. - o = reflect.MakeSlice(t, 1, 1) - } else { - o = reflect.New(t.Elem()) - } - v = o.Interface() - nullables[t] = v - } - // return the sentinel object - return v.(T) -} - -// IsNullValue returns true if the field contains a null sentinel value. -// This is used by custom marshallers to properly encode a null value. -func IsNullValue[T any](v T) bool { - // see if our map has a sentinel object for this *T - t := reflect.TypeOf(v) - nullablesMu.RLock() - defer nullablesMu.RUnlock() - - if o, found := nullables[t]; found { - o1 := reflect.ValueOf(o) - v1 := reflect.ValueOf(v) - // we found it; return true if v points to the sentinel object. - // NOTE: maps and slices can only be compared to nil, else you get - // a runtime panic. so we compare addresses instead. - return o1.Pointer() == v1.Pointer() - } - // no sentinel object for this *t - return false -} - -// ClientOptions contains optional settings for a client's pipeline. -// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. -// Zero-value fields will have their specified default values applied during use. -type ClientOptions = policy.ClientOptions - -// Client is a basic HTTP client. It consists of a pipeline and tracing provider. -type Client struct { - pl runtime.Pipeline - tr tracing.Tracer - - // cached on the client to support shallow copying with new values - tp tracing.Provider - modVer string - namespace string -} - -// NewClient creates a new Client instance with the provided values. -// - moduleName - the fully qualified name of the module where the client is defined; used by the telemetry policy and tracing provider. -// - moduleVersion - the semantic version of the module; used by the telemetry policy and tracing provider. -// - plOpts - pipeline configuration options; can be the zero-value -// - options - optional client configurations; pass nil to accept the default values -func NewClient(moduleName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { - if options == nil { - options = &ClientOptions{} - } - - if !options.Telemetry.Disabled { - if err := shared.ValidateModVer(moduleVersion); err != nil { - return nil, err - } - } - - pl := runtime.NewPipeline(moduleName, moduleVersion, plOpts, options) - - tr := options.TracingProvider.NewTracer(moduleName, moduleVersion) - if tr.Enabled() && plOpts.Tracing.Namespace != "" { - tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: plOpts.Tracing.Namespace}) - } - - return &Client{ - pl: pl, - tr: tr, - tp: options.TracingProvider, - modVer: moduleVersion, - namespace: plOpts.Tracing.Namespace, - }, nil -} - -// Pipeline returns the pipeline for this client. -func (c *Client) Pipeline() runtime.Pipeline { - return c.pl -} - -// Tracer returns the tracer for this client. 
-func (c *Client) Tracer() tracing.Tracer { - return c.tr -} - -// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName. -// Note that the values for module name and version will be preserved from the source Client. -// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans -func (c *Client) WithClientName(clientName string) *Client { - tr := c.tp.NewTracer(clientName, c.modVer) - if tr.Enabled() && c.namespace != "" { - tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: c.namespace}) - } - return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer, namespace: c.namespace} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go deleted file mode 100644 index 654a5f404..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go +++ /dev/null @@ -1,264 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. - -/* -Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. - -The middleware consists of three components. - - - One or more Policy instances. - - A Transporter instance. - - A Pipeline instance that combines the Policy and Transporter instances. - -# Implementing the Policy Interface - -A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as -a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share -the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to -avoid race conditions. - -A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can -perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, -and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request -work, it must call the Next() method on the *policy.Request instance in order to pass the request to the -next Policy in the chain. - -When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance -can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response -body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response -and error instances to its caller. - -Template for implementing a stateless Policy: - - type policyFunc func(*policy.Request) (*http.Response, error) - - // Do implements the Policy interface on policyFunc. 
- func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { - return pf(req) - } - - func NewMyStatelessPolicy() policy.Policy { - return policyFunc(func(req *policy.Request) (*http.Response, error) { - // TODO: mutate/process Request here - - // forward Request to next Policy & get Response/error - resp, err := req.Next() - - // TODO: mutate/process Response/error here - - // return Response/error to previous Policy - return resp, err - }) - } - -Template for implementing a stateful Policy: - - type MyStatefulPolicy struct { - // TODO: add configuration/setting fields here - } - - // TODO: add initialization args to NewMyStatefulPolicy() - func NewMyStatefulPolicy() policy.Policy { - return &MyStatefulPolicy{ - // TODO: initialize configuration/setting fields here - } - } - - func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { - // TODO: mutate/process Request here - - // forward Request to next Policy & get Response/error - resp, err := req.Next() - - // TODO: mutate/process Response/error here - - // return Response/error to previous Policy - return resp, err - } - -# Implementing the Transporter Interface - -The Transporter interface is responsible for sending the HTTP request and returning the corresponding -HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter -implementation uses a shared http.Client from the standard library. - -The same stateful/stateless rules for Policy implementations apply to Transporter implementations. - -# Using Policy and Transporter Instances Via a Pipeline - -To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. - - func NewPipeline(transport Transporter, policies ...Policy) Pipeline - -The specified Policy instances form a chain and are invoked in the order provided to NewPipeline -followed by the Transporter. - -Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. - - func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) - - func (p Pipeline) Do(req *Request) (*http.Request, error) - -The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter -instances. The response/error is then sent through the same chain of Policy instances in reverse -order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with -TransportA. - - pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) - -The flow of Request and Response looks like the following: - - policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ - | - HTTP(S) endpoint - | - caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ - -# Creating a Request Instance - -The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also -contains some internal state and provides various convenience methods. You create a Request instance -by calling the runtime.NewRequest function: - - func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) - -If the Request should contain a body, call the SetBody method. - - func (req *Request) SetBody(body ReadSeekCloser, contentType string) error - -A seekable stream is required so that upon retry, the retry Policy instance can seek the stream -back to the beginning before retrying the network request and re-uploading the body. 
- -# Sending an Explicit Null - -Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. - - { - "delete-me": null - } - -This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as -a means to resolve the ambiguity between a field to be excluded and its zero-value. - - type Widget struct { - Name *string `json:",omitempty"` - Count *int `json:",omitempty"` - } - -In the above example, Name and Count are defined as pointer-to-type to disambiguate between -a missing value (nil) and a zero-value (0) which might have semantic differences. - -In a PATCH operation, any fields left as nil are to have their values preserved. When updating -a Widget's count, one simply specifies the new value for Count, leaving Name nil. - -To fulfill the requirement for sending a JSON null, the NullValue() function can be used. - - w := Widget{ - Count: azcore.NullValue[*int](), - } - -This sends an explict "null" for Count, indicating that any current value for Count should be deleted. - -# Processing the Response - -When the HTTP response is received, the *http.Response is returned directly. Each Policy instance -can inspect/mutate the *http.Response. - -# Built-in Logging - -To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. - -By default the logger writes to stderr. This can be customized by calling log.SetListener, providing -a callback that writes to the desired location. Any custom logging implementation MUST provide its -own synchronization to handle concurrent invocations. - -See the docs for the log package for further details. - -# Pageable Operations - -Pageable operations return potentially large data sets spread over multiple GET requests. The result of -each GET is a "page" of data consisting of a slice of items. - -Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. - - func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] - -The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages -and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. - - pager := widgetClient.NewListWidgetsPager(nil) - for pager.More() { - page, err := pager.NextPage(context.TODO()) - // handle err - for _, widget := range page.Values { - // process widget - } - } - -# Long-Running Operations - -Long-running operations (LROs) are operations consisting of an initial request to start the operation followed -by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one -of the following values. - - - Succeeded - the LRO completed successfully - - Failed - the LRO failed to complete - - Canceled - the LRO was canceled - -LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. - - func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) - -When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. -It does _not_ mean that the widget has been created or updated (or failed to be created/updated). - -The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, -call the PollUntilDone() method. 
- - poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) - // handle err - result, err := poller.PollUntilDone(context.TODO(), nil) - // handle err - // use result - -The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the -context is canceled/timed out. - -Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to -this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation -mechanism as required. - -# Resume Tokens - -Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to -recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. - - token, err := poller.ResumeToken() - // handle error - -Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls -to poller.Poll() might change the poller's state. In this case, a new token should be created. - -After the token has been obtained, it can be used to recreate an instance of the originating poller. - - poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ - ResumeToken: token, - }) - -When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. - -Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO -BeginA() will result in an error. - -# Fakes - -The fake package contains types used for constructing in-memory fake servers used in unit tests. -This allows writing tests to cover various success/error conditions without the need for connecting to a live service. - -Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes. -*/ -package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go deleted file mode 100644 index 17bd50c67..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azcore - -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - -// ResponseError is returned when a request is made to a service and -// the service returns a non-success HTTP status code. -// Use errors.As() to access this type in the error chain. -type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go deleted file mode 100644 index 2b19d01f7..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go +++ /dev/null @@ -1,57 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azcore - -import ( - "strings" -) - -// ETag is a property used for optimistic concurrency during updates -// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2 -// An ETag can be empty (""). -type ETag string - -// ETagAny is an ETag that represents everything, the value is "*" -const ETagAny ETag = "*" - -// Equals does a strong comparison of two ETags. 
Equals returns true when both -// ETags are not weak and the values of the underlying strings are equal. -func (e ETag) Equals(other ETag) bool { - return !e.IsWeak() && !other.IsWeak() && e == other -} - -// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match -// character-by-character, regardless of either or both being tagged as "weak". -func (e ETag) WeakEquals(other ETag) bool { - getStart := func(e1 ETag) int { - if e1.IsWeak() { - return 2 - } - return 0 - } - aStart := getStart(e) - bStart := getStart(other) - - aVal := e[aStart:] - bVal := other[bStart:] - - return aVal == bVal -} - -// IsWeak specifies whether the ETag is strong or weak. -func (e ETag) IsWeak() bool { - return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") -} - -// MatchConditions specifies HTTP options for conditional requests. -type MatchConditions struct { - // Optionally limit requests to resources that have a matching ETag. - IfMatch *ETag - - // Optionally limit requests to resources that do not match the ETag. - IfNoneMatch *ETag -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go deleted file mode 100644 index f2b296b6d..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ /dev/null @@ -1,175 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package exported - -import ( - "context" - "encoding/base64" - "fmt" - "io" - "net/http" - "sync/atomic" - "time" -) - -type nopCloser struct { - io.ReadSeeker -} - -func (n nopCloser) Close() error { - return nil -} - -// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. -// Exported as streaming.NopCloser(). -func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { - return nopCloser{rs} -} - -// HasStatusCode returns true if the Response's status code is one of the specified values. -// Exported as runtime.HasStatusCode(). -func HasStatusCode(resp *http.Response, statusCodes ...int) bool { - if resp == nil { - return false - } - for _, sc := range statusCodes { - if resp.StatusCode == sc { - return true - } - } - return false -} - -// AccessToken represents an Azure service bearer access token with expiry information. -// Exported as azcore.AccessToken. -type AccessToken struct { - Token string - ExpiresOn time.Time -} - -// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. -// Exported as policy.TokenRequestOptions. -type TokenRequestOptions struct { - // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a - // service may return in a claims challenge following an authorization failure. If a service returned the - // claims value base64 encoded, it must be decoded before setting this field. - Claims string - - // EnableCAE indicates whether to enable Continuous Access Evaluation (CAE) for the requested token. When true, - // azidentity credentials request CAE tokens for resource APIs supporting CAE. Clients are responsible for - // handling CAE challenges. If a client that doesn't handle CAE challenges receives a CAE token, it may end up - // in a loop retrying an API call with a token that has been revoked due to CAE. 
- EnableCAE bool - - // Scopes contains the list of permission scopes required for the token. - Scopes []string - - // TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in - // their configured default tenants when this field isn't set. - TenantID string -} - -// TokenCredential represents a credential capable of providing an OAuth token. -// Exported as azcore.TokenCredential. -type TokenCredential interface { - // GetToken requests an access token for the specified set of scopes. - GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) -} - -// DecodeByteArray will base-64 decode the provided string into v. -// Exported as runtime.DecodeByteArray() -func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { - if len(s) == 0 { - return nil - } - payload := string(s) - if payload[0] == '"' { - // remove surrounding quotes - payload = payload[1 : len(payload)-1] - } - switch format { - case Base64StdFormat: - decoded, err := base64.StdEncoding.DecodeString(payload) - if err == nil { - *v = decoded - return nil - } - return err - case Base64URLFormat: - // use raw encoding as URL format should not contain any '=' characters - decoded, err := base64.RawURLEncoding.DecodeString(payload) - if err == nil { - *v = decoded - return nil - } - return err - default: - return fmt.Errorf("unrecognized byte array format: %d", format) - } -} - -// KeyCredential contains an authentication key used to authenticate to an Azure service. -// Exported as azcore.KeyCredential. -type KeyCredential struct { - cred *keyCredential -} - -// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. -// - key is the authentication key -func NewKeyCredential(key string) *KeyCredential { - return &KeyCredential{cred: newKeyCredential(key)} -} - -// Update replaces the existing key with the specified value. -func (k *KeyCredential) Update(key string) { - k.cred.Update(key) -} - -// SASCredential contains a shared access signature used to authenticate to an Azure service. -// Exported as azcore.SASCredential. -type SASCredential struct { - cred *keyCredential -} - -// NewSASCredential creates a new instance of [SASCredential] with the specified values. -// - sas is the shared access signature -func NewSASCredential(sas string) *SASCredential { - return &SASCredential{cred: newKeyCredential(sas)} -} - -// Update replaces the existing shared access signature with the specified value. -func (k *SASCredential) Update(sas string) { - k.cred.Update(sas) -} - -// KeyCredentialGet returns the key for cred. -func KeyCredentialGet(cred *KeyCredential) string { - return cred.cred.Get() -} - -// SASCredentialGet returns the shared access sig for cred. 
-func SASCredentialGet(cred *SASCredential) string { - return cred.cred.Get() -} - -type keyCredential struct { - key atomic.Value // string -} - -func newKeyCredential(key string) *keyCredential { - keyCred := keyCredential{} - keyCred.key.Store(key) - return &keyCred -} - -func (k *keyCredential) Get() string { - return k.key.Load().(string) -} - -func (k *keyCredential) Update(key string) { - k.key.Store(key) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go deleted file mode 100644 index e45f831ed..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go +++ /dev/null @@ -1,77 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package exported - -import ( - "errors" - "net/http" -) - -// Policy represents an extensibility point for the Pipeline that can mutate the specified -// Request and react to the received Response. -// Exported as policy.Policy. -type Policy interface { - // Do applies the policy to the specified Request. When implementing a Policy, mutate the - // request before calling req.Next() to move on to the next policy, and respond to the result - // before returning to the caller. - Do(req *Request) (*http.Response, error) -} - -// Pipeline represents a primitive for sending HTTP requests and receiving responses. -// Its behavior can be extended by specifying policies during construction. -// Exported as runtime.Pipeline. -type Pipeline struct { - policies []Policy -} - -// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. -// Exported as policy.Transporter. -type Transporter interface { - // Do sends the HTTP request and returns the HTTP response or error. - Do(req *http.Request) (*http.Response, error) -} - -// used to adapt a TransportPolicy to a Policy -type transportPolicy struct { - trans Transporter -} - -func (tp transportPolicy) Do(req *Request) (*http.Response, error) { - if tp.trans == nil { - return nil, errors.New("missing transporter") - } - resp, err := tp.trans.Do(req.Raw()) - if err != nil { - return nil, err - } else if resp == nil { - // there was no response and no error (rare but can happen) - // this ensures the retry policy will retry the request - return nil, errors.New("received nil response") - } - return resp, nil -} - -// NewPipeline creates a new Pipeline object from the specified Policies. -// Not directly exported, but used as part of runtime.NewPipeline(). -func NewPipeline(transport Transporter, policies ...Policy) Pipeline { - // transport policy must always be the last in the slice - policies = append(policies, transportPolicy{trans: transport}) - return Pipeline{ - policies: policies, - } -} - -// Do is called for each and every HTTP request. It passes the request through all -// the Policy objects (which can transform the Request's URL/query parameters/headers) -// and ultimately sends the transformed HTTP request over the network. 
-func (p Pipeline) Do(req *Request) (*http.Response, error) { - if req == nil { - return nil, errors.New("request cannot be nil") - } - req.policies = p.policies - return req.Next() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go deleted file mode 100644 index 8d1ae213c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ /dev/null @@ -1,223 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package exported - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "net/http" - "reflect" - "strconv" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" -) - -// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when -// encoding/decoding a slice of bytes to/from a string. -// Exported as runtime.Base64Encoding -type Base64Encoding int - -const ( - // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. - Base64StdFormat Base64Encoding = 0 - - // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. - Base64URLFormat Base64Encoding = 1 -) - -// EncodeByteArray will base-64 encode the byte slice v. -// Exported as runtime.EncodeByteArray() -func EncodeByteArray(v []byte, format Base64Encoding) string { - if format == Base64URLFormat { - return base64.RawURLEncoding.EncodeToString(v) - } - return base64.StdEncoding.EncodeToString(v) -} - -// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. -// Don't use this type directly, use NewRequest() instead. -// Exported as policy.Request. -type Request struct { - req *http.Request - body io.ReadSeekCloser - policies []Policy - values opValues -} - -type opValues map[reflect.Type]interface{} - -// Set adds/changes a value -func (ov opValues) set(value interface{}) { - ov[reflect.TypeOf(value)] = value -} - -// Get looks for a value set by SetValue first -func (ov opValues) get(value interface{}) bool { - v, ok := ov[reflect.ValueOf(value).Elem().Type()] - if ok { - reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) - } - return ok -} - -// NewRequest creates a new Request with the specified input. -// Exported as runtime.NewRequest(). -func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { - req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) - if err != nil { - return nil, err - } - if req.URL.Host == "" { - return nil, errors.New("no Host in request URL") - } - if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { - return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) - } - return &Request{req: req}, nil -} - -// Body returns the original body specified when the Request was created. -func (req *Request) Body() io.ReadSeekCloser { - return req.body -} - -// Raw returns the underlying HTTP request. -func (req *Request) Raw() *http.Request { - return req.req -} - -// Next calls the next policy in the pipeline. -// If there are no more policies, nil and an error are returned. -// This method is intended to be called from pipeline policies. -// To send a request through a pipeline call Pipeline.Do(). 
-func (req *Request) Next() (*http.Response, error) { - if len(req.policies) == 0 { - return nil, errors.New("no more policies") - } - nextPolicy := req.policies[0] - nextReq := *req - nextReq.policies = nextReq.policies[1:] - return nextPolicy.Do(&nextReq) -} - -// SetOperationValue adds/changes a mutable key/value associated with a single operation. -func (req *Request) SetOperationValue(value interface{}) { - if req.values == nil { - req.values = opValues{} - } - req.values.set(value) -} - -// OperationValue looks for a value set by SetOperationValue(). -func (req *Request) OperationValue(value interface{}) bool { - if req.values == nil { - return false - } - return req.values.get(value) -} - -// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length -// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", -// Content-Type won't be set, and if it was set, will be deleted. -// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. -func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { - // clobber the existing Content-Type to preserve behavior - return SetBody(req, body, contentType, true) -} - -// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. -func (req *Request) RewindBody() error { - if req.body != nil { - // Reset the stream back to the beginning and restore the body - _, err := req.body.Seek(0, io.SeekStart) - req.req.Body = req.body - return err - } - return nil -} - -// Close closes the request body. -func (req *Request) Close() error { - if req.body == nil { - return nil - } - return req.body.Close() -} - -// Clone returns a deep copy of the request with its context changed to ctx. -func (req *Request) Clone(ctx context.Context) *Request { - r2 := *req - r2.req = req.req.Clone(ctx) - return &r2 -} - -// WithContext returns a shallow copy of the request with its context changed to ctx. -func (req *Request) WithContext(ctx context.Context) *Request { - r2 := new(Request) - *r2 = *req - r2.req = r2.req.WithContext(ctx) - return r2 -} - -// not exported but dependent on Request - -// PolicyFunc is a type that implements the Policy interface. -// Use this type when implementing a stateless policy as a first-class function. -type PolicyFunc func(*Request) (*http.Response, error) - -// Do implements the Policy interface on policyFunc. -func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { - return pf(req) -} - -// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly. 
-// - req is the request to modify -// - body is the request body; if nil or empty, Content-Length won't be set -// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted -// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType -func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error { - var err error - var size int64 - if body != nil { - size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size - if err != nil { - return err - } - } - if size == 0 { - // treat an empty stream the same as a nil one: assign req a nil body - body = nil - // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content - // (Del is a no-op when the header has no value) - req.req.Header.Del(shared.HeaderContentLength) - } else { - _, err = body.Seek(0, io.SeekStart) - if err != nil { - return err - } - req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) - req.Raw().GetBody = func() (io.ReadCloser, error) { - _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream - return body, err - } - } - // keep a copy of the body argument. this is to handle cases - // where req.Body is replaced, e.g. httputil.DumpRequest and friends. - req.body = body - req.req.Body = body - req.req.ContentLength = size - if contentType == "" { - // Del is a no-op when the header has no value - req.req.Header.Del(shared.HeaderContentType) - } else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType { - req.req.Header.Set(shared.HeaderContentType, contentType) - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go deleted file mode 100644 index bd348b868..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ /dev/null @@ -1,167 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package exported - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "regexp" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" -) - -// NewResponseError creates a new *ResponseError from the provided HTTP response. -// Exported as runtime.NewResponseError(). -func NewResponseError(resp *http.Response) error { - // prefer the error code in the response header - if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { - return NewResponseErrorWithErrorCode(resp, ec) - } - - // if we didn't get x-ms-error-code, check in the response body - body, err := exported.Payload(resp, nil) - if err != nil { - // since we're not returning the ResponseError in this - // case we also don't want to write it to the log. - return err - } - - var errorCode string - if len(body) > 0 { - if fromJSON := extractErrorCodeJSON(body); fromJSON != "" { - errorCode = fromJSON - } else if fromXML := extractErrorCodeXML(body); fromXML != "" { - errorCode = fromXML - } - } - - return NewResponseErrorWithErrorCode(resp, errorCode) -} - -// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. 
-// Exported as runtime.NewResponseErrorWithErrorCode(). -func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { - respErr := &ResponseError{ - ErrorCode: errorCode, - StatusCode: resp.StatusCode, - RawResponse: resp, - } - log.Write(log.EventResponseError, respErr.Error()) - return respErr -} - -func extractErrorCodeJSON(body []byte) string { - var rawObj map[string]interface{} - if err := json.Unmarshal(body, &rawObj); err != nil { - // not a JSON object - return "" - } - - // check if this is a wrapped error, i.e. { "error": { ... } } - // if so then unwrap it - if wrapped, ok := rawObj["error"]; ok { - unwrapped, ok := wrapped.(map[string]interface{}) - if !ok { - return "" - } - rawObj = unwrapped - } else if wrapped, ok := rawObj["odata.error"]; ok { - // check if this a wrapped odata error, i.e. { "odata.error": { ... } } - unwrapped, ok := wrapped.(map[string]any) - if !ok { - return "" - } - rawObj = unwrapped - } - - // now check for the error code - code, ok := rawObj["code"] - if !ok { - return "" - } - codeStr, ok := code.(string) - if !ok { - return "" - } - return codeStr -} - -func extractErrorCodeXML(body []byte) string { - // regular expression is much easier than dealing with the XML parser - rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) - res := rx.FindStringSubmatch(string(body)) - if len(res) != 2 { - return "" - } - // first submatch is the entire thing, second one is the captured error code - return res[1] -} - -// ResponseError is returned when a request is made to a service and -// the service returns a non-success HTTP status code. -// Use errors.As() to access this type in the error chain. -// Exported as azcore.ResponseError. -type ResponseError struct { - // ErrorCode is the error code returned by the resource provider if available. - ErrorCode string - - // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. - StatusCode int - - // RawResponse is the underlying HTTP response. - RawResponse *http.Response -} - -// Error implements the error interface for type ResponseError. -// Note that the message contents are not contractual and can change over time. 
-func (e *ResponseError) Error() string { - const separator = "--------------------------------------------------------------------------------" - // write the request method and URL with response status code - msg := &bytes.Buffer{} - if e.RawResponse != nil { - if e.RawResponse.Request != nil { - fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) - } else { - fmt.Fprintln(msg, "Request information not available") - } - fmt.Fprintln(msg, separator) - fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) - } else { - fmt.Fprintln(msg, "Missing RawResponse") - fmt.Fprintln(msg, separator) - } - if e.ErrorCode != "" { - fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) - } else { - fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") - } - if e.RawResponse != nil { - fmt.Fprintln(msg, separator) - body, err := exported.Payload(e.RawResponse, nil) - if err != nil { - // this really shouldn't fail at this point as the response - // body is already cached (it was read in NewResponseError) - fmt.Fprintf(msg, "Error reading response body: %v", err) - } else if len(body) > 0 { - if err := json.Indent(msg, body, "", " "); err != nil { - // failed to pretty-print so just dump it verbatim - fmt.Fprint(msg, string(body)) - } - // the standard library doesn't have a pretty-printer for XML - fmt.Fprintln(msg) - } else { - fmt.Fprintln(msg, "Response contained no body") - } - } - fmt.Fprintln(msg, separator) - - return msg.String() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go deleted file mode 100644 index 5cb87de2c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// This is an internal helper package to combine the complete logging APIs. -package log - -import ( - azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" -) - -type Event = log.Event - -const ( - EventRequest = azlog.EventRequest - EventResponse = azlog.EventResponse - EventResponseError = azlog.EventResponseError - EventRetryPolicy = azlog.EventRetryPolicy - EventLRO = azlog.EventLRO -) - -// Write invokes the underlying listener with the specified event and message. -// If the event shouldn't be logged or there is no listener then Write does nothing. -func Write(cls log.Event, msg string) { - log.Write(cls, msg) -} - -// Writef invokes the underlying listener with the specified event and formatted message. -// If the event shouldn't be logged or there is no listener then Writef does nothing. -func Writef(cls log.Event, format string, a ...interface{}) { - log.Writef(cls, format, a...) -} - -// SetListener will set the Logger to write to the specified listener. -func SetListener(lst func(Event, string)) { - log.SetListener(lst) -} - -// Should returns true if the specified log event should be written to the log. -// By default all log events will be logged. Call SetEvents() to limit -// the log events for logging. -// If no listener has been set this will return false. -// Calling this method is useful when the message to log is computationally expensive -// and you want to avoid the overhead if its log event is not enabled. 
-func Should(cls log.Event) bool { - return log.Should(cls) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go deleted file mode 100644 index b05bd8b38..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ /dev/null @@ -1,159 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package async - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" -) - -// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md - -// Applicable returns true if the LRO is using Azure-AsyncOperation. -func Applicable(resp *http.Response) bool { - return resp.Header.Get(shared.HeaderAzureAsync) != "" -} - -// CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { - _, ok := token["asyncURL"] - return ok -} - -// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. -type Poller[T any] struct { - pl exported.Pipeline - - resp *http.Response - - // The URL from Azure-AsyncOperation header. - AsyncURL string `json:"asyncURL"` - - // The URL from Location header. - LocURL string `json:"locURL"` - - // The URL from the initial LRO request. - OrigURL string `json:"origURL"` - - // The HTTP method from the initial LRO request. - Method string `json:"method"` - - // The value of final-state-via from swagger, can be the empty string. - FinalState pollers.FinalStateVia `json:"finalState"` - - // The LRO's current state. - CurState string `json:"state"` -} - -// New creates a new Poller from the provided initial response and final-state type. -// Pass nil for response to create an empty Poller for rehydration. -func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { - if resp == nil { - log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") - return &Poller[T]{pl: pl}, nil - } - log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") - asyncURL := resp.Header.Get(shared.HeaderAzureAsync) - if asyncURL == "" { - return nil, errors.New("response is missing Azure-AsyncOperation header") - } - if !poller.IsValidURL(asyncURL) { - return nil, fmt.Errorf("invalid polling URL %s", asyncURL) - } - // check for provisioning state. if the operation is a RELO - // and terminates synchronously this will prevent extra polling. - // it's ok if there's no provisioning state. - state, _ := poller.GetProvisioningState(resp) - if state == "" { - state = poller.StatusInProgress - } - p := &Poller[T]{ - pl: pl, - resp: resp, - AsyncURL: asyncURL, - LocURL: resp.Header.Get(shared.HeaderLocation), - OrigURL: resp.Request.URL.String(), - Method: resp.Request.Method, - FinalState: finalState, - CurState: state, - } - return p, nil -} - -// Done returns true if the LRO is in a terminal state. -func (p *Poller[T]) Done() bool { - return poller.IsTerminalState(p.CurState) -} - -// Poll retrieves the current state of the LRO. 
-func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { - err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { - if !poller.StatusCodeValid(resp) { - p.resp = resp - return "", exported.NewResponseError(resp) - } - state, err := poller.GetStatus(resp) - if err != nil { - return "", err - } else if state == "" { - return "", errors.New("the response did not contain a status") - } - p.resp = resp - p.CurState = state - return p.CurState, nil - }) - if err != nil { - return nil, err - } - return p.resp, nil -} - -func (p *Poller[T]) Result(ctx context.Context, out *T) error { - if p.resp.StatusCode == http.StatusNoContent { - return nil - } else if poller.Failed(p.CurState) { - return exported.NewResponseError(p.resp) - } - var req *exported.Request - var err error - if p.Method == http.MethodPatch || p.Method == http.MethodPut { - // for PATCH and PUT, the final GET is on the original resource URL - req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) - } else if p.Method == http.MethodPost { - if p.FinalState == pollers.FinalStateViaAzureAsyncOp { - // no final GET required - } else if p.FinalState == pollers.FinalStateViaOriginalURI { - req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) - } else if p.LocURL != "" { - // ideally FinalState would be set to "location" but it isn't always. - // must check last due to more permissive condition. - req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) - } - } - if err != nil { - return err - } - - // if a final GET request has been created, execute it - if req != nil { - resp, err := p.pl.Do(req) - if err != nil { - return err - } - p.resp = resp - } - - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go deleted file mode 100644 index 2bb9e105b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ /dev/null @@ -1,135 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package body - -import ( - "context" - "errors" - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" - "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" -) - -// Kind is the identifier of this type in a resume token. -const kind = "body" - -// Applicable returns true if the LRO is using no headers, just provisioning state. -// This is only applicable to PATCH and PUT methods and assumes no polling headers. -func Applicable(resp *http.Response) bool { - // we can't check for absense of headers due to some misbehaving services - // like redis that return a Location header but don't actually use that protocol - return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut -} - -// CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { - t, ok := token["type"] - if !ok { - return false - } - tt, ok := t.(string) - if !ok { - return false - } - return tt == kind -} - -// Poller is an LRO poller that uses the Body pattern. 
-type Poller[T any] struct { - pl exported.Pipeline - - resp *http.Response - - // The poller's type, used for resume token processing. - Type string `json:"type"` - - // The URL for polling. - PollURL string `json:"pollURL"` - - // The LRO's current state. - CurState string `json:"state"` -} - -// New creates a new Poller from the provided initial response. -// Pass nil for response to create an empty Poller for rehydration. -func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { - if resp == nil { - log.Write(log.EventLRO, "Resuming Body poller.") - return &Poller[T]{pl: pl}, nil - } - log.Write(log.EventLRO, "Using Body poller.") - p := &Poller[T]{ - pl: pl, - resp: resp, - Type: kind, - PollURL: resp.Request.URL.String(), - } - // default initial state to InProgress. depending on the HTTP - // status code and provisioning state, we might change the value. - curState := poller.StatusInProgress - provState, err := poller.GetProvisioningState(resp) - if err != nil && !errors.Is(err, poller.ErrNoBody) { - return nil, err - } - if resp.StatusCode == http.StatusCreated && provState != "" { - // absense of provisioning state is ok for a 201, means the operation is in progress - curState = provState - } else if resp.StatusCode == http.StatusOK { - if provState != "" { - curState = provState - } else if provState == "" { - // for a 200, absense of provisioning state indicates success - curState = poller.StatusSucceeded - } - } else if resp.StatusCode == http.StatusNoContent { - curState = poller.StatusSucceeded - } - p.CurState = curState - return p, nil -} - -func (p *Poller[T]) Done() bool { - return poller.IsTerminalState(p.CurState) -} - -func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { - err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { - if !poller.StatusCodeValid(resp) { - p.resp = resp - return "", exported.NewResponseError(resp) - } - if resp.StatusCode == http.StatusNoContent { - p.resp = resp - p.CurState = poller.StatusSucceeded - return p.CurState, nil - } - state, err := poller.GetProvisioningState(resp) - if errors.Is(err, poller.ErrNoBody) { - // a missing response body in non-204 case is an error - return "", err - } else if state == "" { - // a response body without provisioning state is considered terminal success - state = poller.StatusSucceeded - } else if err != nil { - return "", err - } - p.resp = resp - p.CurState = state - return p.CurState, nil - }) - if err != nil { - return nil, err - } - return p.resp, nil -} - -func (p *Poller[T]) Result(ctx context.Context, out *T) error { - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go deleted file mode 100644 index 259834718..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go +++ /dev/null @@ -1,133 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package fake - -import ( - "context" - "errors" - "fmt" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" -) - -// Applicable returns true if the LRO is a fake. -func Applicable(resp *http.Response) bool { - return resp.Header.Get(shared.HeaderFakePollerStatus) != "" -} - -// CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { - _, ok := token["fakeURL"] - return ok -} - -// Poller is an LRO poller that uses the Core-Fake-Poller pattern. -type Poller[T any] struct { - pl exported.Pipeline - - resp *http.Response - - // The API name from CtxAPINameKey - APIName string `json:"apiName"` - - // The URL from Core-Fake-Poller header. - FakeURL string `json:"fakeURL"` - - // The LRO's current state. - FakeStatus string `json:"status"` -} - -// lroStatusURLSuffix is the URL path suffix for a faked LRO. -const lroStatusURLSuffix = "/get/fake/status" - -// New creates a new Poller from the provided initial response. -// Pass nil for response to create an empty Poller for rehydration. -func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { - if resp == nil { - log.Write(log.EventLRO, "Resuming Core-Fake-Poller poller.") - return &Poller[T]{pl: pl}, nil - } - - log.Write(log.EventLRO, "Using Core-Fake-Poller poller.") - fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) - if fakeStatus == "" { - return nil, errors.New("response is missing Fake-Poller-Status header") - } - - ctxVal := resp.Request.Context().Value(shared.CtxAPINameKey{}) - if ctxVal == nil { - return nil, errors.New("missing value for CtxAPINameKey") - } - - apiName, ok := ctxVal.(string) - if !ok { - return nil, fmt.Errorf("expected string for CtxAPINameKey, the type was %T", ctxVal) - } - - qp := "" - if resp.Request.URL.RawQuery != "" { - qp = "?" + resp.Request.URL.RawQuery - } - - p := &Poller[T]{ - pl: pl, - resp: resp, - APIName: apiName, - // NOTE: any changes to this path format MUST be reflected in SanitizePollerPath() - FakeURL: fmt.Sprintf("%s://%s%s%s%s", resp.Request.URL.Scheme, resp.Request.URL.Host, resp.Request.URL.Path, lroStatusURLSuffix, qp), - FakeStatus: fakeStatus, - } - return p, nil -} - -// Done returns true if the LRO is in a terminal state. -func (p *Poller[T]) Done() bool { - return poller.IsTerminalState(p.FakeStatus) -} - -// Poll retrieves the current state of the LRO. 
-func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { - ctx = context.WithValue(ctx, shared.CtxAPINameKey{}, p.APIName) - err := pollers.PollHelper(ctx, p.FakeURL, p.pl, func(resp *http.Response) (string, error) { - if !poller.StatusCodeValid(resp) { - p.resp = resp - return "", exported.NewResponseError(resp) - } - fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) - if fakeStatus == "" { - return "", errors.New("response is missing Fake-Poller-Status header") - } - p.resp = resp - p.FakeStatus = fakeStatus - return p.FakeStatus, nil - }) - if err != nil { - return nil, err - } - return p.resp, nil -} - -func (p *Poller[T]) Result(ctx context.Context, out *T) error { - if p.resp.StatusCode == http.StatusNoContent { - return nil - } else if poller.Failed(p.FakeStatus) { - return exported.NewResponseError(p.resp) - } - - return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out) -} - -// SanitizePollerPath removes any fake-appended suffix from a URL's path. -func SanitizePollerPath(path string) string { - return strings.TrimSuffix(path, lroStatusURLSuffix) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go deleted file mode 100644 index d6be89876..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ /dev/null @@ -1,119 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package loc - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" -) - -// Kind is the identifier of this type in a resume token. -const kind = "loc" - -// Applicable returns true if the LRO is using Location. -func Applicable(resp *http.Response) bool { - return resp.Header.Get(shared.HeaderLocation) != "" -} - -// CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { - t, ok := token["type"] - if !ok { - return false - } - tt, ok := t.(string) - if !ok { - return false - } - return tt == kind -} - -// Poller is an LRO poller that uses the Location pattern. -type Poller[T any] struct { - pl exported.Pipeline - resp *http.Response - - Type string `json:"type"` - PollURL string `json:"pollURL"` - CurState string `json:"state"` -} - -// New creates a new Poller from the provided initial response. -// Pass nil for response to create an empty Poller for rehydration. -func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { - if resp == nil { - log.Write(log.EventLRO, "Resuming Location poller.") - return &Poller[T]{pl: pl}, nil - } - log.Write(log.EventLRO, "Using Location poller.") - locURL := resp.Header.Get(shared.HeaderLocation) - if locURL == "" { - return nil, errors.New("response is missing Location header") - } - if !poller.IsValidURL(locURL) { - return nil, fmt.Errorf("invalid polling URL %s", locURL) - } - // check for provisioning state. if the operation is a RELO - // and terminates synchronously this will prevent extra polling. - // it's ok if there's no provisioning state. 
- state, _ := poller.GetProvisioningState(resp) - if state == "" { - state = poller.StatusInProgress - } - return &Poller[T]{ - pl: pl, - resp: resp, - Type: kind, - PollURL: locURL, - CurState: state, - }, nil -} - -func (p *Poller[T]) Done() bool { - return poller.IsTerminalState(p.CurState) -} - -func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { - err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { - // location polling can return an updated polling URL - if h := resp.Header.Get(shared.HeaderLocation); h != "" { - p.PollURL = h - } - // if provisioning state is available, use that. this is only - // for some ARM LRO scenarios (e.g. DELETE with a Location header) - // so if it's missing then use HTTP status code. - provState, _ := poller.GetProvisioningState(resp) - p.resp = resp - if provState != "" { - p.CurState = provState - } else if resp.StatusCode == http.StatusAccepted { - p.CurState = poller.StatusInProgress - } else if resp.StatusCode > 199 && resp.StatusCode < 300 { - // any 2xx other than a 202 indicates success - p.CurState = poller.StatusSucceeded - } else { - p.CurState = poller.StatusFailed - } - return p.CurState, nil - }) - if err != nil { - return nil, err - } - return p.resp, nil -} - -func (p *Poller[T]) Result(ctx context.Context, out *T) error { - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go deleted file mode 100644 index 1bc7ad0ac..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ /dev/null @@ -1,145 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package op - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" -) - -// Applicable returns true if the LRO is using Operation-Location. -func Applicable(resp *http.Response) bool { - return resp.Header.Get(shared.HeaderOperationLocation) != "" -} - -// CanResume returns true if the token can rehydrate this poller type. -func CanResume(token map[string]interface{}) bool { - _, ok := token["oplocURL"] - return ok -} - -// Poller is an LRO poller that uses the Operation-Location pattern. -type Poller[T any] struct { - pl exported.Pipeline - resp *http.Response - - OpLocURL string `json:"oplocURL"` - LocURL string `json:"locURL"` - OrigURL string `json:"origURL"` - Method string `json:"method"` - FinalState pollers.FinalStateVia `json:"finalState"` - CurState string `json:"state"` -} - -// New creates a new Poller from the provided initial response. -// Pass nil for response to create an empty Poller for rehydration. 
-func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { - if resp == nil { - log.Write(log.EventLRO, "Resuming Operation-Location poller.") - return &Poller[T]{pl: pl}, nil - } - log.Write(log.EventLRO, "Using Operation-Location poller.") - opURL := resp.Header.Get(shared.HeaderOperationLocation) - if opURL == "" { - return nil, errors.New("response is missing Operation-Location header") - } - if !poller.IsValidURL(opURL) { - return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) - } - locURL := resp.Header.Get(shared.HeaderLocation) - // Location header is optional - if locURL != "" && !poller.IsValidURL(locURL) { - return nil, fmt.Errorf("invalid Location URL %s", locURL) - } - // default initial state to InProgress. if the - // service sent us a status then use that instead. - curState := poller.StatusInProgress - status, err := poller.GetStatus(resp) - if err != nil && !errors.Is(err, poller.ErrNoBody) { - return nil, err - } - if status != "" { - curState = status - } - - return &Poller[T]{ - pl: pl, - resp: resp, - OpLocURL: opURL, - LocURL: locURL, - OrigURL: resp.Request.URL.String(), - Method: resp.Request.Method, - FinalState: finalState, - CurState: curState, - }, nil -} - -func (p *Poller[T]) Done() bool { - return poller.IsTerminalState(p.CurState) -} - -func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { - err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { - if !poller.StatusCodeValid(resp) { - p.resp = resp - return "", exported.NewResponseError(resp) - } - state, err := poller.GetStatus(resp) - if err != nil { - return "", err - } else if state == "" { - return "", errors.New("the response did not contain a status") - } - p.resp = resp - p.CurState = state - return p.CurState, nil - }) - if err != nil { - return nil, err - } - return p.resp, nil -} - -func (p *Poller[T]) Result(ctx context.Context, out *T) error { - var req *exported.Request - var err error - if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { - req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) - } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { - // no final GET required, terminal response should have it - } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { - return rlErr - } else if rl != "" { - req, err = exported.NewRequest(ctx, http.MethodGet, rl) - } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { - req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) - } else if p.Method == http.MethodPost && p.LocURL != "" { - req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) - } - if err != nil { - return err - } - - // if a final GET request has been created, execute it - if req != nil { - resp, err := p.pl.Do(req) - if err != nil { - return err - } - p.resp = resp - } - - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go deleted file mode 100644 index 37ed647f4..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
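Each poller above advertises itself through an Applicable check: the Azure-AsyncOperation, Operation-Location or Location header, or, for the body pattern, a PUT/PATCH request carrying provisioning state (the test-only fake poller is left out here). A standalone sketch of that dispatch; the precedence order shown is an assumption for illustration, since the actual selection happens in runtime code outside this hunk.

package main

import (
	"fmt"
	"net/http"
)

// lroPattern mimics the Applicable checks of the pollers above to show which
// long-running-operation pattern a given initial response would select.
func lroPattern(resp *http.Response) string {
	switch {
	case resp.Header.Get("Azure-AsyncOperation") != "":
		return "azure-async-operation"
	case resp.Header.Get("Operation-Location") != "":
		return "operation-location"
	case resp.Header.Get("Location") != "":
		return "location"
	case resp.Request.Method == http.MethodPut || resp.Request.Method == http.MethodPatch:
		return "body (provisioning state)"
	default:
		return "none (synchronous completion)"
	}
}

func main() {
	req, _ := http.NewRequest(http.MethodPut, "https://example.com/widgets/1", nil)
	resp := &http.Response{StatusCode: http.StatusCreated, Header: http.Header{}, Request: req}
	resp.Header.Set("Azure-AsyncOperation", "https://example.com/operations/42")
	fmt.Println(lroPattern(resp)) // azure-async-operation
}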
- -package pollers - -// FinalStateVia is the enumerated type for the possible final-state-via values. -type FinalStateVia string - -const ( - // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. - FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" - - // FinalStateViaLocation indicates the final payload comes from the Location URL. - FinalStateViaLocation FinalStateVia = "location" - - // FinalStateViaOriginalURI indicates the final payload comes from the original URL. - FinalStateViaOriginalURI FinalStateVia = "original-uri" - - // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. - FinalStateViaOpLocation FinalStateVia = "operation-location" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go deleted file mode 100644 index d8d86a46c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ /dev/null @@ -1,187 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package pollers - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "reflect" - - azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" -) - -// getTokenTypeName creates a type name from the type parameter T. -func getTokenTypeName[T any]() (string, error) { - tt := shared.TypeOfT[T]() - var n string - if tt.Kind() == reflect.Pointer { - n = "*" - tt = tt.Elem() - } - n += tt.Name() - if n == "" { - return "", errors.New("nameless types are not allowed") - } - return n, nil -} - -type resumeTokenWrapper[T any] struct { - Type string `json:"type"` - Token T `json:"token"` -} - -// NewResumeToken creates a resume token from the specified type. -// An error is returned if the generic type has no name (e.g. struct{}). -func NewResumeToken[TResult, TSource any](from TSource) (string, error) { - n, err := getTokenTypeName[TResult]() - if err != nil { - return "", err - } - b, err := json.Marshal(resumeTokenWrapper[TSource]{ - Type: n, - Token: from, - }) - if err != nil { - return "", err - } - return string(b), nil -} - -// ExtractToken returns the poller-specific token information from the provided token value. -func ExtractToken(token string) ([]byte, error) { - raw := map[string]json.RawMessage{} - if err := json.Unmarshal([]byte(token), &raw); err != nil { - return nil, err - } - // this is dependent on the type resumeTokenWrapper[T] - tk, ok := raw["token"] - if !ok { - return nil, errors.New("missing token value") - } - return tk, nil -} - -// IsTokenValid returns an error if the specified token isn't applicable for generic type T. -func IsTokenValid[T any](token string) error { - raw := map[string]interface{}{} - if err := json.Unmarshal([]byte(token), &raw); err != nil { - return err - } - t, ok := raw["type"] - if !ok { - return errors.New("missing type value") - } - tt, ok := t.(string) - if !ok { - return fmt.Errorf("invalid type format %T", t) - } - n, err := getTokenTypeName[T]() - if err != nil { - return err - } - if tt != n { - return fmt.Errorf("cannot resume from this poller token. 
token is for type %s, not %s", tt, n) - } - return nil -} - -// used if the operation synchronously completed -type NopPoller[T any] struct { - resp *http.Response - result T -} - -// NewNopPoller creates a NopPoller from the provided response. -// It unmarshals the response body into an instance of T. -func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) { - np := &NopPoller[T]{resp: resp} - if resp.StatusCode == http.StatusNoContent { - return np, nil - } - payload, err := exported.Payload(resp, nil) - if err != nil { - return nil, err - } - if len(payload) == 0 { - return np, nil - } - if err = json.Unmarshal(payload, &np.result); err != nil { - return nil, err - } - return np, nil -} - -func (*NopPoller[T]) Done() bool { - return true -} - -func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) { - return p.resp, nil -} - -func (p *NopPoller[T]) Result(ctx context.Context, out *T) error { - *out = p.result - return nil -} - -// PollHelper creates and executes the request, calling update() with the response. -// If the request fails, the update func is not called. -// The update func returns the state of the operation for logging purposes or an error -// if it fails to extract the required state from the response. -func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error { - req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint) - if err != nil { - return err - } - resp, err := pl.Do(req) - if err != nil { - return err - } - state, err := update(resp) - if err != nil { - return err - } - log.Writef(log.EventLRO, "State %s", state) - return nil -} - -// ResultHelper processes the response as success or failure. -// In the success case, it unmarshals the payload into either a new instance of T or out. -// In the failure case, it creates an *azcore.Response error from the response. -func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { - // short-circuit the simple success case with no response body to unmarshal - if resp.StatusCode == http.StatusNoContent { - return nil - } - - defer resp.Body.Close() - if !poller.StatusCodeValid(resp) || failed { - // the LRO failed. unmarshall the error and update state - return azexported.NewResponseError(resp) - } - - // success case - payload, err := exported.Payload(resp, nil) - if err != nil { - return err - } - if len(payload) == 0 { - return nil - } - - if err = json.Unmarshal(payload, out); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go deleted file mode 100644 index 330bf9a60..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
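NewResumeToken, ExtractToken and IsTokenValid above all operate on a small JSON wrapper: a "type" discriminator plus a nested "token" payload. A sketch of that shape; the *WidgetResponse name and the sample token are invented for illustration, while the asyncURL and state keys mirror the JSON tags on the Azure-AsyncOperation poller shown earlier.

package main

import (
	"encoding/json"
	"fmt"
)

// tokenWrapper mirrors resumeTokenWrapper: "type" names the result type so a
// mismatched rehydration can be rejected, "token" carries the poller state.
type tokenWrapper struct {
	Type  string          `json:"type"`
	Token json.RawMessage `json:"token"`
}

func main() {
	raw := `{"type":"*WidgetResponse","token":{"asyncURL":"https://example.com/operations/42","state":"InProgress"}}`

	var w tokenWrapper
	if err := json.Unmarshal([]byte(raw), &w); err != nil {
		panic(err)
	}
	fmt.Println("result type:", w.Type)
	fmt.Println("poller state:", string(w.Token)) // what ExtractToken would hand back
}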
- -package shared - -const ( - ContentTypeAppJSON = "application/json" - ContentTypeAppXML = "application/xml" - ContentTypeTextPlain = "text/plain" -) - -const ( - HeaderAuthorization = "Authorization" - HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary" - HeaderAzureAsync = "Azure-AsyncOperation" - HeaderContentLength = "Content-Length" - HeaderContentType = "Content-Type" - HeaderFakePollerStatus = "Fake-Poller-Status" - HeaderLocation = "Location" - HeaderOperationLocation = "Operation-Location" - HeaderRetryAfter = "Retry-After" - HeaderRetryAfterMS = "Retry-After-Ms" - HeaderUserAgent = "User-Agent" - HeaderWWWAuthenticate = "WWW-Authenticate" - HeaderXMSClientRequestID = "x-ms-client-request-id" - HeaderXMSRequestID = "x-ms-request-id" - HeaderXMSErrorCode = "x-ms-error-code" - HeaderXMSRetryAfterMS = "x-ms-retry-after-ms" -) - -const BearerTokenPrefix = "Bearer " - -const TracingNamespaceAttrName = "az.namespace" - -const ( - // Module is the name of the calling module used in telemetry data. - Module = "azcore" - - // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.10.0" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go deleted file mode 100644 index d3da2c5fd..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go +++ /dev/null @@ -1,149 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package shared - -import ( - "context" - "fmt" - "net/http" - "reflect" - "regexp" - "strconv" - "time" -) - -// NOTE: when adding a new context key type, it likely needs to be -// added to the deny-list of key types in ContextWithDeniedValues - -// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. -type CtxWithHTTPHeaderKey struct{} - -// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. -type CtxWithRetryOptionsKey struct{} - -// CtxWithCaptureResponse is used as a context key for retrieving the raw response. -type CtxWithCaptureResponse struct{} - -// CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer. -type CtxWithTracingTracer struct{} - -// CtxAPINameKey is used as a context key for adding/retrieving the API name. -type CtxAPINameKey struct{} - -// Delay waits for the duration to elapse or the context to be cancelled. -func Delay(ctx context.Context, delay time.Duration) error { - select { - case <-time.After(delay): - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value. -// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after -func RetryAfter(resp *http.Response) time.Duration { - if resp == nil { - return 0 - } - - type retryData struct { - header string - units time.Duration - - // custom is used when the regular algorithm failed and is optional. - // the returned duration is used verbatim (units is not applied). 
- custom func(string) time.Duration - } - - nop := func(string) time.Duration { return 0 } - - // the headers are listed in order of preference - retries := []retryData{ - { - header: HeaderRetryAfterMS, - units: time.Millisecond, - custom: nop, - }, - { - header: HeaderXMSRetryAfterMS, - units: time.Millisecond, - custom: nop, - }, - { - header: HeaderRetryAfter, - units: time.Second, - - // retry-after values are expressed in either number of - // seconds or an HTTP-date indicating when to try again - custom: func(ra string) time.Duration { - t, err := time.Parse(time.RFC1123, ra) - if err != nil { - return 0 - } - return time.Until(t) - }, - }, - } - - for _, retry := range retries { - v := resp.Header.Get(retry.header) - if v == "" { - continue - } - if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 { - return time.Duration(retryAfter) * retry.units - } else if d := retry.custom(v); d > 0 { - return d - } - } - - return 0 -} - -// TypeOfT returns the type of the generic type param. -func TypeOfT[T any]() reflect.Type { - // you can't, at present, obtain the type of - // a type parameter, so this is the trick - return reflect.TypeOf((*T)(nil)).Elem() -} - -// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. -type TransportFunc func(*http.Request) (*http.Response, error) - -// Do implements the Transporter interface for the TransportFunc type. -func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { - return pf(req) -} - -// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string. -func ValidateModVer(moduleVersion string) error { - modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`) - if !modVerRegx.MatchString(moduleVersion) { - return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion) - } - return nil -} - -// ContextWithDeniedValues wraps an existing [context.Context], denying access to certain context values. -// Pipeline policies that create new requests to be sent down their own pipeline MUST wrap the caller's -// context with an instance of this type. This is to prevent context values from flowing across disjoint -// requests which can have unintended side-effects. -type ContextWithDeniedValues struct { - context.Context -} - -// Value implements part of the [context.Context] interface. -// It acts as a deny-list for certain context keys. -func (c *ContextWithDeniedValues) Value(key any) any { - switch key.(type) { - case CtxAPINameKey, CtxWithCaptureResponse, CtxWithHTTPHeaderKey, CtxWithRetryOptionsKey, CtxWithTracingTracer: - return nil - default: - return c.Context.Value(key) - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go deleted file mode 100644 index 2f3901bff..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. - -// Package log contains functionality for configuring logging behavior. -// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all". 
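The RetryAfter helper above prefers the millisecond headers (Retry-After-Ms, x-ms-retry-after-ms) over the standard Retry-After header, which may carry either whole seconds or an HTTP-date. A standalone sketch of that precedence, reimplemented for illustration rather than calling the internal shared package:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryAfter mirrors the vendored shared.RetryAfter above: millisecond headers
// win, then Retry-After as integer seconds, then Retry-After as an HTTP-date.
func retryAfter(resp *http.Response) time.Duration {
	if resp == nil {
		return 0
	}
	for _, h := range []string{"Retry-After-Ms", "x-ms-retry-after-ms"} {
		if v := resp.Header.Get(h); v != "" {
			if ms, err := strconv.Atoi(v); err == nil && ms > 0 {
				return time.Duration(ms) * time.Millisecond
			}
		}
	}
	if v := resp.Header.Get("Retry-After"); v != "" {
		if sec, err := strconv.Atoi(v); err == nil && sec > 0 {
			return time.Duration(sec) * time.Second
		}
		if t, err := time.Parse(time.RFC1123, v); err == nil {
			return time.Until(t)
		}
	}
	return 0
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("Retry-After", "30")
	fmt.Println(retryAfter(resp)) // 30s
}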
-package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go deleted file mode 100644 index f260dac36..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// Package log provides functionality for configuring logging facilities. -package log - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" -) - -// Event is used to group entries. Each group can be toggled on or off. -type Event = log.Event - -const ( - // EventRequest entries contain information about HTTP requests. - // This includes information like the URL, query parameters, and headers. - EventRequest Event = "Request" - - // EventResponse entries contain information about HTTP responses. - // This includes information like the HTTP status code, headers, and request URL. - EventResponse Event = "Response" - - // EventResponseError entries contain information about HTTP responses that returned - // an *azcore.ResponseError (i.e. responses with a non 2xx HTTP status code). - // This includes the contents of ResponseError.Error(). - EventResponseError Event = "ResponseError" - - // EventRetryPolicy entries contain information specific to the retry policy in use. - EventRetryPolicy Event = "Retry" - - // EventLRO entries contain information specific to long-running operations. - // This includes information like polling location, operation state, and sleep intervals. - EventLRO Event = "LongRunningOperation" -) - -// SetEvents is used to control which events are written to -// the log. By default all log events are writen. -// NOTE: this is not goroutine safe and should be called before using SDK clients. -func SetEvents(cls ...Event) { - log.SetEvents(cls...) -} - -// SetListener will set the Logger to write to the specified Listener. -// NOTE: this is not goroutine safe and should be called before using SDK clients. -func SetListener(lst func(Event, string)) { - log.SetListener(lst) -} - -// for testing purposes -func resetEvents() { - log.TestResetEvents() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go deleted file mode 100644 index fad2579ed..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. - -// Package policy contains the definitions needed for configuring in-box pipeline policies -// and creating custom policies. -package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go deleted file mode 100644 index d934f1dc5..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ /dev/null @@ -1,187 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
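The log package above is the public knob for SDK logging (its doc.go also mentions AZURE_SDK_GO_LOGGING=all for default stderr output). A minimal sketch that routes output to a custom listener and limits it to retry and LRO events, using only identifiers shown above:

package main

import (
	"fmt"

	azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
)

func main() {
	// Both calls must run before any SDK client is used; they are not goroutine safe.
	azlog.SetListener(func(event azlog.Event, msg string) {
		fmt.Printf("[%s] %s\n", event, msg)
	})
	azlog.SetEvents(azlog.EventRetryPolicy, azlog.EventLRO)
}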
- -package policy - -import ( - "context" - "net/http" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" -) - -// Policy represents an extensibility point for the Pipeline that can mutate the specified -// Request and react to the received Response. -type Policy = exported.Policy - -// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. -type Transporter = exported.Transporter - -// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. -// Don't use this type directly, use runtime.NewRequest() instead. -type Request = exported.Request - -// ClientOptions contains optional settings for a client's pipeline. -// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. -// Zero-value fields will have their specified default values applied during use. -type ClientOptions struct { - // APIVersion overrides the default version requested of the service. - // Set with caution as this package version has not been tested with arbitrary service versions. - APIVersion string - - // Cloud specifies a cloud for the client. The default is Azure Public Cloud. - Cloud cloud.Configuration - - // Logging configures the built-in logging policy. - Logging LogOptions - - // Retry configures the built-in retry policy. - Retry RetryOptions - - // Telemetry configures the built-in telemetry policy. - Telemetry TelemetryOptions - - // TracingProvider configures the tracing provider. - // It defaults to a no-op tracer. - TracingProvider tracing.Provider - - // Transport sets the transport for HTTP requests. - Transport Transporter - - // PerCallPolicies contains custom policies to inject into the pipeline. - // Each policy is executed once per request. - PerCallPolicies []Policy - - // PerRetryPolicies contains custom policies to inject into the pipeline. - // Each policy is executed once per request, and for each retry of that request. - PerRetryPolicies []Policy -} - -// LogOptions configures the logging policy's behavior. -type LogOptions struct { - // IncludeBody indicates if request and response bodies should be included in logging. - // The default value is false. - // NOTE: enabling this can lead to disclosure of sensitive information, use with care. - IncludeBody bool - - // AllowedHeaders is the slice of headers to log with their values intact. - // All headers not in the slice will have their values REDACTED. - // Applies to request and response headers. - AllowedHeaders []string - - // AllowedQueryParams is the slice of query parameters to log with their values intact. - // All query parameters not in the slice will have their values REDACTED. - AllowedQueryParams []string -} - -// RetryOptions configures the retry policy's behavior. -// Zero-value fields will have their specified default values applied during use. -// This allows for modification of a subset of fields. -type RetryOptions struct { - // MaxRetries specifies the maximum number of attempts a failed operation will be retried - // before producing an error. - // The default value is three. A value less than zero means one try and no retries. - MaxRetries int32 - - // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. - // This is disabled by default. 
Specify a value greater than zero to enable. - // NOTE: Setting this to a small value might cause premature HTTP request time-outs. - TryTimeout time.Duration - - // RetryDelay specifies the initial amount of delay to use before retrying an operation. - // The value is used only if the HTTP response does not contain a Retry-After header. - // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. - // The default value is four seconds. A value less than zero means no delay between retries. - RetryDelay time.Duration - - // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. - // Typically the value is greater than or equal to the value specified in RetryDelay. - // The default Value is 60 seconds. A value less than zero means there is no cap. - MaxRetryDelay time.Duration - - // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. - // A nil slice will use the following values. - // http.StatusRequestTimeout 408 - // http.StatusTooManyRequests 429 - // http.StatusInternalServerError 500 - // http.StatusBadGateway 502 - // http.StatusServiceUnavailable 503 - // http.StatusGatewayTimeout 504 - // Specifying values will replace the default values. - // Specifying an empty slice will disable retries for HTTP status codes. - StatusCodes []int - - // ShouldRetry evaluates if the retry policy should retry the request. - // When specified, the function overrides comparison against the list of - // HTTP status codes and error checking within the retry policy. Context - // and NonRetriable errors remain evaluated before calling ShouldRetry. - // The *http.Response and error parameters are mutually exclusive, i.e. - // if one is nil, the other is not nil. - // A return value of true means the retry policy should retry. - ShouldRetry func(*http.Response, error) bool -} - -// TelemetryOptions configures the telemetry policy's behavior. -type TelemetryOptions struct { - // ApplicationID is an application-specific identification string to add to the User-Agent. - // It has a maximum length of 24 characters and must not contain any spaces. - ApplicationID string - - // Disabled will prevent the addition of any telemetry data to the User-Agent. - Disabled bool -} - -// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. -type TokenRequestOptions = exported.TokenRequestOptions - -// BearerTokenOptions configures the bearer token policy's behavior. -type BearerTokenOptions struct { - // AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request. - // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from - // its given credential. - AuthorizationHandler AuthorizationHandler -} - -// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. -type AuthorizationHandler struct { - // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token - // from the policy's given credential. Implementations that need to perform I/O should use the Request's context, - // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't - // send the request. 
When OnRequest is nil, the policy follows its default behavior, authorizing the request with a - // token from its credential according to its configuration. - OnRequest func(*Request, func(TokenRequestOptions) error) error - - // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the - // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible - // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's - // given credential. Implementations that need to perform I/O should use the Request's context, available from - // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, - // the policy will return any 401 response to the client. - OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error -} - -// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. -// The resp parameter will contain the HTTP response after the request has completed. -func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { - return context.WithValue(parent, shared.CtxWithCaptureResponse{}, resp) -} - -// WithHTTPHeader adds the specified http.Header to the parent context. -// Use this to specify custom HTTP headers at the API-call level. -// Any overlapping headers will have their values replaced with the values specified here. -func WithHTTPHeader(parent context.Context, header http.Header) context.Context { - return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) -} - -// WithRetryOptions adds the specified RetryOptions to the parent context. -// Use this to specify custom RetryOptions at the API-call level. -func WithRetryOptions(parent context.Context, options RetryOptions) context.Context { - return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go deleted file mode 100644 index c9cfa438c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. - -// Package runtime contains various facilities for creating requests and handling responses. -// The content is intended for SDK authors. -package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go deleted file mode 100644 index c0d56158e..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" -) - -// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. -// Call this when a service request returns a non-successful status code. 
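WithRetryOptions, WithHTTPHeader and WithCaptureResponse above attach per-call overrides to a context.Context. A sketch of wiring them up before an SDK call; the client call itself is elided and the correlation header name is made up for illustration:

package main

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func main() {
	ctx := context.Background()

	// Override the retry policy for just this call.
	ctx = policy.WithRetryOptions(ctx, policy.RetryOptions{
		MaxRetries: 5,
		RetryDelay: 2 * time.Second,
	})

	// Add a custom header and capture the raw response once the call finishes.
	hdr := http.Header{}
	hdr.Set("x-example-correlation", "demo-123")
	ctx = policy.WithHTTPHeader(ctx, hdr)

	var raw *http.Response
	ctx = policy.WithCaptureResponse(ctx, &raw)

	_ = ctx // pass ctx to an SDK client method; raw is populated after it returns
}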
-// The error code will be extracted from the *http.Response, either from the x-ms-error-code -// header (preferred) or attempted to be parsed from the response body. -func NewResponseError(resp *http.Response) error { - return exported.NewResponseError(resp) -} - -// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode. -// Use this variant when the error code is in a non-standard location. -func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error { - return exported.NewResponseErrorWithErrorCode(resp, errorCode) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go deleted file mode 100644 index cffe692d7..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ /dev/null @@ -1,128 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "reflect" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" -) - -// PagingHandler contains the required data for constructing a Pager. -type PagingHandler[T any] struct { - // More returns a boolean indicating if there are more pages to fetch. - // It uses the provided page to make the determination. - More func(T) bool - - // Fetcher fetches the first and subsequent pages. - Fetcher func(context.Context, *T) (T, error) - - // Tracer contains the Tracer from the client that's creating the Pager. - Tracer tracing.Tracer -} - -// Pager provides operations for iterating over paged responses. -type Pager[T any] struct { - current *T - handler PagingHandler[T] - tracer tracing.Tracer - firstPage bool -} - -// NewPager creates an instance of Pager using the specified PagingHandler. -// Pass a non-nil T for firstPage if the first page has already been retrieved. -func NewPager[T any](handler PagingHandler[T]) *Pager[T] { - return &Pager[T]{ - handler: handler, - tracer: handler.Tracer, - firstPage: true, - } -} - -// More returns true if there are more pages to retrieve. -func (p *Pager[T]) More() bool { - if p.current != nil { - return p.handler.More(*p.current) - } - return true -} - -// NextPage advances the pager to the next page. -func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { - if p.current != nil { - if p.firstPage { - // we get here if it's an LRO-pager, we already have the first page - p.firstPage = false - return *p.current, nil - } else if !p.handler.More(*p.current) { - return *new(T), errors.New("no more pages") - } - } else { - // non-LRO case, first page - p.firstPage = false - } - - var err error - ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) - defer func() { endSpan(err) }() - - resp, err := p.handler.Fetcher(ctx, p.current) - if err != nil { - return *new(T), err - } - p.current = &resp - return *p.current, nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. -func (p *Pager[T]) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, &p.current) -} - -// FetcherForNextLinkOptions contains the optional values for [FetcherForNextLink]. -type FetcherForNextLinkOptions struct { - // NextReq is the func to be called when requesting subsequent pages. 
- // Used for paged operations that have a custom next link operation. - NextReq func(context.Context, string) (*policy.Request, error) -} - -// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. -// - ctx is the [context.Context] controlling the lifetime of the HTTP operation -// - pl is the [Pipeline] used to dispatch the HTTP request -// - nextLink is the URL used to fetch the next page. the empty string indicates the first page is to be requested -// - firstReq is the func to be called when creating the request for the first page -// - options contains any optional parameters, pass nil to accept the default values -func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { - var req *policy.Request - var err error - if nextLink == "" { - req, err = firstReq(ctx) - } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { - if options != nil && options.NextReq != nil { - req, err = options.NextReq(ctx, nextLink) - } else { - req, err = NewRequest(ctx, http.MethodGet, nextLink) - } - } - if err != nil { - return nil, err - } - resp, err := pl.Do(req) - if err != nil { - return nil, err - } - if !HasStatusCode(resp, http.StatusOK) { - return nil, NewResponseError(resp) - } - return resp, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go deleted file mode 100644 index 6b1f5c083..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go +++ /dev/null @@ -1,94 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// PipelineOptions contains Pipeline options for SDK developers -type PipelineOptions struct { - // AllowedHeaders is the slice of headers to log with their values intact. - // All headers not in the slice will have their values REDACTED. - // Applies to request and response headers. - AllowedHeaders []string - - // AllowedQueryParameters is the slice of query parameters to log with their values intact. - // All query parameters not in the slice will have their values REDACTED. - AllowedQueryParameters []string - - // APIVersion overrides the default version requested of the service. - // Set with caution as this package version has not been tested with arbitrary service versions. - APIVersion APIVersionOptions - - // PerCall contains custom policies to inject into the pipeline. - // Each policy is executed once per request. - PerCall []policy.Policy - - // PerRetry contains custom policies to inject into the pipeline. - // Each policy is executed once per request, and for each retry of that request. - PerRetry []policy.Policy - - // Tracing contains options used to configure distributed tracing. - Tracing TracingOptions -} - -// TracingOptions contains tracing options for SDK developers. -type TracingOptions struct { - // Namespace contains the value to use for the az.namespace span attribute. - Namespace string -} - -// Pipeline represents a primitive for sending HTTP requests and receiving responses. 
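Editor's note: the generic Pager[T]/PagingHandler[T] pair removed above drives paged list operations; More reports whether another page exists and Fetcher retrieves it (receiving nil for the first page). A rough usage sketch against that API, with a hypothetical widgetsPage type standing in for a generated response model.

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widgetsPage is a hypothetical page shape; generated clients define their own.
type widgetsPage struct {
	Items    []string
	NextLink *string
}

// listAll drains every page. fetch is called with nil for the first page and
// with the previous page afterwards, matching PagingHandler.Fetcher.
func listAll(ctx context.Context, fetch func(context.Context, *widgetsPage) (widgetsPage, error)) ([]string, error) {
	pager := runtime.NewPager(runtime.PagingHandler[widgetsPage]{
		More:    func(p widgetsPage) bool { return p.NextLink != nil && *p.NextLink != "" },
		Fetcher: fetch,
	})
	var all []string
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
	}
	return all, nil
}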
-// Its behavior can be extended by specifying policies during construction. -type Pipeline = exported.Pipeline - -// NewPipeline creates a pipeline from connection options, with any additional policies as specified. -// Policies from ClientOptions are placed after policies from PipelineOptions. -// The module and version parameters are used by the telemetry policy, when enabled. -func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { - cp := policy.ClientOptions{} - if options != nil { - cp = *options - } - if len(plOpts.AllowedHeaders) > 0 { - headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) - copy(headers, plOpts.AllowedHeaders) - headers = append(headers, cp.Logging.AllowedHeaders...) - cp.Logging.AllowedHeaders = headers - } - if len(plOpts.AllowedQueryParameters) > 0 { - qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) - copy(qp, plOpts.AllowedQueryParameters) - qp = append(qp, cp.Logging.AllowedQueryParams...) - cp.Logging.AllowedQueryParams = qp - } - // we put the includeResponsePolicy at the very beginning so that the raw response - // is populated with the final response (some policies might mutate the response) - policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)} - if cp.APIVersion != "" { - policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion)) - } - if !cp.Telemetry.Disabled { - policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) - } - policies = append(policies, plOpts.PerCall...) - policies = append(policies, cp.PerCallPolicies...) - policies = append(policies, NewRetryPolicy(&cp.Retry)) - policies = append(policies, plOpts.PerRetry...) - policies = append(policies, cp.PerRetryPolicies...) - policies = append(policies, exported.PolicyFunc(httpHeaderPolicy)) - policies = append(policies, newHTTPTracePolicy(cp.Logging.AllowedQueryParams)) - policies = append(policies, NewLogPolicy(&cp.Logging)) - policies = append(policies, exported.PolicyFunc(bodyDownloadPolicy)) - transport := cp.Transport - if transport == nil { - transport = defaultHTTPClient - } - return exported.NewPipeline(transport, policies...) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go deleted file mode 100644 index e5309aa6c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go +++ /dev/null @@ -1,75 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
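Editor's note: NewPipeline above assembles policies around the transport in a fixed order (response capture, API version, telemetry, per-call, retry, per-retry, custom headers, tracing, logging, body download). A minimal sketch of building and using a pipeline directly; the module name, version and URL are placeholders.

package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// get sends a single GET through a freshly built pipeline.
func get(ctx context.Context, url string) (*http.Response, error) {
	pl := runtime.NewPipeline("example-module", "v0.1.0", runtime.PipelineOptions{}, &policy.ClientOptions{})
	req, err := runtime.NewRequest(ctx, http.MethodGet, url) // url must already be encoded
	if err != nil {
		return nil, err
	}
	return pl.Do(req)
}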
- -package runtime - -import ( - "errors" - "fmt" - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// APIVersionOptions contains options for API versions -type APIVersionOptions struct { - // Location indicates where to set the version on a request, for example in a header or query param - Location APIVersionLocation - // Name is the name of the header or query parameter, for example "api-version" - Name string -} - -// APIVersionLocation indicates which part of a request identifies the service version -type APIVersionLocation int - -const ( - // APIVersionLocationQueryParam indicates a query parameter - APIVersionLocationQueryParam = 0 - // APIVersionLocationHeader indicates a header - APIVersionLocationHeader = 1 -) - -// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version -// isn't empty and opts.Name is empty, Do will return an error. -func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy { - if opts == nil { - opts = &APIVersionOptions{} - } - return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version} -} - -// apiVersionPolicy enables users to set the API version of every request a client sends. -type apiVersionPolicy struct { - // location indicates whether "name" refers to a query parameter or header. - location APIVersionLocation - - // name of the query param or header whose value should be overridden; provided by the client. - name string - - // version is the value (provided by the user) that replaces the default version value. - version string -} - -// Do sets the request's API version, if the policy is configured to do so, replacing any prior value. -func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) { - if a.version != "" { - if a.name == "" { - // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions - return nil, errors.New("this client doesn't support overriding its API version") - } - switch a.location { - case APIVersionLocationHeader: - req.Raw().Header.Set(a.name, a.version) - case APIVersionLocationQueryParam: - q := req.Raw().URL.Query() - q.Set(a.name, a.version) - req.Raw().URL.RawQuery = q.Encode() - default: - return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location) - } - } - return req.Next() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go deleted file mode 100644 index f0f280355..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "errors" - "net/http" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" - "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" -) - -// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. 
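Editor's note: the apiVersionPolicy above only takes effect when the client constructor wires PipelineOptions.APIVersion with a parameter name and location; otherwise a user-supplied ClientOptions.APIVersion is rejected. A sketch of that client-side wiring, assuming a query parameter named api-version.

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newVersionedPipeline wires the api-version override so a caller may set
// ClientOptions.APIVersion; without this wiring the policy returns an error.
func newVersionedPipeline(opts *policy.ClientOptions) runtime.Pipeline {
	plOpts := runtime.PipelineOptions{
		APIVersion: runtime.APIVersionOptions{
			Name:     "api-version", // parameter name is service-specific
			Location: runtime.APIVersionLocationQueryParam,
		},
	}
	return runtime.NewPipeline("example-module", "v0.1.0", plOpts, opts)
}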
-type BearerTokenPolicy struct { - // mainResource is the resource to be retreived using the tenant specified in the credential - mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] - // the following fields are read-only - authzHandler policy.AuthorizationHandler - cred exported.TokenCredential - scopes []string -} - -type acquiringResourceState struct { - req *policy.Request - p *BearerTokenPolicy - tro policy.TokenRequestOptions -} - -// acquire acquires or updates the resource; only one -// thread/goroutine at a time ever calls this function -func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { - tk, err := state.p.cred.GetToken(&shared.ContextWithDeniedValues{Context: state.req.Raw().Context()}, state.tro) - if err != nil { - return exported.AccessToken{}, time.Time{}, err - } - return tk, tk.ExpiresOn, nil -} - -// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. -// cred: an azcore.TokenCredential implementation such as a credential object from azidentity -// scopes: the list of permission scopes required for the token. -// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. -func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { - if opts == nil { - opts = &policy.BearerTokenOptions{} - } - return &BearerTokenPolicy{ - authzHandler: opts.AuthorizationHandler, - cred: cred, - scopes: scopes, - mainResource: temporal.NewResource(acquire), - } -} - -// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential -func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { - return func(tro policy.TokenRequestOptions) error { - as := acquiringResourceState{p: b, req: req, tro: tro} - tk, err := b.mainResource.Get(as) - if err != nil { - return err - } - req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) - return nil - } -} - -// Do authorizes a request with a bearer token -func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { - // skip adding the authorization header if no TokenCredential was provided. - // this prevents a panic that might be hard to diagnose and allows testing - // against http endpoints that don't require authentication. 
- if b.cred == nil { - return req.Next() - } - - if err := checkHTTPSForAuth(req); err != nil { - return nil, err - } - - var err error - if b.authzHandler.OnRequest != nil { - err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) - } else { - err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) - } - if err != nil { - return nil, errorinfo.NonRetriableError(err) - } - - res, err := req.Next() - if err != nil { - return nil, err - } - - if res.StatusCode == http.StatusUnauthorized { - b.mainResource.Expire() - if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { - if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { - res, err = req.Next() - } - } - } - if err != nil { - err = errorinfo.NonRetriableError(err) - } - return res, err -} - -func checkHTTPSForAuth(req *policy.Request) error { - if strings.ToLower(req.Raw().URL.Scheme) != "https" { - return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go deleted file mode 100644 index 99dc029f0..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "fmt" - "net/http" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" -) - -// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. -func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { - resp, err := req.Next() - if err != nil { - return resp, err - } - var opValues bodyDownloadPolicyOpValues - // don't skip downloading error response bodies - if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { - return resp, err - } - // Either bodyDownloadPolicyOpValues was not specified (so skip is false) - // or it was specified and skip is false: don't skip downloading the body - _, err = Payload(resp) - if err != nil { - return resp, newBodyDownloadError(err, req) - } - return resp, err -} - -// bodyDownloadPolicyOpValues is the struct containing the per-operation values -type bodyDownloadPolicyOpValues struct { - Skip bool -} - -type bodyDownloadError struct { - err error -} - -func newBodyDownloadError(err error, req *policy.Request) error { - // on failure, only retry the request for idempotent operations. - // we currently identify them as DELETE, GET, and PUT requests. 
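Editor's note: BearerTokenPolicy above acquires a token lazily, caches it, and expires it after a 401, optionally re-authorizing via the challenge handler. A rough sketch of plugging it into a pipeline; the azidentity credential and the scope are assumptions for illustration, any azcore TokenCredential works.

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newAuthedPipeline installs the bearer token policy as a per-retry policy.
func newAuthedPipeline(opts *policy.ClientOptions) (runtime.Pipeline, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil) // assumed credential source
	if err != nil {
		return runtime.Pipeline{}, err
	}
	// scope is illustrative; each service defines its own.
	bearer := runtime.NewBearerTokenPolicy(cred, []string{"https://storage.azure.com/.default"}, nil)
	return runtime.NewPipeline("example-module", "v0.1.0",
		runtime.PipelineOptions{PerRetry: []policy.Policy{bearer}}, opts), nil
}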
- if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { - // error is safe for retry - return err - } - // wrap error to avoid retries - return &bodyDownloadError{ - err: err, - } -} - -func (b *bodyDownloadError) Error() string { - return fmt.Sprintf("body download policy: %s", b.err.Error()) -} - -func (b *bodyDownloadError) NonRetriable() { - // marker method -} - -func (b *bodyDownloadError) Unwrap() error { - return b.err -} - -var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go deleted file mode 100644 index c230af0af..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "context" - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request -func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { - // check if any custom HTTP headers have been specified - if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { - for k, v := range header.(http.Header) { - // use Set to replace any existing value - // it also canonicalizes the header key - req.Raw().Header.Set(k, v[0]) - // add any remaining values - for i := 1; i < len(v); i++ { - req.Raw().Header.Add(k, v[i]) - } - } - } - return req.Next() -} - -// WithHTTPHeader adds the specified http.Header to the parent context. -// Use this to specify custom HTTP headers at the API-call level. -// Any overlapping headers will have their values replaced with the values specified here. -// Deprecated: use [policy.WithHTTPHeader] instead. -func WithHTTPHeader(parent context.Context, header http.Header) context.Context { - return policy.WithHTTPHeader(parent, header) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go deleted file mode 100644 index 3df1c1218..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" -) - -const ( - attrHTTPMethod = "http.method" - attrHTTPURL = "http.url" - attrHTTPUserAgent = "http.user_agent" - attrHTTPStatusCode = "http.status_code" - - attrAZClientReqID = "az.client_request_id" - attrAZServiceReqID = "az.service_request_id" - - attrNetPeerName = "net.peer.name" -) - -// newHTTPTracePolicy creates a new instance of the httpTracePolicy. 
-// - allowedQueryParams contains the user-specified query parameters that don't need to be redacted from the trace -func newHTTPTracePolicy(allowedQueryParams []string) exported.Policy { - return &httpTracePolicy{allowedQP: getAllowedQueryParams(allowedQueryParams)} -} - -// httpTracePolicy is a policy that creates a trace for the HTTP request and its response -type httpTracePolicy struct { - allowedQP map[string]struct{} -} - -// Do implements the pipeline.Policy interfaces for the httpTracePolicy type. -func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) { - rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) - if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { - attributes := []tracing.Attribute{ - {Key: attrHTTPMethod, Value: req.Raw().Method}, - {Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)}, - {Key: attrNetPeerName, Value: req.Raw().URL.Host}, - } - - if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { - attributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua}) - } - if reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != "" { - attributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID}) - } - - ctx := req.Raw().Context() - ctx, span := tracer.Start(ctx, "HTTP "+req.Raw().Method, &tracing.SpanOptions{ - Kind: tracing.SpanKindClient, - Attributes: attributes, - }) - - defer func() { - if resp != nil { - span.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode}) - if resp.StatusCode > 399 { - span.SetStatus(tracing.SpanStatusError, resp.Status) - } - if reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != "" { - span.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID}) - } - } else if err != nil { - var urlErr *url.Error - if errors.As(err, &urlErr) { - // calling *url.Error.Error() will include the unsanitized URL - // which we don't want. in addition, we already have the HTTP verb - // and sanitized URL in the trace so we aren't losing any info - err = urlErr.Err - } - span.SetStatus(tracing.SpanStatusError, err.Error()) - } - span.End() - }() - - req = req.WithContext(ctx) - } - resp, err = req.Next() - return -} - -// StartSpanOptions contains the optional values for StartSpan. -type StartSpanOptions struct { - // for future expansion -} - -// StartSpan starts a new tracing span. -// You must call the returned func to terminate the span. Pass the applicable error -// if the span will exit with an error condition. -// - ctx is the parent context of the newly created context -// - name is the name of the span. this is typically the fully qualified name of an API ("Client.Method") -// - tracer is the client's Tracer for creating spans -// - options contains optional values. pass nil to accept any default values -func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options *StartSpanOptions) (context.Context, func(error)) { - if !tracer.Enabled() { - return ctx, func(err error) {} - } - - // we MUST propagate the active tracer before returning so that the trace policy can access it - ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer) - - const newSpanKind = tracing.SpanKindInternal - if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil { - // per the design guidelines, if a SDK method Foo() calls SDK method Bar(), - // then the span for Bar() must be suppressed. 
however, if Bar() makes a REST - // call, then Bar's HTTP span must be a child of Foo's span. - // however, there is an exception to this rule. if the SDK method Foo() is a - // messaging producer/consumer, and it takes a callback that's a SDK method - // Bar(), then the span for Bar() must _not_ be suppressed. - if kind := activeSpan.(tracing.SpanKind); kind == tracing.SpanKindClient || kind == tracing.SpanKindInternal { - return ctx, func(err error) {} - } - } - ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{ - Kind: newSpanKind, - }) - ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind) - return ctx, func(err error) { - if err != nil { - errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1) - span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%s:\n%s", errType, err.Error())) - } - span.End() - } -} - -// ctxActiveSpan is used as a context key for indicating a SDK client span is in progress. -type ctxActiveSpan struct{} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go deleted file mode 100644 index bb00f6c2f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "context" - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request -func includeResponsePolicy(req *policy.Request) (*http.Response, error) { - resp, err := req.Next() - if resp == nil { - return resp, err - } - if httpOutRaw := req.Raw().Context().Value(shared.CtxWithCaptureResponse{}); httpOutRaw != nil { - httpOut := httpOutRaw.(**http.Response) - *httpOut = resp - } - return resp, err -} - -// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. -// The resp parameter will contain the HTTP response after the request has completed. -// Deprecated: use [policy.WithCaptureResponse] instead. -func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { - return policy.WithCaptureResponse(parent, resp) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go deleted file mode 100644 index 6f577fa7a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential]. -type KeyCredentialPolicy struct { - cred *exported.KeyCredential - header string - prefix string -} - -// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy]. -type KeyCredentialPolicyOptions struct { - // Prefix is used if the key requires a prefix before it's inserted into the HTTP request. 
- Prefix string -} - -// NewKeyCredentialPolicy creates a new instance of [KeyCredentialPolicy]. -// - cred is the [azcore.KeyCredential] used to authenticate with the service -// - header is the name of the HTTP request header in which the key is placed -// - options contains optional configuration, pass nil to accept the default values -func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options *KeyCredentialPolicyOptions) *KeyCredentialPolicy { - if options == nil { - options = &KeyCredentialPolicyOptions{} - } - return &KeyCredentialPolicy{ - cred: cred, - header: header, - prefix: options.Prefix, - } -} - -// Do implementes the Do method on the [policy.Polilcy] interface. -func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { - // skip adding the authorization header if no KeyCredential was provided. - // this prevents a panic that might be hard to diagnose and allows testing - // against http endpoints that don't require authentication. - if k.cred != nil { - if err := checkHTTPSForAuth(req); err != nil { - return nil, err - } - val := exported.KeyCredentialGet(k.cred) - if k.prefix != "" { - val = k.prefix + val - } - req.Raw().Header.Add(k.header, val) - } - return req.Next() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go deleted file mode 100644 index f048d7fb5..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go +++ /dev/null @@ -1,264 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" -) - -type logPolicy struct { - includeBody bool - allowedHeaders map[string]struct{} - allowedQP map[string]struct{} -} - -// NewLogPolicy creates a request/response logging policy object configured using the specified options. -// Pass nil to accept the default values; this is the same as passing a zero-value options. 
-func NewLogPolicy(o *policy.LogOptions) policy.Policy { - if o == nil { - o = &policy.LogOptions{} - } - // construct default hash set of allowed headers - allowedHeaders := map[string]struct{}{ - "accept": {}, - "cache-control": {}, - "connection": {}, - "content-length": {}, - "content-type": {}, - "date": {}, - "etag": {}, - "expires": {}, - "if-match": {}, - "if-modified-since": {}, - "if-none-match": {}, - "if-unmodified-since": {}, - "last-modified": {}, - "ms-cv": {}, - "pragma": {}, - "request-id": {}, - "retry-after": {}, - "server": {}, - "traceparent": {}, - "transfer-encoding": {}, - "user-agent": {}, - "www-authenticate": {}, - "x-ms-request-id": {}, - "x-ms-client-request-id": {}, - "x-ms-return-client-request-id": {}, - } - // add any caller-specified allowed headers to the set - for _, ah := range o.AllowedHeaders { - allowedHeaders[strings.ToLower(ah)] = struct{}{} - } - // now do the same thing for query params - allowedQP := getAllowedQueryParams(o.AllowedQueryParams) - return &logPolicy{ - includeBody: o.IncludeBody, - allowedHeaders: allowedHeaders, - allowedQP: allowedQP, - } -} - -// getAllowedQueryParams merges the default set of allowed query parameters -// with a custom set (usually comes from client options). -func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} { - allowedQP := map[string]struct{}{ - "api-version": {}, - } - for _, qp := range customAllowedQP { - allowedQP[strings.ToLower(qp)] = struct{}{} - } - return allowedQP -} - -// logPolicyOpValues is the struct containing the per-operation values -type logPolicyOpValues struct { - try int32 - start time.Time -} - -func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { - // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. - var opValues logPolicyOpValues - if req.OperationValue(&opValues); opValues.start.IsZero() { - opValues.start = time.Now() // If this is the 1st try, record this operation's start time - } - opValues.try++ // The first try is #1 (not #0) - req.SetOperationValue(opValues) - - // Log the outgoing request as informational - if log.Should(log.EventRequest) { - b := &bytes.Buffer{} - fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) - p.writeRequestWithResponse(b, req, nil, nil) - var err error - if p.includeBody { - err = writeReqBody(req, b) - } - log.Write(log.EventRequest, b.String()) - if err != nil { - return nil, err - } - } - - // Set the time for this particular retry operation and then Do the operation. 
- tryStart := time.Now() - response, err := req.Next() // Make the request - tryEnd := time.Now() - tryDuration := tryEnd.Sub(tryStart) - opDuration := tryEnd.Sub(opValues.start) - - if log.Should(log.EventResponse) { - // We're going to log this; build the string to log - b := &bytes.Buffer{} - fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) - if err != nil { // This HTTP request did not get a response from the service - fmt.Fprint(b, "REQUEST ERROR\n") - } else { - fmt.Fprint(b, "RESPONSE RECEIVED\n") - } - - p.writeRequestWithResponse(b, req, response, err) - if err != nil { - // skip frames runtime.Callers() and runtime.StackTrace() - b.WriteString(diag.StackTrace(2, 32)) - } else if p.includeBody { - err = writeRespBody(response, b) - } - log.Write(log.EventResponse, b.String()) - } - return response, err -} - -const redactedValue = "REDACTED" - -// getSanitizedURL returns a sanitized string for the provided url.URL -func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string { - // redact applicable query params - qp := u.Query() - for k := range qp { - if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok { - qp.Set(k, redactedValue) - } - } - u.RawQuery = qp.Encode() - return u.String() -} - -// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are -// not nil, then these are also written into the Buffer. -func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { - // Write the request into the buffer. - fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n") - p.writeHeader(b, req.Raw().Header) - if resp != nil { - fmt.Fprintln(b, " --------------------------------------------------------------------------------") - fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") - p.writeHeader(b, resp.Header) - } - if err != nil { - fmt.Fprintln(b, " --------------------------------------------------------------------------------") - fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") - } -} - -// formatHeaders appends an HTTP request's or response's header into a Buffer. -func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { - if len(header) == 0 { - b.WriteString(" (no headers)\n") - return - } - keys := make([]string, 0, len(header)) - // Alphabetize the headers - for k := range header { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - // don't use Get() as it will canonicalize k which might cause a mismatch - value := header[k][0] - // redact all header values not in the allow-list - if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { - value = redactedValue - } - fmt.Fprintf(b, " %s: %+v\n", k, value) - } -} - -// returns true if the request/response body should be logged. -// this is determined by looking at the content-type header value. 
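Editor's note: getSanitizedURL and writeHeader above apply allow-list redaction, replacing anything not explicitly allowed with REDACTED before logging. A standalone stdlib sketch of the query-string half of that idea; the sample URL and allow-list are illustrative.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// sanitizeURL redacts every query parameter whose lower-cased name is not in
// the allow-list, mirroring the logging policy above.
func sanitizeURL(raw string, allowed map[string]struct{}) (string, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	qp := u.Query()
	for k := range qp {
		if _, ok := allowed[strings.ToLower(k)]; !ok {
			qp.Set(k, "REDACTED")
		}
	}
	u.RawQuery = qp.Encode()
	return u.String(), nil
}

func main() {
	allowed := map[string]struct{}{"api-version": {}}
	s, _ := sanitizeURL("https://example.com/c?api-version=2023-01-01&sig=secret", allowed)
	fmt.Println(s) // https://example.com/c?api-version=2023-01-01&sig=REDACTED
}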
-func shouldLogBody(b *bytes.Buffer, contentType string) bool { - contentType = strings.ToLower(contentType) - if strings.HasPrefix(contentType, "text") || - strings.Contains(contentType, "json") || - strings.Contains(contentType, "xml") { - return true - } - fmt.Fprintf(b, " Skip logging body for %s\n", contentType) - return false -} - -// writes to a buffer, used for logging purposes -func writeReqBody(req *policy.Request, b *bytes.Buffer) error { - if req.Raw().Body == nil { - fmt.Fprint(b, " Request contained no body\n") - return nil - } - if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { - return nil - } - body, err := io.ReadAll(req.Raw().Body) - if err != nil { - fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) - return err - } - if err := req.RewindBody(); err != nil { - return err - } - logBody(b, body) - return nil -} - -// writes to a buffer, used for logging purposes -func writeRespBody(resp *http.Response, b *bytes.Buffer) error { - ct := resp.Header.Get(shared.HeaderContentType) - if ct == "" { - fmt.Fprint(b, " Response contained no body\n") - return nil - } else if !shouldLogBody(b, ct) { - return nil - } - body, err := Payload(resp) - if err != nil { - fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) - return err - } - if len(body) > 0 { - logBody(b, body) - } else { - fmt.Fprint(b, " Response contained no body\n") - } - return nil -} - -func logBody(b *bytes.Buffer, body []byte) { - fmt.Fprintln(b, " --------------------------------------------------------------------------------") - fmt.Fprintln(b, string(body)) - fmt.Fprintln(b, " --------------------------------------------------------------------------------") -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go deleted file mode 100644 index 360a7f211..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" -) - -type requestIDPolicy struct{} - -// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header -func NewRequestIDPolicy() policy.Policy { - return &requestIDPolicy{} -} - -func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { - if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" { - id, err := uuid.New() - if err != nil { - return nil, err - } - req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String()) - } - - return req.Next() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go deleted file mode 100644 index 04d7bb4ec..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ /dev/null @@ -1,255 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package runtime - -import ( - "context" - "errors" - "io" - "math" - "math/rand" - "net/http" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" - "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" -) - -const ( - defaultMaxRetries = 3 -) - -func setDefaults(o *policy.RetryOptions) { - if o.MaxRetries == 0 { - o.MaxRetries = defaultMaxRetries - } else if o.MaxRetries < 0 { - o.MaxRetries = 0 - } - - // SDK guidelines specify the default MaxRetryDelay is 60 seconds - if o.MaxRetryDelay == 0 { - o.MaxRetryDelay = 60 * time.Second - } else if o.MaxRetryDelay < 0 { - // not really an unlimited cap, but sufficiently large enough to be considered as such - o.MaxRetryDelay = math.MaxInt64 - } - if o.RetryDelay == 0 { - o.RetryDelay = 800 * time.Millisecond - } else if o.RetryDelay < 0 { - o.RetryDelay = 0 - } - if o.StatusCodes == nil { - // NOTE: if you change this list, you MUST update the docs in policy/policy.go - o.StatusCodes = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } - } -} - -func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 - delay := time.Duration((1< o.MaxRetryDelay { - delay = o.MaxRetryDelay - } - return delay -} - -// NewRetryPolicy creates a policy object configured using the specified options. -// Pass nil to accept the default values; this is the same as passing a zero-value options. -func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { - if o == nil { - o = &policy.RetryOptions{} - } - p := &retryPolicy{options: *o} - return p -} - -type retryPolicy struct { - options policy.RetryOptions -} - -func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { - options := p.options - // check if the retry options have been overridden for this call - if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { - options = override.(policy.RetryOptions) - } - setDefaults(&options) - // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) - // When to retry: connection failure or temporary/timeout. - var rwbody *retryableRequestBody - if req.Body() != nil { - // wrap the body so we control when it's actually closed. - // do this outside the for loop so defers don't accumulate. - rwbody = &retryableRequestBody{body: req.Body()} - defer rwbody.realClose() - } - try := int32(1) - for { - resp = nil // reset - log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) - - // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because - // the stream may not be at offset 0 when we first get it and we want the same behavior for the - // 1st try as for additional tries. - err = req.RewindBody() - if err != nil { - return - } - // RewindBody() restores Raw().Body to its original state, so set our rewindable after - if rwbody != nil { - req.Raw().Body = rwbody - } - - if options.TryTimeout == 0 { - clone := req.Clone(req.Raw().Context()) - resp, err = clone.Next() - } else { - // Set the per-try time for this particular retry operation and then Do the operation. 
- tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) - clone := req.Clone(tryCtx) - resp, err = clone.Next() // Make the request - // if the body was already downloaded or there was an error it's safe to cancel the context now - if err != nil { - tryCancel() - } else if exported.PayloadDownloaded(resp) { - tryCancel() - } else { - // must cancel the context after the body has been read and closed - resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} - } - } - if err == nil { - log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) - } else { - log.Writef(log.EventRetryPolicy, "error %v", err) - } - - if ctxErr := req.Raw().Context().Err(); ctxErr != nil { - // don't retry if the parent context has been cancelled or its deadline exceeded - err = ctxErr - log.Writef(log.EventRetryPolicy, "abort due to %v", err) - return - } - - // check if the error is not retriable - var nre errorinfo.NonRetriable - if errors.As(err, &nre) { - // the error says it's not retriable so don't retry - log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) - return - } - - if options.ShouldRetry != nil { - // a non-nil ShouldRetry overrides our HTTP status code check - if !options.ShouldRetry(resp, err) { - // predicate says we shouldn't retry - log.Write(log.EventRetryPolicy, "exit due to ShouldRetry") - return - } - } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) { - // if there is no error and the response code isn't in the list of retry codes then we're done. - log.Write(log.EventRetryPolicy, "exit due to non-retriable status code") - return - } - - if try == options.MaxRetries+1 { - // max number of tries has been reached, don't sleep again - log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) - return - } - - // use the delay from retry-after if available - delay := shared.RetryAfter(resp) - if delay <= 0 { - delay = calcDelay(options, try) - } else if delay > options.MaxRetryDelay { - // the retry-after delay exceeds the the cap so don't retry - log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay) - return - } - - // drain before retrying so nothing is leaked - Drain(resp) - - log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) - select { - case <-time.After(delay): - try++ - case <-req.Raw().Context().Done(): - err = req.Raw().Context().Err() - log.Writef(log.EventRetryPolicy, "abort due to %v", err) - return - } - } -} - -// WithRetryOptions adds the specified RetryOptions to the parent context. -// Use this to specify custom RetryOptions at the API-call level. -// Deprecated: use [policy.WithRetryOptions] instead. 
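Editor's note: the retry delay follows the formula documented above, ((2^attempt)-1) * RetryDelay * random(0.8, 1.2), capped at MaxRetryDelay, with a Retry-After header taking precedence when present. A stdlib sketch of that calculation; it matches the documented formula in spirit, the exact jitter source in the deleted code may differ.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff returns the exponential delay for a given try (>= 1):
// ((2^try)-1) * base, jittered by +/-20% and capped at max.
func backoff(try int, base, max time.Duration) time.Duration {
	d := time.Duration((1<<uint(try))-1) * base
	jitter := 0.8 + 0.4*rand.Float64() // random factor in [0.8, 1.2)
	d = time.Duration(float64(d) * jitter)
	if d > max {
		d = max
	}
	return d
}

func main() {
	for try := 1; try <= 4; try++ {
		fmt.Println(try, backoff(try, 800*time.Millisecond, 60*time.Second))
	}
}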
-func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { - return policy.WithRetryOptions(parent, options) -} - -// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) - -// This struct is used when sending a body to the network -type retryableRequestBody struct { - body io.ReadSeeker // Seeking is required to support retries -} - -// Read reads a block of data from an inner stream and reports progress -func (b *retryableRequestBody) Read(p []byte) (n int, err error) { - return b.body.Read(p) -} - -func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { - return b.body.Seek(offset, whence) -} - -func (b *retryableRequestBody) Close() error { - // We don't want the underlying transport to close the request body on transient failures so this is a nop. - // The retry policy closes the request body upon success. - return nil -} - -func (b *retryableRequestBody) realClose() error { - if c, ok := b.body.(io.Closer); ok { - return c.Close() - } - return nil -} - -// ********** The following type/methods implement the contextCancelReadCloser - -// contextCancelReadCloser combines an io.ReadCloser with a cancel func. -// it ensures the cancel func is invoked once the body has been read and closed. -type contextCancelReadCloser struct { - cf context.CancelFunc - body io.ReadCloser -} - -func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { - return rc.body.Read(p) -} - -func (rc *contextCancelReadCloser) Close() error { - err := rc.body.Close() - rc.cf() - return err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go deleted file mode 100644 index ebe2b7772..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "net/http" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// SASCredentialPolicy authorizes requests with a [azcore.SASCredential]. -type SASCredentialPolicy struct { - cred *exported.SASCredential - header string -} - -// SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy]. -type SASCredentialPolicyOptions struct { - // placeholder for future optional values -} - -// NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy]. -// - cred is the [azcore.SASCredential] used to authenticate with the service -// - header is the name of the HTTP request header in which the shared access signature is placed -// - options contains optional configuration, pass nil to accept the default values -func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy { - return &SASCredentialPolicy{ - cred: cred, - header: header, - } -} - -// Do implementes the Do method on the [policy.Polilcy] interface. -func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { - // skip adding the authorization header if no SASCredential was provided. - // this prevents a panic that might be hard to diagnose and allows testing - // against http endpoints that don't require authentication. 
- if k.cred != nil { - if err := checkHTTPSForAuth(req); err != nil { - return nil, err - } - req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred)) - } - return req.Next() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go deleted file mode 100644 index 80a903546..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go +++ /dev/null @@ -1,83 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "bytes" - "fmt" - "net/http" - "os" - "runtime" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -type telemetryPolicy struct { - telemetryValue string -} - -// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. -// The format is [ ]azsdk-go-/ . -// Pass nil to accept the default values; this is the same as passing a zero-value options. -func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { - if o == nil { - o = &policy.TelemetryOptions{} - } - tp := telemetryPolicy{} - if o.Disabled { - return &tp - } - b := &bytes.Buffer{} - // normalize ApplicationID - if o.ApplicationID != "" { - o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") - if len(o.ApplicationID) > 24 { - o.ApplicationID = o.ApplicationID[:24] - } - b.WriteString(o.ApplicationID) - b.WriteRune(' ') - } - // mod might be the fully qualified name. in that case, we just want the package name - if i := strings.LastIndex(mod, "/"); i > -1 { - mod = mod[i+1:] - } - b.WriteString(formatTelemetry(mod, ver)) - b.WriteRune(' ') - b.WriteString(platformInfo) - tp.telemetryValue = b.String() - return &tp -} - -func formatTelemetry(comp, ver string) string { - return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) -} - -func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { - if p.telemetryValue == "" { - return req.Next() - } - // preserve the existing User-Agent string - if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { - p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) - } - req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) - return req.Next() -} - -// NOTE: the ONLY function that should write to this variable is this func -var platformInfo = func() string { - operatingSystem := runtime.GOOS // Default OS string - switch operatingSystem { - case "windows": - operatingSystem = os.Getenv("OS") // Get more specific OS information - case "linux": // accept default OS info - case "freebsd": // accept default OS info - } - return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) -}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go deleted file mode 100644 index c373f6896..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ /dev/null @@ -1,384 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
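Editor's note: the telemetry policy above builds the User-Agent as "[<ApplicationID> ]azsdk-go-<module>/<version> (<go version>; <OS>)", truncating the application ID to 24 characters and replacing spaces with slashes. A tiny stdlib sketch of that formatting; the original further refines the OS string on Windows, which is omitted here.

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// telemetryValue mirrors the formatting used by the deleted policy:
// optional normalized application ID, then azsdk-go-<module>/<version>,
// then "(<go version>; <OS>)".
func telemetryValue(appID, module, version string) string {
	var b strings.Builder
	if appID != "" {
		appID = strings.ReplaceAll(appID, " ", "/")
		if len(appID) > 24 {
			appID = appID[:24]
		}
		b.WriteString(appID)
		b.WriteByte(' ')
	}
	fmt.Fprintf(&b, "azsdk-go-%s/%s (%s; %s)", module, version, runtime.Version(), runtime.GOOS)
	return b.String()
}

func main() {
	fmt.Println(telemetryValue("my app", "azfoo", "v1.2.3"))
	// e.g. my/app azsdk-go-azfoo/v1.2.3 (go1.23.4; linux)
}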
- -package runtime - -import ( - "context" - "encoding/json" - "errors" - "flag" - "fmt" - "net/http" - "reflect" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" - "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" -) - -// FinalStateVia is the enumerated type for the possible final-state-via values. -type FinalStateVia = pollers.FinalStateVia - -const ( - // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. - FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp - - // FinalStateViaLocation indicates the final payload comes from the Location URL. - FinalStateViaLocation = pollers.FinalStateViaLocation - - // FinalStateViaOriginalURI indicates the final payload comes from the original URL. - FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI - - // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. - FinalStateViaOpLocation = pollers.FinalStateViaOpLocation -) - -// NewPollerOptions contains the optional parameters for NewPoller. -type NewPollerOptions[T any] struct { - // FinalStateVia contains the final-state-via value for the LRO. - FinalStateVia FinalStateVia - - // Response contains a preconstructed response type. - // The final payload will be unmarshaled into it and returned. - Response *T - - // Handler[T] contains a custom polling implementation. - Handler PollingHandler[T] - - // Tracer contains the Tracer from the client that's creating the Poller. - Tracer tracing.Tracer -} - -// NewPoller creates a Poller based on the provided initial response. -func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { - if options == nil { - options = &NewPollerOptions[T]{} - } - result := options.Response - if result == nil { - result = new(T) - } - if options.Handler != nil { - return &Poller[T]{ - op: options.Handler, - resp: resp, - result: result, - tracer: options.Tracer, - }, nil - } - - defer resp.Body.Close() - // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). - // ideally the codegen should return an error if the initial response failed and not even create a poller. 
- if !poller.StatusCodeValid(resp) { - return nil, errors.New("the operation failed or was cancelled") - } - - // determine the polling method - var opr PollingHandler[T] - var err error - if fake.Applicable(resp) { - opr, err = fake.New[T](pl, resp) - } else if async.Applicable(resp) { - // async poller must be checked first as it can also have a location header - opr, err = async.New[T](pl, resp, options.FinalStateVia) - } else if op.Applicable(resp) { - // op poller must be checked before loc as it can also have a location header - opr, err = op.New[T](pl, resp, options.FinalStateVia) - } else if loc.Applicable(resp) { - opr, err = loc.New[T](pl, resp) - } else if body.Applicable(resp) { - // must test body poller last as it's a subset of the other pollers. - // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) - opr, err = body.New[T](pl, resp) - } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { - // if we get here it means we have a 202 with no polling headers. - // for DELETE and POST this is a hard error per ARM RPC spec. - return nil, errors.New("response is missing polling URL") - } else { - opr, err = pollers.NewNopPoller[T](resp) - } - - if err != nil { - return nil, err - } - return &Poller[T]{ - op: opr, - resp: resp, - result: result, - tracer: options.Tracer, - }, nil -} - -// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. -type NewPollerFromResumeTokenOptions[T any] struct { - // Response contains a preconstructed response type. - // The final payload will be unmarshaled into it and returned. - Response *T - - // Handler[T] contains a custom polling implementation. - Handler PollingHandler[T] - - // Tracer contains the Tracer from the client that's creating the Poller. - Tracer tracing.Tracer -} - -// NewPollerFromResumeToken creates a Poller from a resume token string. -func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { - if options == nil { - options = &NewPollerFromResumeTokenOptions[T]{} - } - result := options.Response - if result == nil { - result = new(T) - } - - if err := pollers.IsTokenValid[T](token); err != nil { - return nil, err - } - raw, err := pollers.ExtractToken(token) - if err != nil { - return nil, err - } - var asJSON map[string]interface{} - if err := json.Unmarshal(raw, &asJSON); err != nil { - return nil, err - } - - opr := options.Handler - // now rehydrate the poller based on the encoded poller type - if fake.CanResume(asJSON) { - opr, _ = fake.New[T](pl, nil) - } else if opr != nil { - log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) - } else if async.CanResume(asJSON) { - opr, _ = async.New[T](pl, nil, "") - } else if body.CanResume(asJSON) { - opr, _ = body.New[T](pl, nil) - } else if loc.CanResume(asJSON) { - opr, _ = loc.New[T](pl, nil) - } else if op.CanResume(asJSON) { - opr, _ = op.New[T](pl, nil, "") - } else { - return nil, fmt.Errorf("unhandled poller token %s", string(raw)) - } - if err := json.Unmarshal(raw, &opr); err != nil { - return nil, err - } - return &Poller[T]{ - op: opr, - result: result, - tracer: options.Tracer, - }, nil -} - -// PollingHandler[T] abstracts the differences among poller implementations. -type PollingHandler[T any] interface { - // Done returns true if the LRO has reached a terminal state. 
- Done() bool - - // Poll fetches the latest state of the LRO. - Poll(context.Context) (*http.Response, error) - - // Result is called once the LRO has reached a terminal state. It populates the out parameter - // with the result of the operation. - Result(ctx context.Context, out *T) error -} - -// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. -type Poller[T any] struct { - op PollingHandler[T] - resp *http.Response - err error - result *T - tracer tracing.Tracer - done bool -} - -// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. -type PollUntilDoneOptions struct { - // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. - // Pass zero to accept the default value (30s). - Frequency time.Duration -} - -// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. -// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. -// options: pass nil to accept the default values. -// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might -// benefit from a shorter or longer duration. -func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (res T, err error) { - if options == nil { - options = &PollUntilDoneOptions{} - } - cp := *options - if cp.Frequency == 0 { - cp.Frequency = 30 * time.Second - } - - ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.PollUntilDone", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) - defer func() { endSpan(err) }() - - // skip the floor check when executing tests so they don't take so long - if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { - err = errors.New("polling frequency minimum is one second") - return - } - - start := time.Now() - logPollUntilDoneExit := func(v interface{}) { - log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) - } - log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) - if p.resp != nil { - // initial check for a retry-after header existing on the initial response - if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { - log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) - if err = shared.Delay(ctx, retryAfter); err != nil { - logPollUntilDoneExit(err) - return - } - } - } - // begin polling the endpoint until a terminal state is reached - for { - var resp *http.Response - resp, err = p.Poll(ctx) - if err != nil { - logPollUntilDoneExit(err) - return - } - if p.Done() { - logPollUntilDoneExit("succeeded") - res, err = p.Result(ctx) - return - } - d := cp.Frequency - if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { - log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) - d = retryAfter - } else { - log.Writef(log.EventLRO, "delay for %s", d.String()) - } - if err = shared.Delay(ctx, d); err != nil { - logPollUntilDoneExit(err) - return - } - } -} - -// Poll fetches the latest state of the LRO. It returns an HTTP response or error. -// If Poll succeeds, the poller's state is updated and the HTTP response is returned. -// If Poll fails, the poller's state is unmodified and the error is returned. 
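Editor's note: Poller[T] above exposes Poll/Done/Result for manual control and PollUntilDone for the common loop (default 30s frequency, Retry-After honored). A rough usage sketch with a hypothetical widgetResult payload type; the 5-second frequency is arbitrary.

package example

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widgetResult is a hypothetical LRO payload type.
type widgetResult struct {
	ID     string
	Status string
}

// wait blocks until the long-running operation completes.
func wait(ctx context.Context, p *runtime.Poller[widgetResult]) (widgetResult, error) {
	return p.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 5 * time.Second})
}

// waitManually shows the equivalent loop built from Poll/Done/Result.
func waitManually(ctx context.Context, p *runtime.Poller[widgetResult]) (widgetResult, error) {
	for !p.Done() {
		if _, err := p.Poll(ctx); err != nil {
			return widgetResult{}, err
		}
		if p.Done() {
			break
		}
		select {
		case <-time.After(5 * time.Second):
		case <-ctx.Done():
			return widgetResult{}, ctx.Err()
		}
	}
	return p.Result(ctx)
}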
-// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. -func (p *Poller[T]) Poll(ctx context.Context) (resp *http.Response, err error) { - if p.Done() { - // the LRO has reached a terminal state, don't poll again - resp = p.resp - return - } - - ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Poll", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) - defer func() { endSpan(err) }() - - resp, err = p.op.Poll(ctx) - if err != nil { - return - } - p.resp = resp - return -} - -// Done returns true if the LRO has reached a terminal state. -// Once a terminal state is reached, call Result(). -func (p *Poller[T]) Done() bool { - return p.op.Done() -} - -// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. -// If the LRO completed successfully, a populated instance of T is returned. -// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. -// Calling this on an LRO in a non-terminal state will return an error. -func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { - if !p.Done() { - err = errors.New("poller is in a non-terminal state") - return - } - if p.done { - // the result has already been retrieved, return the cached value - if p.err != nil { - err = p.err - return - } - res = *p.result - return - } - - ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Result", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) - defer func() { endSpan(err) }() - - err = p.op.Result(ctx, p.result) - var respErr *exported.ResponseError - if errors.As(err, &respErr) { - // the LRO failed. record the error - p.err = err - } else if err != nil { - // the call to Result failed, don't cache anything in this case - return - } - p.done = true - if p.err != nil { - err = p.err - return - } - res = *p.result - return -} - -// ResumeToken returns a value representing the poller that can be used to resume -// the LRO at a later time. ResumeTokens are unique per service operation. -// The token's format should be considered opaque and is subject to change. -// Calling this on an LRO in a terminal state will return an error. -func (p *Poller[T]) ResumeToken() (string, error) { - if p.Done() { - return "", errors.New("poller is in a terminal state") - } - tk, err := pollers.NewResumeToken[T](p.op) - if err != nil { - return "", err - } - return tk, err -} - -// extracts the type name from the string returned from reflect.Value.Name() -func shortenTypeName(s string) string { - // the value is formatted as follows - // Poller[module/Package.Type].Method - // we want to shorten the generic type parameter string to Type - // anything we don't recognize will be left as-is - begin := strings.Index(s, "[") - end := strings.Index(s, "]") - if begin == -1 || end == -1 { - return s - } - - typeName := s[begin+1 : end] - if i := strings.LastIndex(typeName, "."); i > -1 { - typeName = typeName[i+1:] - } - return s[:begin+1] + typeName + s[end:] -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go deleted file mode 100644 index bef05f2a3..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ /dev/null @@ -1,186 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package runtime - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "mime/multipart" - "net/url" - "path" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" -) - -// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when -// encoding/decoding a slice of bytes to/from a string. -type Base64Encoding = exported.Base64Encoding - -const ( - // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. - Base64StdFormat Base64Encoding = exported.Base64StdFormat - - // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. - Base64URLFormat Base64Encoding = exported.Base64URLFormat -) - -// NewRequest creates a new policy.Request with the specified input. -// The endpoint MUST be properly encoded before calling this function. -func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { - return exported.NewRequest(ctx, httpMethod, endpoint) -} - -// EncodeQueryParams will parse and encode any query parameters in the specified URL. -// Any semicolons will automatically be escaped. -func EncodeQueryParams(u string) (string, error) { - before, after, found := strings.Cut(u, "?") - if !found { - return u, nil - } - // starting in Go 1.17, url.ParseQuery will reject semicolons in query params. - // so, we must escape them first. note that this assumes that semicolons aren't - // being used as query param separators which is per the current RFC. - // for more info: - // https://github.com/golang/go/issues/25192 - // https://github.com/golang/go/issues/50034 - qp, err := url.ParseQuery(strings.ReplaceAll(after, ";", "%3B")) - if err != nil { - return "", err - } - return before + "?" + qp.Encode(), nil -} - -// JoinPaths concatenates multiple URL path segments into one path, -// inserting path separation characters as required. JoinPaths will preserve -// query parameters in the root path -func JoinPaths(root string, paths ...string) string { - if len(paths) == 0 { - return root - } - - qps := "" - if strings.Contains(root, "?") { - splitPath := strings.Split(root, "?") - root, qps = splitPath[0], splitPath[1] - } - - p := path.Join(paths...) - // path.Join will remove any trailing slashes. - // if one was provided, preserve it. - if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") { - p += "/" - } - - if qps != "" { - p = p + "?" + qps - } - - if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") { - root = root[:len(root)-1] - } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") { - p = "/" + p - } - return root + p -} - -// EncodeByteArray will base-64 encode the byte slice v. -func EncodeByteArray(v []byte, format Base64Encoding) string { - return exported.EncodeByteArray(v, format) -} - -// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. -// The encoded value is treated as a JSON string. -func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { - // send as a JSON string - encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) - // tsp generated code can set Content-Type so we must prefer that - return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false) -} - -// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. 
-func MarshalAsJSON(req *policy.Request, v interface{}) error { - b, err := json.Marshal(v) - if err != nil { - return fmt.Errorf("error marshalling type %T: %s", v, err) - } - // tsp generated code can set Content-Type so we must prefer that - return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false) -} - -// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. -func MarshalAsXML(req *policy.Request, v interface{}) error { - b, err := xml.Marshal(v) - if err != nil { - return fmt.Errorf("error marshalling type %T: %s", v, err) - } - // inclue the XML header as some services require it - b = []byte(xml.Header + string(b)) - return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) -} - -// SetMultipartFormData writes the specified keys/values as multi-part form -// fields with the specified value. File content must be specified as a ReadSeekCloser. -// All other values are treated as string values. -func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { - body := bytes.Buffer{} - writer := multipart.NewWriter(&body) - - writeContent := func(fieldname, filename string, src io.Reader) error { - fd, err := writer.CreateFormFile(fieldname, filename) - if err != nil { - return err - } - // copy the data to the form file - if _, err = io.Copy(fd, src); err != nil { - return err - } - return nil - } - - for k, v := range formData { - if rsc, ok := v.(io.ReadSeekCloser); ok { - if err := writeContent(k, k, rsc); err != nil { - return err - } - continue - } else if rscs, ok := v.([]io.ReadSeekCloser); ok { - for _, rsc := range rscs { - if err := writeContent(k, k, rsc); err != nil { - return err - } - } - continue - } - // ensure the value is in string format - s, ok := v.(string) - if !ok { - s = fmt.Sprintf("%v", v) - } - if err := writer.WriteField(k, s); err != nil { - return err - } - } - if err := writer.Close(); err != nil { - return err - } - return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) -} - -// SkipBodyDownload will disable automatic downloading of the response body. -func SkipBodyDownload(req *policy.Request) { - req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) -} - -// CtxAPINameKey is used as a context key for adding/retrieving the API name. -type CtxAPINameKey = shared.CtxAPINameKey diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go deleted file mode 100644 index 003c875b1..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ /dev/null @@ -1,109 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net/http" - - azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" -) - -// Payload reads and returns the response body or an error. -// On a successful read, the response body is cached. -// Subsequent reads will access the cached value. -func Payload(resp *http.Response) ([]byte, error) { - return exported.Payload(resp, nil) -} - -// HasStatusCode returns true if the Response's status code is one of the specified values. 
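For context, a small sketch combining the request helpers above (NewRequest, JoinPaths, MarshalAsJSON), which remain available from the azcore module even with the vendored copy removed; the widget type, the path segments, and buildCreateRequest are hypothetical names used only for illustration:

```go
package requestsample

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widget is a hypothetical payload type used only for this sketch.
type widget struct {
	Name string `json:"name"`
}

// buildCreateRequest joins the path segments onto the endpoint, creates the
// policy.Request, and attaches a JSON-encoded body via MarshalAsJSON.
func buildCreateRequest(ctx context.Context, endpoint string) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(endpoint, "widgets", "w1"))
	if err != nil {
		return nil, err
	}
	if err := runtime.MarshalAsJSON(req, widget{Name: "w1"}); err != nil {
		return nil, err
	}
	return req, nil
}
```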
-func HasStatusCode(resp *http.Response, statusCodes ...int) bool { - return exported.HasStatusCode(resp, statusCodes...) -} - -// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. -func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { - p, err := Payload(resp) - if err != nil { - return err - } - return DecodeByteArray(string(p), v, format) -} - -// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. -func UnmarshalAsJSON(resp *http.Response, v interface{}) error { - payload, err := Payload(resp) - if err != nil { - return err - } - // TODO: verify early exit is correct - if len(payload) == 0 { - return nil - } - err = removeBOM(resp) - if err != nil { - return err - } - err = json.Unmarshal(payload, v) - if err != nil { - err = fmt.Errorf("unmarshalling type %T: %s", v, err) - } - return err -} - -// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. -func UnmarshalAsXML(resp *http.Response, v interface{}) error { - payload, err := Payload(resp) - if err != nil { - return err - } - // TODO: verify early exit is correct - if len(payload) == 0 { - return nil - } - err = removeBOM(resp) - if err != nil { - return err - } - err = xml.Unmarshal(payload, v) - if err != nil { - err = fmt.Errorf("unmarshalling type %T: %s", v, err) - } - return err -} - -// Drain reads the response body to completion then closes it. The bytes read are discarded. -func Drain(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - resp.Body.Close() - } -} - -// removeBOM removes any byte-order mark prefix from the payload if present. -func removeBOM(resp *http.Response) error { - _, err := exported.Payload(resp, &exported.PayloadOptions{ - BytesModifier: func(b []byte) []byte { - // UTF8 - return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - }, - }) - if err != nil { - return err - } - return nil -} - -// DecodeByteArray will base-64 decode the provided string into v. -func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { - return azexported.DecodeByteArray(s, v, format) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go deleted file mode 100644 index 1c75d771f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !wasm - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "context" - "net" -) - -func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { - return dialer.DialContext -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go deleted file mode 100644 index 3dc9eeecd..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build (js && wasm) || wasip1 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
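For context, a typical consumer-side pattern for the response helpers above: check the status code, drain bodies that will not be read, and let UnmarshalAsJSON read (and cache) the payload; widget and decodeWidget are hypothetical names:

```go
package responsesample

import (
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widget is a hypothetical payload type used only for this sketch.
type widget struct {
	Name string `json:"name"`
}

// decodeWidget checks the status code first, drains bodies it will not read so
// the connection can be reused, and otherwise unmarshals the JSON payload.
func decodeWidget(resp *http.Response) (*widget, error) {
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		runtime.Drain(resp)
		return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}
	var w widget
	if err := runtime.UnmarshalAsJSON(resp, &w); err != nil {
		return nil, err
	}
	return &w, nil
}
```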
- -package runtime - -import ( - "context" - "net" -) - -func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go deleted file mode 100644 index 2124c1d48..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go +++ /dev/null @@ -1,48 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package runtime - -import ( - "crypto/tls" - "net" - "net/http" - "time" - - "golang.org/x/net/http2" -) - -var defaultHTTPClient *http.Client - -func init() { - defaultTransport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: defaultTransportDialContext(&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }), - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - MaxIdleConnsPerHost: 10, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - Renegotiation: tls.RenegotiateFreelyAsClient, - }, - } - // TODO: evaluate removing this once https://github.com/golang/go/issues/59690 has been fixed - if http2Transport, err := http2.ConfigureTransports(defaultTransport); err == nil { - // if the connection has been idle for 10 seconds, send a ping frame for a health check - http2Transport.ReadIdleTimeout = 10 * time.Second - // if there's no response to the ping within the timeout, the connection will be closed - http2Transport.PingTimeout = 5 * time.Second - } - defaultHTTPClient = &http.Client{ - Transport: defaultTransport, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go deleted file mode 100644 index cadaef3d5..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. - -// Package streaming contains helpers for streaming IO operations and progress reporting. -package streaming diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go deleted file mode 100644 index fbcd48311..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go +++ /dev/null @@ -1,75 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package streaming - -import ( - "io" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" -) - -type progress struct { - rc io.ReadCloser - rsc io.ReadSeekCloser - pr func(bytesTransferred int64) - offset int64 -} - -// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. 
-// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an -// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReedSeekCloser -// has its underlying stream closed. -func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { - return exported.NopCloser(rs) -} - -// NewRequestProgress adds progress reporting to an HTTP request's body stream. -func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser { - return &progress{ - rc: body, - rsc: body, - pr: pr, - offset: 0, - } -} - -// NewResponseProgress adds progress reporting to an HTTP response's body stream. -func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser { - return &progress{ - rc: body, - rsc: nil, - pr: pr, - offset: 0, - } -} - -// Read reads a block of data from an inner stream and reports progress -func (p *progress) Read(b []byte) (n int, err error) { - n, err = p.rc.Read(b) - if err != nil && err != io.EOF { - return - } - p.offset += int64(n) - // Invokes the user's callback method to report progress - p.pr(p.offset) - return -} - -// Seek only expects a zero or from beginning. -func (p *progress) Seek(offset int64, whence int) (int64, error) { - // This should only ever be called with offset = 0 and whence = io.SeekStart - n, err := p.rsc.Seek(offset, whence) - if err == nil { - p.offset = int64(n) - } - return n, err -} - -// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. -func (p *progress) Close() error { - return p.rc.Close() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go deleted file mode 100644 index faa98c9dc..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. - -// Package to contains various type-conversion helper functions. -package to diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go deleted file mode 100644 index e0e4817b9..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package to - -// Ptr returns a pointer to the provided value. -func Ptr[T any](v T) *T { - return &v -} - -// SliceOfPtrs returns a slice of *T from the specified values. -func SliceOfPtrs[T any](vv ...T) []*T { - slc := make([]*T, len(vv)) - for i := range vv { - slc[i] = Ptr(vv[i]) - } - return slc -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go deleted file mode 100644 index 80282d4ab..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package tracing - -// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends. 
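For context, a minimal sketch of the progress-reporting wrappers above applied to an in-memory upload body; progressBody and its package name are illustrative only:

```go
package streamingsample

import (
	"bytes"
	"fmt"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

// progressBody wraps an in-memory payload so every Read reports the cumulative
// number of bytes transferred via the callback. NopCloser supplies the Close
// method that NewRequestProgress expects of a request body.
func progressBody(payload []byte) io.ReadSeekCloser {
	body := streaming.NopCloser(bytes.NewReader(payload))
	return streaming.NewRequestProgress(body, func(bytesTransferred int64) {
		fmt.Printf("uploaded %d of %d bytes\n", bytesTransferred, len(payload))
	})
}
```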
-type SpanKind int - -const ( - // SpanKindInternal indicates the span represents an internal operation within an application. - SpanKindInternal SpanKind = 1 - - // SpanKindServer indicates the span covers server-side handling of a request. - SpanKindServer SpanKind = 2 - - // SpanKindClient indicates the span describes a request to a remote service. - SpanKindClient SpanKind = 3 - - // SpanKindProducer indicates the span was created by a messaging producer. - SpanKindProducer SpanKind = 4 - - // SpanKindConsumer indicates the span was created by a messaging consumer. - SpanKindConsumer SpanKind = 5 -) - -// SpanStatus represents the status of a span. -type SpanStatus int - -const ( - // SpanStatusUnset is the default status code. - SpanStatusUnset SpanStatus = 0 - - // SpanStatusError indicates the operation contains an error. - SpanStatusError SpanStatus = 1 - - // SpanStatusOK indicates the operation completed successfully. - SpanStatusOK SpanStatus = 2 -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go deleted file mode 100644 index 1ade7c560..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go +++ /dev/null @@ -1,191 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// Package tracing contains the definitions needed to support distributed tracing. -package tracing - -import ( - "context" -) - -// ProviderOptions contains the optional values when creating a Provider. -type ProviderOptions struct { - // for future expansion -} - -// NewProvider creates a new Provider with the specified values. -// - newTracerFn is the underlying implementation for creating Tracer instances -// - options contains optional values; pass nil to accept the default value -func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider { - return Provider{ - newTracerFn: newTracerFn, - } -} - -// Provider is the factory that creates Tracer instances. -// It defaults to a no-op provider. -type Provider struct { - newTracerFn func(name, version string) Tracer -} - -// NewTracer creates a new Tracer for the specified module name and version. -// - module - the fully qualified name of the module -// - version - the version of the module -func (p Provider) NewTracer(module, version string) (tracer Tracer) { - if p.newTracerFn != nil { - tracer = p.newTracerFn(module, version) - } - return -} - -///////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// TracerOptions contains the optional values when creating a Tracer. -type TracerOptions struct { - // SpanFromContext contains the implementation for the Tracer.SpanFromContext method. - SpanFromContext func(context.Context) Span -} - -// NewTracer creates a Tracer with the specified values. -// - newSpanFn is the underlying implementation for creating Span instances -// - options contains optional values; pass nil to accept the default value -func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { - if options == nil { - options = &TracerOptions{} - } - return Tracer{ - newSpanFn: newSpanFn, - spanFromContextFn: options.SpanFromContext, - } -} - -// Tracer is the factory that creates Span instances. 
-type Tracer struct { - attrs []Attribute - newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) - spanFromContextFn func(ctx context.Context) Span -} - -// Start creates a new span and a context.Context that contains it. -// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span -// - spanName identifies the span within a trace, it's typically the fully qualified API name -// - options contains optional values for the span, pass nil to accept any defaults -func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { - if t.newSpanFn != nil { - opts := SpanOptions{} - if options != nil { - opts = *options - } - opts.Attributes = append(opts.Attributes, t.attrs...) - return t.newSpanFn(ctx, spanName, &opts) - } - return ctx, Span{} -} - -// SetAttributes sets attrs to be applied to each Span. If a key from attrs -// already exists for an attribute of the Span it will be overwritten with -// the value contained in attrs. -func (t *Tracer) SetAttributes(attrs ...Attribute) { - t.attrs = append(t.attrs, attrs...) -} - -// Enabled returns true if this Tracer is capable of creating Spans. -func (t Tracer) Enabled() bool { - return t.newSpanFn != nil -} - -// SpanFromContext returns the Span associated with the current context. -// If the provided context has no Span, false is returned. -func (t Tracer) SpanFromContext(ctx context.Context) Span { - if t.spanFromContextFn != nil { - return t.spanFromContextFn(ctx) - } - return Span{} -} - -// SpanOptions contains optional settings for creating a span. -type SpanOptions struct { - // Kind indicates the kind of Span. - Kind SpanKind - - // Attributes contains key-value pairs of attributes for the span. - Attributes []Attribute -} - -///////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// SpanImpl abstracts the underlying implementation for Span, -// allowing it to work with various tracing implementations. -// Any zero-values will have their default, no-op behavior. -type SpanImpl struct { - // End contains the implementation for the Span.End method. - End func() - - // SetAttributes contains the implementation for the Span.SetAttributes method. - SetAttributes func(...Attribute) - - // AddEvent contains the implementation for the Span.AddEvent method. - AddEvent func(string, ...Attribute) - - // SetStatus contains the implementation for the Span.SetStatus method. - SetStatus func(SpanStatus, string) -} - -// NewSpan creates a Span with the specified implementation. -func NewSpan(impl SpanImpl) Span { - return Span{ - impl: impl, - } -} - -// Span is a single unit of a trace. A trace can contain multiple spans. -// A zero-value Span provides a no-op implementation. -type Span struct { - impl SpanImpl -} - -// End terminates the span and MUST be called before the span leaves scope. -// Any further updates to the span will be ignored after End is called. -func (s Span) End() { - if s.impl.End != nil { - s.impl.End() - } -} - -// SetAttributes sets the specified attributes on the Span. -// Any existing attributes with the same keys will have their values overwritten. -func (s Span) SetAttributes(attrs ...Attribute) { - if s.impl.SetAttributes != nil { - s.impl.SetAttributes(attrs...) - } -} - -// AddEvent adds a named event with an optional set of attributes to the span. 
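For context, a minimal sketch of how the callback-based tracing abstractions above are wired together; this logging provider is purely illustrative, and a real integration (for example OpenTelemetry) would bridge to its own span type through the same SpanImpl hooks:

```go
package tracingsample

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)

// newLoggingProvider returns a tracing.Provider whose spans only log their
// lifecycle. Every hook left unset on SpanImpl keeps its no-op default.
func newLoggingProvider() tracing.Provider {
	return tracing.NewProvider(func(module, version string) tracing.Tracer {
		return tracing.NewTracer(func(ctx context.Context, spanName string, _ *tracing.SpanOptions) (context.Context, tracing.Span) {
			log.Printf("start span %q (module %s, version %s)", spanName, module, version)
			span := tracing.NewSpan(tracing.SpanImpl{
				End: func() { log.Printf("end span %q", spanName) },
			})
			return ctx, span
		}, nil)
	}, nil)
}
```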
-func (s Span) AddEvent(name string, attrs ...Attribute) { - if s.impl.AddEvent != nil { - s.impl.AddEvent(name, attrs...) - } -} - -// SetStatus sets the status on the span along with a description. -func (s Span) SetStatus(code SpanStatus, desc string) { - if s.impl.SetStatus != nil { - s.impl.SetStatus(code, desc) - } -} - -///////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// Attribute is a key-value pair. -type Attribute struct { - // Key is the name of the attribute. - Key string - - // Value is the attribute's value. - // Types that are natively supported include int64, float64, int, bool, string. - // Any other type will be formatted per rules of fmt.Sprintf("%v"). - Value any -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt deleted file mode 100644 index 48ea6616b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) Microsoft Corporation. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go deleted file mode 100644 index 245af7d2b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package diag - -import ( - "fmt" - "runtime" - "strings" -) - -// Caller returns the file and line number of a frame on the caller's stack. -// If the funtion fails an empty string is returned. -// skipFrames - the number of frames to skip when determining the caller. -// Passing a value of 0 will return the immediate caller of this function. -func Caller(skipFrames int) string { - if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { - // the skipFrames + 1 is to skip ourselves - frame := runtime.FuncForPC(pc) - return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) - } - return "" -} - -// StackTrace returns a formatted stack trace string. -// If the funtion fails an empty string is returned. -// skipFrames - the number of stack frames to skip before composing the trace string. -// totalFrames - the maximum number of stack frames to include in the trace string. 
-func StackTrace(skipFrames, totalFrames int) string { - pcCallers := make([]uintptr, totalFrames) - if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { - return "" - } - frames := runtime.CallersFrames(pcCallers) - sb := strings.Builder{} - for { - frame, more := frames.Next() - sb.WriteString(frame.Function) - sb.WriteString("()\n\t") - sb.WriteString(frame.File) - sb.WriteRune(':') - sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) - if !more { - break - } - } - return sb.String() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go deleted file mode 100644 index 66bf13e5f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go deleted file mode 100644 index 8c6eacb61..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go deleted file mode 100644 index 8ee66b526..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go +++ /dev/null @@ -1,46 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package errorinfo - -// NonRetriable represents a non-transient error. This works in -// conjunction with the retry policy, indicating that the error condition -// is idempotent, so no retries will be attempted. -// Use errors.As() to access this interface in the error chain. -type NonRetriable interface { - error - NonRetriable() -} - -// NonRetriableError marks the specified error as non-retriable. -// This function takes an error as input and returns a new error that is marked as non-retriable. -func NonRetriableError(err error) error { - return &nonRetriableError{err} -} - -// nonRetriableError is a struct that embeds the error interface. -// It is used to represent errors that should not be retried. -type nonRetriableError struct { - error -} - -// Error method for nonRetriableError struct. -// It returns the error message of the embedded error. -func (p *nonRetriableError) Error() string { - return p.error.Error() -} - -// NonRetriable is a marker method for nonRetriableError struct. -// Non-functional and indicates that the error is non-retriable. -func (*nonRetriableError) NonRetriable() { - // marker method -} - -// Unwrap method for nonRetriableError struct. -// It returns the original error that was marked as non-retriable. 
-func (p *nonRetriableError) Unwrap() error { - return p.error -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go deleted file mode 100644 index 9948f604b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go +++ /dev/null @@ -1,129 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package exported - -import ( - "errors" - "io" - "net/http" -) - -// HasStatusCode returns true if the Response's status code is one of the specified values. -// Exported as runtime.HasStatusCode(). -func HasStatusCode(resp *http.Response, statusCodes ...int) bool { - if resp == nil { - return false - } - for _, sc := range statusCodes { - if resp.StatusCode == sc { - return true - } - } - return false -} - -// PayloadOptions contains the optional values for the Payload func. -// NOT exported but used by azcore. -type PayloadOptions struct { - // BytesModifier receives the downloaded byte slice and returns an updated byte slice. - // Use this to modify the downloaded bytes in a payload (e.g. removing a BOM). - BytesModifier func([]byte) []byte -} - -// Payload reads and returns the response body or an error. -// On a successful read, the response body is cached. -// Subsequent reads will access the cached value. -// Exported as runtime.Payload() WITHOUT the opts parameter. -func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { - if resp.Body == nil { - // this shouldn't happen in real-world scenarios as a - // response with no body should set it to http.NoBody - return nil, nil - } - modifyBytes := func(b []byte) []byte { return b } - if opts != nil && opts.BytesModifier != nil { - modifyBytes = opts.BytesModifier - } - - // r.Body won't be a nopClosingBytesReader if downloading was skipped - if buf, ok := resp.Body.(*nopClosingBytesReader); ok { - bytesBody := modifyBytes(buf.Bytes()) - buf.Set(bytesBody) - return bytesBody, nil - } - - bytesBody, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, err - } - - bytesBody = modifyBytes(bytesBody) - resp.Body = &nopClosingBytesReader{s: bytesBody} - return bytesBody, nil -} - -// PayloadDownloaded returns true if the response body has already been downloaded. -// This implies that the Payload() func above has been previously called. -// NOT exported but used by azcore. -func PayloadDownloaded(resp *http.Response) bool { - _, ok := resp.Body.(*nopClosingBytesReader) - return ok -} - -// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice. -// It also provides direct access to the byte slice to avoid rereading. -type nopClosingBytesReader struct { - s []byte - i int64 -} - -// Bytes returns the underlying byte slice. -func (r *nopClosingBytesReader) Bytes() []byte { - return r.s -} - -// Close implements the io.Closer interface. -func (*nopClosingBytesReader) Close() error { - return nil -} - -// Read implements the io.Reader interface. -func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) { - if r.i >= int64(len(r.s)) { - return 0, io.EOF - } - n = copy(b, r.s[r.i:]) - r.i += int64(n) - return -} - -// Set replaces the existing byte slice with the specified byte slice and resets the reader. -func (r *nopClosingBytesReader) Set(b []byte) { - r.s = b - r.i = 0 -} - -// Seek implements the io.Seeker interface. 
-func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { - var i int64 - switch whence { - case io.SeekStart: - i = offset - case io.SeekCurrent: - i = r.i + offset - case io.SeekEnd: - i = int64(len(r.s)) + offset - default: - return 0, errors.New("nopClosingBytesReader: invalid whence") - } - if i < 0 { - return 0, errors.New("nopClosingBytesReader: negative position") - } - r.i = i - return i, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go deleted file mode 100644 index d7876d297..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go deleted file mode 100644 index 4f1dcf1b7..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go +++ /dev/null @@ -1,104 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package log - -import ( - "fmt" - "os" - "time" -) - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY -/////////////////////////////////////////////////////////////////////////////////////////////////// - -// Event is used to group entries. Each group can be toggled on or off. -type Event string - -// SetEvents is used to control which events are written to -// the log. By default all log events are writen. -func SetEvents(cls ...Event) { - log.cls = cls -} - -// SetListener will set the Logger to write to the specified listener. -func SetListener(lst func(Event, string)) { - log.lst = lst -} - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// END PUBLIC SURFACE AREA -/////////////////////////////////////////////////////////////////////////////////////////////////// - -// Should returns true if the specified log event should be written to the log. -// By default all log events will be logged. Call SetEvents() to limit -// the log events for logging. -// If no listener has been set this will return false. -// Calling this method is useful when the message to log is computationally expensive -// and you want to avoid the overhead if its log event is not enabled. -func Should(cls Event) bool { - if log.lst == nil { - return false - } - if log.cls == nil || len(log.cls) == 0 { - return true - } - for _, c := range log.cls { - if c == cls { - return true - } - } - return false -} - -// Write invokes the underlying listener with the specified event and message. -// If the event shouldn't be logged or there is no listener then Write does nothing. -func Write(cls Event, message string) { - if !Should(cls) { - return - } - log.lst(cls, message) -} - -// Writef invokes the underlying listener with the specified event and formatted message. -// If the event shouldn't be logged or there is no listener then Writef does nothing. 
-func Writef(cls Event, format string, a ...interface{}) { - if !Should(cls) { - return - } - log.lst(cls, fmt.Sprintf(format, a...)) -} - -// TestResetEvents is used for TESTING PURPOSES ONLY. -func TestResetEvents() { - log.cls = nil -} - -// logger controls which events to log and writing to the underlying log. -type logger struct { - cls []Event - lst func(Event, string) -} - -// the process-wide logger -var log logger - -func init() { - initLogging() -} - -// split out for testing purposes -func initLogging() { - if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { - // cls could be enhanced to support a comma-delimited list of log events - log.lst = func(cls Event, msg string) { - // simple console logger, it writes to stderr in the following format: - // [time-stamp] Event: message - fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) - } - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go deleted file mode 100644 index db8269627..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go +++ /dev/null @@ -1,155 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package poller - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" -) - -// the well-known set of LRO status/provisioning state values. -const ( - StatusSucceeded = "Succeeded" - StatusCanceled = "Canceled" - StatusFailed = "Failed" - StatusInProgress = "InProgress" -) - -// these are non-conformant states that we've seen in the wild. -// we support them for back-compat. -const ( - StatusCancelled = "Cancelled" - StatusCompleted = "Completed" -) - -// IsTerminalState returns true if the LRO's state is terminal. -func IsTerminalState(s string) bool { - return Failed(s) || Succeeded(s) -} - -// Failed returns true if the LRO's state is terminal failure. -func Failed(s string) bool { - return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled) -} - -// Succeeded returns true if the LRO's state is terminal success. -func Succeeded(s string) bool { - return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted) -} - -// returns true if the LRO response contains a valid HTTP status code -func StatusCodeValid(resp *http.Response) bool { - return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent) -} - -// IsValidURL verifies that the URL is valid and absolute. -func IsValidURL(s string) bool { - u, err := url.Parse(s) - return err == nil && u.IsAbs() -} - -// ErrNoBody is returned if the response didn't contain a body. -var ErrNoBody = errors.New("the response did not contain a body") - -// GetJSON reads the response body into a raw JSON object. -// It returns ErrNoBody if there was no content. 
-func GetJSON(resp *http.Response) (map[string]any, error) { - body, err := exported.Payload(resp, nil) - if err != nil { - return nil, err - } - if len(body) == 0 { - return nil, ErrNoBody - } - // unmarshall the body to get the value - var jsonBody map[string]any - if err = json.Unmarshal(body, &jsonBody); err != nil { - return nil, err - } - return jsonBody, nil -} - -// provisioningState returns the provisioning state from the response or the empty string. -func provisioningState(jsonBody map[string]any) string { - jsonProps, ok := jsonBody["properties"] - if !ok { - return "" - } - props, ok := jsonProps.(map[string]any) - if !ok { - return "" - } - rawPs, ok := props["provisioningState"] - if !ok { - return "" - } - ps, ok := rawPs.(string) - if !ok { - return "" - } - return ps -} - -// status returns the status from the response or the empty string. -func status(jsonBody map[string]any) string { - rawStatus, ok := jsonBody["status"] - if !ok { - return "" - } - status, ok := rawStatus.(string) - if !ok { - return "" - } - return status -} - -// GetStatus returns the LRO's status from the response body. -// Typically used for Azure-AsyncOperation flows. -// If there is no status in the response body the empty string is returned. -func GetStatus(resp *http.Response) (string, error) { - jsonBody, err := GetJSON(resp) - if err != nil { - return "", err - } - return status(jsonBody), nil -} - -// GetProvisioningState returns the LRO's state from the response body. -// If there is no state in the response body the empty string is returned. -func GetProvisioningState(resp *http.Response) (string, error) { - jsonBody, err := GetJSON(resp) - if err != nil { - return "", err - } - return provisioningState(jsonBody), nil -} - -// GetResourceLocation returns the LRO's resourceLocation value from the response body. -// Typically used for Operation-Location flows. -// If there is no resourceLocation in the response body the empty string is returned. -func GetResourceLocation(resp *http.Response) (string, error) { - jsonBody, err := GetJSON(resp) - if err != nil { - return "", err - } - v, ok := jsonBody["resourceLocation"] - if !ok { - // it might be ok if the field doesn't exist, the caller must make that determination - return "", nil - } - vv, ok := v.(string) - if !ok { - return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) - } - return vv, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go deleted file mode 100644 index 238ef42ed..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go +++ /dev/null @@ -1,123 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package temporal - -import ( - "sync" - "time" -) - -// AcquireResource abstracts a method for refreshing a temporal resource. -type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) - -// Resource is a temporal resource (usually a credential) that requires periodic refreshing. 
-type Resource[TResource, TState any] struct { - // cond is used to synchronize access to the shared resource embodied by the remaining fields - cond *sync.Cond - - // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource - acquiring bool - - // resource contains the value of the shared resource - resource TResource - - // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired - expiration time.Time - - // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource - lastAttempt time.Time - - // acquireResource is the callback function that actually acquires the resource - acquireResource AcquireResource[TResource, TState] -} - -// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. -func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { - return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} -} - -// Get returns the underlying resource. -// If the resource is fresh, no refresh is performed. -func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { - // If the resource is expiring within this time window, update it eagerly. - // This allows other threads/goroutines to keep running by using the not-yet-expired - // resource value while one thread/goroutine updates the resource. - const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration - const backoff = 30 * time.Second // Minimum wait time between eager update attempts - - now, acquire, expired := time.Now(), false, false - - // acquire exclusive lock - er.cond.L.Lock() - resource := er.resource - - for { - expired = er.expiration.IsZero() || er.expiration.Before(now) - if expired { - // The resource was never acquired or has expired - if !er.acquiring { - // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it - er.acquiring, acquire = true, true - break - } - // Getting here means that this thread/goroutine will wait for the updated resource - } else if er.expiration.Add(-window).Before(now) { - // The resource is valid but is expiring within the time window - if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { - // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted - // to do so within the last 30 seconds, this thread/goroutine will do it - er.acquiring, acquire = true, true - break - } - // This thread/goroutine will use the existing resource value while another updates it - resource = er.resource - break - } else { - // The resource is not close to expiring, this thread/goroutine should use its current value - resource = er.resource - break - } - // If we get here, wait for the new resource value to be acquired/updated - er.cond.Wait() - } - er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked - - var err error - if acquire { - // This thread/goroutine has been selected to acquire/update the resource - var expiration time.Time - var newValue TResource - er.lastAttempt = now - newValue, expiration, err = er.acquireResource(state) - - // Atomically, update the shared resource's new value & expiration. 
- er.cond.L.Lock() - if err == nil { - // Update resource & expiration, return the new value - resource = newValue - er.resource, er.expiration = resource, expiration - } else if !expired { - // An eager update failed. Discard the error and return the current--still valid--resource value - err = nil - } - er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource - - // Wake up any waiting threads/goroutines since there is a resource they can ALL use - er.cond.L.Unlock() - er.cond.Broadcast() - } - return resource, err // Return the resource this thread/goroutine can use -} - -// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). -func (er *Resource[TResource, TState]) Expire() { - er.cond.L.Lock() - defer er.cond.L.Unlock() - - // Reset the expiration as if we never got this resource to begin with - er.expiration = time.Time{} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go deleted file mode 100644 index a3824bee8..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go deleted file mode 100644 index 278ac9cd1..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package uuid - -import ( - "crypto/rand" - "errors" - "fmt" - "strconv" -) - -// The UUID reserved variants. -const ( - reservedRFC4122 byte = 0x40 -) - -// A UUID representation compliant with specification in RFC4122 document. -type UUID [16]byte - -// New returns a new UUID using the RFC4122 algorithm. -func New() (UUID, error) { - u := UUID{} - // Set all bits to pseudo-random values. - // NOTE: this takes a process-wide lock - _, err := rand.Read(u[:]) - if err != nil { - return u, err - } - u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) - - var version byte = 4 - u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) - return u, nil -} - -// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. -func (u UUID) String() string { - return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} - -// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" -// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. 
-func Parse(s string) (UUID, error) { - var uuid UUID - // ensure format - switch len(s) { - case 36: - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 38: - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - s = s[1:37] - default: - return uuid, errors.New("invalid UUID format") - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - // parse chunks - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - b, err := strconv.ParseUint(s[x:x+2], 16, 8) - if err != nil { - return uuid, fmt.Errorf("invalid UUID format: %s", err) - } - uuid[i] = byte(b) - } - return uuid, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md deleted file mode 100644 index 149c85920..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ /dev/null @@ -1,276 +0,0 @@ -# Release History - -## 1.3.1 (2024-02-28) - -### Bugs Fixed - -* Re-enabled `SharedKeyCredential` authentication mode for non TLS protected endpoints. -* Use random write in `DownloadFile` method. Fixes [#22426](https://github.com/Azure/azure-sdk-for-go/issues/22426). - -## 1.3.0 (2024-02-12) - -### Bugs Fixed -* Fix concurrency issue while Downloading File. Fixes [#22156](https://github.com/Azure/azure-sdk-for-go/issues/22156). -* Fix panic when nil options bag is passed to NewGetPageRangesPager. Fixes [22356](https://github.com/Azure/azure-sdk-for-go/issues/22356). -* Fix file offset update after Download file. Fixes [#22297](https://github.com/Azure/azure-sdk-for-go/issues/22297). - -### Other Changes -* Updated the version of `azcore` to `1.9.2` - -## 1.3.0-beta.1 (2024-01-09) - -### Features Added - -* Updated service version to `2023-11-03`. -* Added support for Audience when OAuth is used. - -### Bugs Fixed - -* Block `SharedKeyCredential` authentication mode for non TLS protected endpoints. Fixes [#21841](https://github.com/Azure/azure-sdk-for-go/issues/21841). - -## 1.2.1 (2023-12-13) - -### Features Added - -* Exposed GetSASURL from specialized clients - -### Bugs Fixed - -* Fixed case in Blob Batch API when blob path has / in it. Fixes [#21649](https://github.com/Azure/azure-sdk-for-go/issues/21649). -* Fixed SharedKeyMissingError when using client.BlobClient().GetSASURL() method -* Fixed an issue that would cause metadata keys with empty values to be omitted when enumerating blobs. -* Fixed an issue where passing empty map to set blob tags API was causing panic. Fixes [#21869](https://github.com/Azure/azure-sdk-for-go/issues/21869). -* Fixed an issue where downloaded file has incorrect size when not a multiple of block size. Fixes [#21995](https://github.com/Azure/azure-sdk-for-go/issues/21995). -* Fixed case where `io.ErrUnexpectedEOF` was treated as expected error in `UploadStream`. Fixes [#21837](https://github.com/Azure/azure-sdk-for-go/issues/21837). - -### Other Changes - -* Updated the version of `azcore` to `1.9.1` and `azidentity` to `1.4.0`. - -## 1.2.0 (2023-10-11) - -### Bugs Fixed -* Fixed null pointer exception when `SetImmutabilityPolicyOptions` is passed as `nil`. 
- -## 1.2.0-beta.1 (2023-09-18) - -### Features Added -* Added support for service version 2020-12-06, 2021-02-12, 2021-04-10, 2021-06-08, 2021-08-06 , 2021-10-04, 2021-12-02, 2022-11-02, 2023-01-03, 2023-05-03, and 2023-08-03 -* Added support for [Cold Tier](https://learn.microsoft.com/azure/storage/blobs/access-tiers-overview?tabs=azure-portal). -* Added `CopySourceTag` option for `UploadBlobFromURLOptions` -* Added [FilterBlobs by Tags](https://learn.microsoft.com/rest/api/storageservices/find-blobs-by-tags-container) API for container client. -* Added `System` option to `ListContainersInclude` to allow listing of system containers (i.e, $web). -* Updated the SAS Version to `2021-12-02` and added `Encryption Scope` to Account SAS, Service SAS, and User Delegation SAS -* Added `ArchiveStatusRehydratePendingToCold` value to `ArchiveStatus` enum. -* Content length limit for `AppendBlob.AppendBlock()` and `AppendBlob.AppendBlockFromURL()` raised from 4 MB to 100 MB. - -### Bugs Fixed -* Fixed issue where some requests fail with mismatch in string to sign. -* Fixed service SAS creation where expiry time or permissions can be omitted when stored access policy is used. Fixes [#21229](https://github.com/Azure/azure-sdk-for-go/issues/21229). - -### Other Changes -* Updating version of azcore to 1.6.0. - -## 1.1.0 (2023-07-13) - -### Features Added - -* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). -* Added support for bearer challenge for identity based managed disks. -* Added support for GetAccountInfo to container and blob level clients. -* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). -* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL -* Added support for tag permission in Container SAS. - -### Bugs Fixed - -* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). -* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. -* Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735). - -### Other Changes - -* Add `dragonfly` to the list of build constraints for `blockblob`. -* Updating version of azcore to 1.6.0 and azidentity to 1.3.0 - -## 1.1.0-beta.1 (2023-05-09) - -### Features Added - -* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). -* Added support for bearer challenge for identity based managed disks. -* Added support for GetAccountInfo to container and blob level clients. -* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). -* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL -* Added support for tag permission in Container SAS. - -### Bugs Fixed - -* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). -* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. - -## 1.0.0 (2023-02-07) - -### Features Added - -* Add support to log calculated block size and count during uploads -* Added MissingSharedKeyCredential error type for cleaner UX. 
Related to [#19864](https://github.com/Azure/azure-sdk-for-go/issues/19864). - -### Breaking Changes - -* Changed API signatures to map correctly to Azure Storage REST APIs, These changes impact: - * `blob.GetSASURL()` - * `blockblob.StageBlockFromURL()` - * `container.SetAccessPolicy()` - * `container.GetSASURL()` - * `service.GetSASURL()` - * `service.FilterBlobs()` - * `lease.AcquireLease()` (blobs and containers) - * `lease.ChangeLease()` (blobs and containers) -* Type name changes: - * `CpkInfo` -> `CPKInfo` - * `CpkScopeInfo` -> `CPKScopeInfo` - * `RuleId` -> `RuleID` - * `PolicyId` -> `PolicyID` - * `CorsRule` -> `CORSRule` -* Remove `AccountServices` it is now hardcoded to blobs - -### Bugs Fixed - -* Fixed encoding issues seen in FilterBlobs. Fixes [#17421](https://github.com/Azure/azure-sdk-for-go/issues/17421). -* Fixing inconsistency seen with Metadata and ORS response. Fixes [#19688](https://github.com/Azure/azure-sdk-for-go/issues/19688). -* Fixed endless loop during pagination issue [#19773](https://github.com/Azure/azure-sdk-for-go/pull/19773). - -### Other Changes - -* Exported some missing types in the `blob`, `container` and `service` packages. Fixes [#19775](https://github.com/Azure/azure-sdk-for-go/issues/19775). -* SAS changes [#19781](https://github.com/Azure/azure-sdk-for-go/pull/19781): - * AccountSASPermissions: SetImmutabilityPolicy support - * ContainerSASPermissions: Move support - * Validations to ensure correct sas perm ordering - -## 0.6.1 (2022-12-09) - -### Bugs Fixed - -* Fix compilation error on Darwin. - -## 0.6.0 (2022-12-08) - -### Features Added - -* Added BlobDeleteType to DeleteOptions to allow access to ['Permanent'](https://learn.microsoft.com/rest/api/storageservices/delete-blob#permanent-delete) DeleteType. -* Added [Set Blob Expiry API](https://learn.microsoft.com/rest/api/storageservices/set-blob-expiry). -* Added method `ServiceClient()` to the `azblob.Client` type, allowing access to the underlying service client. -* Added support for object level immutability policy with versioning (Version Level WORM). -* Added the custom CRC64 polynomial used by storage for transactional hashes, and implemented automatic hashing for transactions. - -### Breaking Changes - -* Corrected the name for `saoid` and `suoid` SAS parameters in `BlobSignatureValues` struct as per [this](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas#construct-a-user-delegation-sas) -* Updated type of `BlockSize` from int to int64 in `UploadStreamOptions` -* CRC64 transactional hashes are now supplied with a `uint64` rather than a `[]byte` to conform with Golang's `hash/crc64` package -* Field `XMSContentCRC64` has been renamed to `ContentCRC64` -* The `Lease*` constant types and values in the `blob` and `container` packages have been moved to the `lease` package and their names fixed up to avoid stuttering. -* Fields `TransactionalContentCRC64` and `TransactionalContentMD5` have been replaced by `TransactionalValidation`. -* Fields `SourceContentCRC64` and `SourceContentMD5` have been replaced by `SourceContentValidation`. -* Field `TransactionalContentMD5` has been removed from type `AppendBlockFromURLOptions`. - -### Bugs Fixed - -* Corrected signing of User Delegation SAS. 
Fixes [#19372](https://github.com/Azure/azure-sdk-for-go/issues/19372) and [#19454](https://github.com/Azure/azure-sdk-for-go/issues/19454) -* Added formatting of start and expiry time in [SetAccessPolicy](https://learn.microsoft.com/rest/api/storageservices/set-container-acl#request-body). Fixes [#18712](https://github.com/Azure/azure-sdk-for-go/issues/18712) -* Uploading block blobs larger than 256MB can fail in some cases with error `net/http: HTTP/1.x transport connection broken`. -* Blob name parameters are URL-encoded before constructing the complete blob URL. - -### Other Changes - -* Added some missing public surface area in the `container` and `service` packages. -* The `UploadStream()` methods now use anonymous memory mapped files for buffers in order to reduce heap allocations/fragmentation. - * The anonymous memory mapped files are typically backed by the page/swap file, multiple files are not actually created. - -## 0.5.1 (2022-10-11) - -### Bugs Fixed - -* `GetSASURL()`: for container and blob clients, don't add a forward slash before the query string -* Fixed issue [#19249](https://github.com/Azure/azure-sdk-for-go/issues/19249) by increasing service version to '2020-02-10'. - -### Other Changes - -* Improved docs for client constructors. -* Updating azcore version to 1.1.4 - -## 0.5.0 (2022-09-29) - -### Breaking Changes - -* Complete architectural change for better user experience. Please view the [README](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme) - -### Features Added - -* Added [UserDelegationCredential](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas) which resolves [#18976](https://github.com/Azure/azure-sdk-for-go/issues/18976), [#16916](https://github.com/Azure/azure-sdk-for-go/issues/16916), [#18977](https://github.com/Azure/azure-sdk-for-go/issues/18977) -* Added [Restore Container API](https://learn.microsoft.com/rest/api/storageservices/restore-container). - -### Bugs Fixed - -* Fixed issue [#18767](https://github.com/Azure/azure-sdk-for-go/issues/18767) -* Fix deadlock when error writes are slow [#16937](https://github.com/Azure/azure-sdk-for-go/pull/16937) - -## 0.4.1 (2022-05-12) - -### Other Changes - -* Updated to latest `azcore` and `internal` modules - -## 0.4.0 (2022-04-19) - -### Breaking Changes - -* Fixed Issue #17150 : Renaming/refactoring high level methods. -* Fixed Issue #16972 : Constructors should return clients by reference. -* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags - remains the same. - -### Bugs Fixed - -* Fixed Issue #17515 : SetTags options bag missing leaseID. -* Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`. -* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call. -* Fixed Issue #17188 : `BlobURLParts` not supporting VersionID -* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods - ignoring the options bag. -* Fixed Issue #16920 : Fixing error handling example. -* Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations. -* Fixed Issue #16679 : Response parsing issue in List blobs API. - -## 0.3.0 (2022-02-09) - -### Breaking Changes - -* Updated to latest `azcore`. Public surface area is unchanged. 
-* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is - now `*RetryReaderOptions`. - -### Bugs Fixed - -* Fixed Issue #16193 : `azblob.GetSASToken` wrong signed resource. -* Fixed Issue #16223 : `HttpRange` does not expose its fields. -* Fixed Issue #16254 : Issue passing reader to upload `BlockBlobClient` -* Fixed Issue #16295 : Problem with listing blobs by using of `ListBlobsHierarchy()` -* Fixed Issue #16542 : Empty `StorageError` in the Azurite environment -* Fixed Issue #16679 : Unable to access Metadata when listing blobs -* Fixed Issue #16816 : `ContainerClient.GetSASToken` doesn't allow list permission. -* Fixed Issue #16988 : Too many arguments in call to `runtime.NewResponseError` - -## 0.2.0 (2021-11-03) - -### Breaking Changes - -* Clients now have one constructor per authentication method - -## 0.1.0 (2021-09-13) - -### Features Added - -* This is the initial preview release of the `azblob` library diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt deleted file mode 100644 index d1ca00f20..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ - MIT License - - Copyright (c) Microsoft Corporation. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md deleted file mode 100644 index 1f51959fa..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ /dev/null @@ -1,282 +0,0 @@ -# Azure Blob Storage module for Go - -> Service Version: 2023-11-03 - -Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob -Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or -definition, such as text or binary data. For more information, see [Introduction to Azure Blob Storage](https://learn.microsoft.com/azure/storage/blobs/storage-blobs-introduction). 
- -Use the Azure Blob Storage client module `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` to: - -* Authenticate clients with Azure Blob Storage -* Manipulate containers and blobs in an Azure storage account - -Key links: - -[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] | [Samples][go_samples] - -## Getting started - -### Prerequisites - -- Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install) -- Azure subscription - [Create a free account](https://azure.microsoft.com/free/) -- Azure storage account - To create a storage account, use tools including the [Azure portal][storage_account_create_portal], -[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. -Here's an example using the Azure CLI: - -```bash -az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS -``` - -### Install the package - -Install the Azure Blob Storage client module for Go with [go get][goget]: - -```bash -go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob -``` - -If you plan to authenticate with Azure Active Directory (recommended), also install the [azidentity][azidentity] module. - -```bash -go get github.com/Azure/azure-sdk-for-go/sdk/azidentity -``` - -### Authenticate the client - -To interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. - -```go -// create a credential for authenticating with Azure Active Directory -cred, err := azidentity.NewDefaultAzureCredential(nil) -// TODO: handle err - -// create an azblob.Client for the specified storage account that uses the above credential -client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/", cred, nil) -// TODO: handle err -``` - -Learn more about enabling Azure Active Directory for authentication with Azure Storage: - -* [Authorize access to blobs using Azure Active Directory][storage_ad] - -Other options for authentication include connection strings, shared key, shared access signatures (SAS), and anonymous public access. Use the appropriate client constructor function for the authentication mechanism you wish to use. For examples, see: - -* [Blob samples][samples] - -## Key concepts - -Blob Storage is designed for: - -- Serving images or documents directly to a browser. -- Storing files for distributed access. -- Streaming video and audio. -- Writing to log files. -- Storing data for backup and restore, disaster recovery, and archiving. -- Storing data for analysis by an on-premises or Azure-hosted service. - -Blob Storage offers three types of resources: - -- The _storage account_ -- One or more _containers_ in a storage account -- One or more _blobs_ in a container - -Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account. -The storage account is specified when the `azblob.Client` is constructed. - -### Specialized clients - -The Azure Blob Storage client module for Go also provides specialized clients in various subpackages. Use these clients when you need to interact with a specific kind of blob. 
Learn more about [block blobs, append blobs, and page blobs](https://learn.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs). - -- [appendblob][append_blob] -- [blockblob][block_blob] -- [pageblob][page_blob] - -The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. - -The [lease][lease] package contains clients for managing leases on blobs and containers. See the [REST API reference](https://learn.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. - -The [container][container] package contains APIs specific to containers. This includes APIs for setting access policies or properties, and more. - -The [service][service] package contains APIs specific to the Blob service. This includes APIs for manipulating containers, retrieving account information, and more. - -The [sas][sas] package contains utilities to aid in the creation and manipulation of shared access signature (SAS) tokens. -See the package's documentation for more information. - -### Goroutine safety - -We guarantee that all client instance methods are goroutine-safe and independent of each other (see [guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation to reuse client instances is always safe, even across goroutines. - -### Blob metadata - -Blob metadata name-value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. 
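These metadata rules apply to all of the client flavors listed in this section. As a minimal sketch (assuming an Azure AD credential and placeholder account, container, and blob names, with error handling reduced to `log.Fatal`), setting metadata through the blob-level client looks roughly like this:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	// placeholder URL; substitute a real storage account, container, and blob name
	const blobURL = "https://MYSTORAGEACCOUNT.blob.core.windows.net/sample-container/sample-blob"

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	client, err := blob.NewClient(blobURL, cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// metadata values are passed as *string; names are ASCII and treated as case-insensitive
	owner, env := "pbm", "staging"
	if _, err := client.SetMetadata(context.TODO(), map[string]*string{
		"owner":       &owner,
		"environment": &env,
	}, nil); err != nil {
		log.Fatal(err)
	}
}
```

The `map[string]*string` shape simply mirrors the `SetMetadata` signature used throughout these sources; the key and value strings above are illustrative placeholders.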
- -### Additional concepts - -[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) | -[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) | -[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) | -[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log) - - -## Examples - -### Upload a blob - -```go -const ( - account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" - containerName = "sample-container" - blobName = "sample-blob" - sampleFile = "path/to/sample/file" -) - -// authenticate with Azure Active Directory -cred, err := azidentity.NewDefaultAzureCredential(nil) -// TODO: handle error - -// create a client for the specified storage account -client, err := azblob.NewClient(account, cred, nil) -// TODO: handle error - -// open the file for reading -file, err := os.OpenFile(sampleFile, os.O_RDONLY, 0) -// TODO: handle error -defer file.Close() - -// upload the file to the specified container with the specified blob name -_, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil) -// TODO: handle error -``` - -### Download a blob - -```go -// this example accesses a public blob via anonymous access, so no credentials are required -client, err := azblob.NewClientWithNoCredential("https://azurestoragesamples.blob.core.windows.net/", nil) -// TODO: handle error - -// create or open a local file where we can download the blob -file, err := os.Create("cloud.jpg") -// TODO: handle error -defer file.Close() - -// download the blob -_, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil) -// TODO: handle error -``` - -### Enumerate blobs - -```go -const ( - account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" - containerName = "sample-container" -) - -// authenticate with Azure Active Directory -cred, err := azidentity.NewDefaultAzureCredential(nil) -// TODO: handle error - -// create a client for the specified storage account -client, err := azblob.NewClient(account, cred, nil) -// TODO: handle error - -// blob listings are returned across multiple pages -pager := client.NewListBlobsFlatPager(containerName, nil) - -// continue fetching pages until no more remain -for pager.More() { - // advance to the next page - page, err := pager.NextPage(context.TODO()) - // TODO: handle error - - // print the blob names for this page - for _, blob := range page.Segment.BlobItems { - fmt.Println(*blob.Name) - } -} -``` - -## Troubleshooting - -All Blob service operations will return an -[*azcore.ResponseError][azcore_response_error] on failure with a -populated `ErrorCode` field. Many of these errors are recoverable. -The [bloberror][blob_error] package provides the possible Storage error codes -along with helper facilities for error handling. 
- -```go -const ( - connectionString = "" - containerName = "sample-container" -) - -// create a client with the provided connection string -client, err := azblob.NewClientFromConnectionString(connectionString, nil) -// TODO: handle error - -// try to delete the container, avoiding any potential race conditions with an in-progress or completed deletion -_, err = client.DeleteContainer(context.TODO(), containerName, nil) - -if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNotFound) { - // ignore any errors if the container is being deleted or already has been deleted -} else if err != nil { - // TODO: some other error -} -``` - -## Next steps - -Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. - -## Contributing - -See the [Storage CONTRIBUTING.md][storage_contrib] for details on building, -testing, and contributing to this library. - -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. For -details, visit [cla.microsoft.com][cla]. - -This project has adopted the [Microsoft Open Source Code of Conduct][coc]. -For more information see the [Code of Conduct FAQ][coc_faq] -or contact [opencode@microsoft.com][coc_contact] with any -additional questions or comments. - -![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fstorage%2Fazblob%2FREADME.png) - - -[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob -[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#section_documentation -[rest_docs]: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api -[product_docs]: https://learn.microsoft.com/azure/storage/blobs/storage-blobs-overview -[godevdl]: https://go.dev/dl/ -[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them -[go_samples]: https://github.com/Azure-Samples/azure-sdk-for-go-samples/tree/main -[storage_account_docs]: https://learn.microsoft.com/azure/storage/common/storage-account-overview -[storage_account_create_ps]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell -[storage_account_create_cli]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli -[storage_account_create_portal]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal -[azure_cli]: https://learn.microsoft.com/cli/azure -[azure_sub]: https://azure.microsoft.com/free/ -[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity -[storage_ad]: https://learn.microsoft.com/azure/storage/common/storage-auth-aad -[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError -[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go -[append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go -[blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blob/client.go -[blob_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/bloberror/error_codes.go -[block_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blockblob/client.go 
-[container]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/container/client.go -[lease]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/lease -[page_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/pageblob/client.go -[sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/sas -[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/service/client.go -[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md -[cla]: https://cla.microsoft.com -[coc]: https://opensource.microsoft.com/codeofconduct/ -[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ -[coc_contact]: mailto:opencode@microsoft.com diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go deleted file mode 100644 index a62abfdc0..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ /dev/null @@ -1,366 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package appendblob - -import ( - "context" - "errors" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" - "io" - "os" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// ClientOptions contains the optional parameters when creating a Client. -type ClientOptions base.ClientOptions - -// Client represents a client to an Azure Storage append blob; -type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient] - -// NewClient creates an instance of Client with the specified values. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt -// - cred - an Azure AD credential, typically obtained via the azidentity module -// - options - client options; pass nil to accept the default values -func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - audience := base.GetAudience((*base.ClientOptions)(options)) - authPolicy := shared.NewStorageChallengePolicy(cred, audience) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - - return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil -} - -// NewClientWithNoCredential creates an instance of Client with the specified values. -// This is used to anonymously access a blob or with a shared access signature (SAS) token. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? 
-// - options - client options; pass nil to accept the default values -func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { - conOptions := shared.GetClientOptions(options) - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - - return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil -} - -// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt -// - cred - a SharedKeyCredential created with the matching blob's storage account and access key -// - options - client options; pass nil to accept the default values -func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { - authPolicy := exported.NewSharedKeyCredPolicy(cred) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - - return (*Client)(base.NewAppendBlobClient(blobURL, azClient, cred)), nil -} - -// NewClientFromConnectionString creates an instance of Client with the specified values. -// - connectionString - a connection string for the desired storage account -// - containerName - the name of the container within the storage account -// - blobName - the name of the blob within the container -// - options - client options; pass nil to accept the default values -func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { - parsed, err := shared.ParseConnectionString(connectionString) - if err != nil { - return nil, err - } - parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) - - if parsed.AccountKey != "" && parsed.AccountName != "" { - credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) - if err != nil { - return nil, err - } - return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) - } - - return NewClientWithNoCredential(parsed.ServiceURL, options) -} - -// BlobClient returns the embedded blob client for this AppendBlob client. -func (ab *Client) BlobClient() *blob.Client { - innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) - return (*blob.Client)(innerBlob) -} - -func (ab *Client) sharedKey() *blob.SharedKeyCredential { - return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) -} - -func (ab *Client) generated() *generated.AppendBlobClient { - _, appendBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)) - return appendBlob -} - -func (ab *Client) innerBlobGenerated() *generated.BlobClient { - b := ab.BlobClient() - return base.InnerClient((*base.Client[generated.BlobClient])(b)) -} - -// URL returns the URL endpoint used by the Client object. -func (ab *Client) URL() string { - return ab.generated().Endpoint() -} - -// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. 
-func (ab *Client) WithSnapshot(snapshot string) (*Client, error) { - p, err := blob.ParseURL(ab.URL()) - if err != nil { - return nil, err - } - p.Snapshot = snapshot - - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil -} - -// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (ab *Client) WithVersionID(versionID string) (*Client, error) { - p, err := blob.ParseURL(ab.URL()) - if err != nil { - return nil, err - } - p.VersionID = versionID - - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil -} - -// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (ab *Client) Create(ctx context.Context, o *CreateOptions) (CreateResponse, error) { - opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() - resp, err := ab.generated().Create(ctx, 0, opts, httpHeaders, leaseAccessConditions, cpkInfo, - cpkScopeInfo, modifiedAccessConditions) - return resp, err -} - -// AppendBlock writes a stream to a new block of data to the end of the existing append blob. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. -func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *AppendBlockOptions) (AppendBlockResponse, error) { - count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) - if err != nil { - return AppendBlockResponse{}, nil - } - - appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format() - - if o != nil && o.TransactionalValidation != nil { - body, err = o.TransactionalValidation.Apply(body, appendOptions) - if err != nil { - return AppendBlockResponse{}, nil - } - } - - resp, err := ab.generated().AppendBlock(ctx, - count, - body, - appendOptions, - leaseAccessConditions, - appendPositionAccessConditions, - cpkInfo, - cpkScope, - modifiedAccessConditions) - - return resp, err -} - -// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. -func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) { - appendBlockFromURLOptions, - cpkInfo, - cpkScopeInfo, - leaseAccessConditions, - appendPositionAccessConditions, - modifiedAccessConditions, - sourceModifiedAccessConditions := o.format() - - // content length should be 0 on * from URL. always. It's a 400 if it isn't. - resp, err := ab.generated().AppendBlockFromURL(ctx, - source, - 0, - appendBlockFromURLOptions, - cpkInfo, - cpkScopeInfo, - leaseAccessConditions, - appendPositionAccessConditions, - modifiedAccessConditions, - sourceModifiedAccessConditions) - return resp, err -} - -// Seal - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only. 
-// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal -func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) { - leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := o.format() - resp, err := ab.generated().Seal(ctx, - nil, - leaseAccessConditions, - modifiedAccessConditions, - positionAccessConditions) - return resp, err -} - -// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. -// Note that deleting a blob also deletes all its snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (ab *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { - return ab.BlobClient().Delete(ctx, o) -} - -// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. -func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { - return ab.BlobClient().Undelete(ctx, o) -} - -// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (ab *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { - return ab.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) -} - -// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (ab *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { - return ab.BlobClient().DeleteImmutabilityPolicy(ctx, options) -} - -// SetLegalHold operation enables users to set legal hold on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (ab *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { - return ab.BlobClient().SetLegalHold(ctx, legalHold, options) -} - -// SetTier -// Deprecated: SetTier only works for page blob in premium storage account and block blob in blob storage account. -func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { - return blob.SetTierResponse{}, errors.New("operation will not work on this blob type. SetTier only works for page blob in premium storage account and block blob in blob storage account") -} - -// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. -// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry -func (ab *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) { - if expiryType == nil { - expiryType = ExpiryTypeNever{} - } - et, opts := expiryType.Format(o) - resp, err := ab.innerBlobGenerated().SetExpiry(ctx, et, opts) - return resp, err -} - -// GetProperties returns the blob's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. 
-func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { - return ab.BlobClient().GetProperties(ctx, o) -} - -// GetAccountInfo provides account level information -// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. -func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { - return ab.BlobClient().GetAccountInfo(ctx, o) -} - -// SetHTTPHeaders changes a blob's HTTP headers. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) -} - -// SetMetadata changes a blob's metadata. -// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { - return ab.BlobClient().SetMetadata(ctx, metadata, o) -} - -// CreateSnapshot creates a read-only snapshot of a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. -func (ab *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { - return ab.BlobClient().CreateSnapshot(ctx, o) -} - -// StartCopyFromURL copies the data at the source URL to a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. -func (ab *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { - return ab.BlobClient().StartCopyFromURL(ctx, copySource, o) -} - -// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. -func (ab *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { - return ab.BlobClient().AbortCopyFromURL(ctx, copyID, o) -} - -// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. -// Each call to this operation replaces all existing tags attached to the blob. -// To remove all tags from the blob, call this operation with no tags set. -// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags -func (ab *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { - return ab.BlobClient().SetTags(ctx, tags, o) -} - -// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. -// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags -func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { - return ab.BlobClient().GetTags(ctx, o) -} - -// CopyFromURL -// Deprecated: CopyFromURL works only with block blob -func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { - return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. 
CopyFromURL works only with block blob") -} - -// GetSASURL is a convenience method for generating a SAS token for the currently pointed at append blob. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (ab *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { - return ab.BlobClient().GetSASURL(permissions, expiry, o) -} - -// Concurrent Download Functions ----------------------------------------------------------------------------------------- - -// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. -func (ab *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { - return ab.BlobClient().DownloadStream(ctx, o) -} - -// DownloadBuffer downloads an Azure blob to a buffer with parallel. -func (ab *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { - return ab.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) -} - -// DownloadFile downloads an Azure blob to a local file. -// The file would be truncated if the size doesn't match. -func (ab *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { - return ab.BlobClient().DownloadFile(ctx, file, o) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go deleted file mode 100644 index 0834743f0..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go +++ /dev/null @@ -1,180 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package appendblob - -import ( - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// Type Declarations --------------------------------------------------------------------- - -// AppendPositionAccessConditions contains a group of parameters for the Client.AppendBlock method. -type AppendPositionAccessConditions = generated.AppendPositionAccessConditions - -// Request Model Declaration ------------------------------------------------------------------------------------------- - -// CreateOptions provides set of configurations for Create Append Blob operation -type CreateOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting - - // Specified if a legal hold should be set on the blob. - LegalHold *bool - - AccessConditions *blob.AccessConditions - - HTTPHeaders *blob.HTTPHeaders - - CPKInfo *blob.CPKInfo - - CPKScopeInfo *blob.CPKScopeInfo - - // Optional. Used to set blob tags in various blob operations. - Tags map[string]string - - // Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs - // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source - // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. - // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]*string -} - -func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := generated.AppendBlobClientCreateOptions{ - BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), - Metadata: o.Metadata, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - LegalHold: o.LegalHold, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlockOptions contains the optional parameters for the Client.AppendBlock method. -type AppendBlockOptions struct { - // TransactionalValidation specifies the transfer validation type to use. - // The default is nil (no transfer validation). - TransactionalValidation blob.TransferValidationType - - AppendPositionAccessConditions *AppendPositionAccessConditions - - CPKInfo *blob.CPKInfo - - CPKScopeInfo *blob.CPKScopeInfo - - AccessConditions *blob.AccessConditions -} - -func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions, - *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &generated.AppendBlobClientAppendBlockOptions{}, o.AppendPositionAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method. -type AppendBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - - // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. - SourceContentValidation blob.SourceContentValidationType - - AppendPositionAccessConditions *AppendPositionAccessConditions - - CPKInfo *blob.CPKInfo - - CPKScopeInfo *blob.CPKScopeInfo - - SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions - - AccessConditions *blob.AccessConditions - - // Range specifies a range of bytes. The default value is all bytes. 
- Range blob.HTTPRange -} - -func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CPKInfo, - *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions, - *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil, nil - } - - options := &generated.AppendBlobClientAppendBlockFromURLOptions{ - SourceRange: exported.FormatHTTPRange(o.Range), - CopySourceAuthorization: o.CopySourceAuthorization, - } - - if o.SourceContentValidation != nil { - o.SourceContentValidation.Apply(options) - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.CPKInfo, o.CPKScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SealOptions provides set of configurations for SealAppendBlob operation -type SealOptions struct { - AccessConditions *blob.AccessConditions - AppendPositionAccessConditions *AppendPositionAccessConditions -} - -func (o *SealOptions) format() (*generated.LeaseAccessConditions, - *generated.ModifiedAccessConditions, *generated.AppendPositionAccessConditions) { - if o == nil { - return nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return leaseAccessConditions, modifiedAccessConditions, o.AppendPositionAccessConditions - -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ExpiryType defines values for ExpiryType -type ExpiryType = exported.ExpiryType - -// ExpiryTypeAbsolute defines the absolute time for the blob expiry -type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute - -// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry -type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow - -// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry -type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation - -// ExpiryTypeNever defines that the blob will be set to never expire -type ExpiryTypeNever = exported.ExpiryTypeNever - -// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. -type SetExpiryOptions = exported.SetExpiryOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go deleted file mode 100644 index e6851237c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package appendblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// CreateResponse contains the response from method Client.Create. -type CreateResponse = generated.AppendBlobClientCreateResponse - -// AppendBlockResponse contains the response from method Client.AppendBlock. 
-type AppendBlockResponse = generated.AppendBlobClientAppendBlockResponse - -// AppendBlockFromURLResponse contains the response from method Client.AppendBlockFromURL. -type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLResponse - -// SealResponse contains the response from method Client.Seal. -type SealResponse = generated.AppendBlobClientSealResponse - -// SetExpiryResponse contains the response from method Client.SetExpiry. -type SetExpiryResponse = generated.BlobClientSetExpiryResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json deleted file mode 100644 index d971ff1ec..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "AssetsRepo": "Azure/azure-sdk-assets", - "AssetsRepoPrefixPath": "go", - "TagPrefix": "go/storage/azblob", - "Tag": "go/storage/azblob_71b0a04c12" -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go deleted file mode 100644 index 287f37056..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ /dev/null @@ -1,471 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blob - -import ( - "context" - "io" - "os" - "sync" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" -) - -// ClientOptions contains the optional parameters when creating a Client. -type ClientOptions base.ClientOptions - -// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. -type Client base.Client[generated.BlobClient] - -// NewClient creates an instance of Client with the specified values. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt -// - cred - an Azure AD credential, typically obtained via the azidentity module -// - options - client options; pass nil to accept the default values -func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - audience := base.GetAudience((*base.ClientOptions)(options)) - authPolicy := shared.NewStorageChallengePolicy(cred, audience) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewBlobClient(blobURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientWithNoCredential creates an instance of Client with the specified values. 
-// This is used to anonymously access a blob or with a shared access signature (SAS) token. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? -// - options - client options; pass nil to accept the default values -func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { - conOptions := shared.GetClientOptions(options) - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewBlobClient(blobURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt -// - cred - a SharedKeyCredential created with the matching blob's storage account and access key -// - options - client options; pass nil to accept the default values -func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { - authPolicy := exported.NewSharedKeyCredPolicy(cred) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewBlobClient(blobURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientFromConnectionString creates an instance of Client with the specified values. -// - connectionString - a connection string for the desired storage account -// - containerName - the name of the container within the storage account -// - blobName - the name of the blob within the container -// - options - client options; pass nil to accept the default values -func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { - parsed, err := shared.ParseConnectionString(connectionString) - if err != nil { - return nil, err - } - parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) - - if parsed.AccountKey != "" && parsed.AccountName != "" { - credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) - if err != nil { - return nil, err - } - return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) - } - - return NewClientWithNoCredential(parsed.ServiceURL, options) -} - -func (b *Client) generated() *generated.BlobClient { - return base.InnerClient((*base.Client[generated.BlobClient])(b)) -} - -func (b *Client) sharedKey() *SharedKeyCredential { - return base.SharedKey((*base.Client[generated.BlobClient])(b)) -} - -func (b *Client) credential() any { - return base.Credential((*base.Client[generated.BlobClient])(b)) -} - -func (b *Client) getClientOptions() *base.ClientOptions { - return base.GetClientOptions((*base.Client[generated.BlobClient])(b)) -} - -// URL returns the URL endpoint used by the Client object. -func (b *Client) URL() string { - return b.generated().Endpoint() -} - -// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. 
-func (b *Client) WithSnapshot(snapshot string) (*Client, error) { - p, err := ParseURL(b.URL()) - if err != nil { - return nil, err - } - p.Snapshot = snapshot - - return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil -} - -// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (b *Client) WithVersionID(versionID string) (*Client, error) { - p, err := ParseURL(b.URL()) - if err != nil { - return nil, err - } - p.VersionID = versionID - - return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil -} - -// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. -// Note that deleting a blob also deletes all its snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (b *Client) Delete(ctx context.Context, o *DeleteOptions) (DeleteResponse, error) { - deleteOptions, leaseInfo, accessConditions := o.format() - resp, err := b.generated().Delete(ctx, deleteOptions, leaseInfo, accessConditions) - return resp, err -} - -// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. -func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResponse, error) { - undeleteOptions := o.format() - resp, err := b.generated().Undelete(ctx, undeleteOptions) - return resp, err -} - -// SetTier operation sets the tier on a blob. The operation is allowed on a page -// blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and -// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation -// does not update the blob's ETag. -// For detailed information about block blob level tiers see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. -func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) { - opts, leaseAccessConditions, modifiedAccessConditions := o.format() - resp, err := b.generated().SetTier(ctx, tier, opts, leaseAccessConditions, modifiedAccessConditions) - return resp, err -} - -// GetProperties returns the blob's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. -func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { - opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions := options.format() - resp, err := b.generated().GetProperties(ctx, opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions) - return resp, err -} - -// SetHTTPHeaders changes a blob's HTTP headers. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { - opts, leaseAccessConditions, modifiedAccessConditions := o.format() - resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions) - return resp, err -} - -// SetMetadata changes a blob's metadata. -// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (b *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *SetMetadataOptions) (SetMetadataResponse, error) { - basics := generated.BlobClientSetMetadataOptions{Metadata: metadata} - leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions := o.format() - resp, err := b.generated().SetMetadata(ctx, &basics, leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) - return resp, err -} - -// CreateSnapshot creates a read-only snapshot of a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. -func (b *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) { - // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter - // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this - // performance hit. - opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format() - resp, err := b.generated().CreateSnapshot(ctx, opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions) - - return resp, err -} - -// StartCopyFromURL copies the data at the source URL to a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. -func (b *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) { - opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions := options.format() - resp, err := b.generated().StartCopyFromURL(ctx, copySource, opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) - return resp, err -} - -// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. -func (b *Client) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyFromURLOptions) (AbortCopyFromURLResponse, error) { - opts, leaseAccessConditions := options.format() - resp, err := b.generated().AbortCopyFromURL(ctx, copyID, opts, leaseAccessConditions) - return resp, err -} - -// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. -// Each call to this operation replaces all existing tags attached to the blob. -// To remove all tags from the blob, call this operation with no tags set. 
-// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags -func (b *Client) SetTags(ctx context.Context, tags map[string]string, options *SetTagsOptions) (SetTagsResponse, error) { - serializedTags := shared.SerializeBlobTags(tags) - blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() - resp, err := b.generated().SetTags(ctx, *serializedTags, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) - return resp, err -} - -// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. -// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags -func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsResponse, error) { - blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() - resp, err := b.generated().GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) - return resp, err - -} - -// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. Mode defaults to "Unlocked". -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (b *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *SetImmutabilityPolicyOptions) (SetImmutabilityPolicyResponse, error) { - blobSetImmutabilityPolicyOptions, modifiedAccessConditions := options.format() - blobSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry = &expiryTime - resp, err := b.generated().SetImmutabilityPolicy(ctx, blobSetImmutabilityPolicyOptions, modifiedAccessConditions) - return resp, err -} - -// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (b *Client) DeleteImmutabilityPolicy(ctx context.Context, options *DeleteImmutabilityPolicyOptions) (DeleteImmutabilityPolicyResponse, error) { - deleteImmutabilityOptions := options.format() - resp, err := b.generated().DeleteImmutabilityPolicy(ctx, deleteImmutabilityOptions) - return resp, err -} - -// SetLegalHold operation enables users to set legal hold on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetLegalHoldOptions) (SetLegalHoldResponse, error) { - setLegalHoldOptions := options.format() - resp, err := b.generated().SetLegalHold(ctx, legalHold, setLegalHoldOptions) - return resp, err -} - -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. -func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { - copyOptions, smac, mac, lac, cpkScopeInfo := options.format() - resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac, cpkScopeInfo) - return resp, err -} - -// GetAccountInfo provides account level information -// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. 
-func (b *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { - getAccountInfoOptions := o.format() - resp, err := b.generated().GetAccountInfo(ctx, getAccountInfoOptions) - return resp, err -} - -// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { - if b.sharedKey() == nil { - return "", bloberror.MissingSharedKeyCredential - } - - urlParts, err := ParseURL(b.URL()) - if err != nil { - return "", err - } - - t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) - - if err != nil { - t = time.Time{} - } - st := o.format() - - qps, err := sas.BlobSignatureValues{ - ContainerName: urlParts.ContainerName, - BlobName: urlParts.BlobName, - SnapshotTime: t, - Version: sas.Version, - Permissions: permissions.String(), - StartTime: st, - ExpiryTime: expiry.UTC(), - }.SignWithSharedKey(b.sharedKey()) - - if err != nil { - return "", err - } - - endpoint := b.URL() + "?" + qps.Encode() - - return endpoint, nil -} - -// Concurrent Download Functions ----------------------------------------------------------------------------------------- - -// downloadBuffer downloads an Azure blob to a WriterAt in parallel. -func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { - if o.BlockSize == 0 { - o.BlockSize = DefaultDownloadBlockSize - } - - count := o.Range.Count - if count == CountToEnd { // If size not specified, calculate it - // If we don't have the length at all, get it - gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) - if err != nil { - return 0, err - } - count = *gr.ContentLength - o.Range.Offset - } - - if count <= 0 { - // The file is empty, there is nothing to download. - return 0, nil - } - - // Prepare and do parallel download. - progress := int64(0) - progressLock := &sync.Mutex{} - - err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ - OperationName: "downloadBlobToWriterAt", - TransferSize: count, - ChunkSize: o.BlockSize, - NumChunks: uint64(((count - 1) / o.BlockSize) + 1), - Concurrency: o.Concurrency, - Operation: func(ctx context.Context, chunkStart int64, count int64) error { - downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ - Offset: chunkStart + o.Range.Offset, - Count: count, - }, nil) - dr, err := b.DownloadStream(ctx, downloadBlobOptions) - if err != nil { - return err - } - var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) - if o.Progress != nil { - rangeProgress := int64(0) - body = streaming.NewResponseProgress( - body, - func(bytesTransferred int64) { - diff := bytesTransferred - rangeProgress - rangeProgress = bytesTransferred - progressLock.Lock() - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - _, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body) - if err != nil { - return err - } - err = body.Close() - return err - }, - }) - if err != nil { - return 0, err - } - return count, nil -} - -// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
-func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { - downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions := o.format() - if o == nil { - o = &DownloadStreamOptions{} - } - - dr, err := b.generated().Download(ctx, downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) - if err != nil { - return DownloadStreamResponse{}, err - } - - return DownloadStreamResponse{ - client: b, - DownloadResponse: dr, - getInfo: httpGetterInfo{Range: o.Range, ETag: dr.ETag}, - ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), - cpkInfo: o.CPKInfo, - cpkScope: o.CPKScopeInfo, - }, err -} - -// DownloadBuffer downloads an Azure blob to a buffer with parallel. -func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) { - if o == nil { - o = &DownloadBufferOptions{} - } - return b.downloadBuffer(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) -} - -// DownloadFile downloads an Azure blob to a local file. -// The file would be truncated if the size doesn't match. -func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) { - if o == nil { - o = &DownloadFileOptions{} - } - do := (*downloadOptions)(o) - - // 1. Calculate the size of the destination file - var size int64 - - count := do.Range.Count - if count == CountToEnd { - // Try to get Azure blob's size - getBlobPropertiesOptions := do.getBlobPropertiesOptions() - props, err := b.GetProperties(ctx, getBlobPropertiesOptions) - if err != nil { - return 0, err - } - size = *props.ContentLength - do.Range.Offset - } else { - size = count - } - - // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. - stat, err := file.Stat() - if err != nil { - return 0, err - } - if stat.Size() != size { - if err = file.Truncate(size); err != nil { - return 0, err - } - } - - if size > 0 { - return b.downloadBuffer(ctx, file, *do) - } else { // if the blob's size is 0, there is no need in downloading it - return 0, nil - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go deleted file mode 100644 index daef800ed..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go +++ /dev/null @@ -1,235 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package blob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -const ( - CountToEnd = 0 - - SnapshotTimeFormat = exported.SnapshotTimeFormat - - // DefaultDownloadBlockSize is default block size - DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB - - // DefaultConcurrency is the default number of blocks downloaded or uploaded in parallel - DefaultConcurrency = shared.DefaultConcurrency -) - -// BlobType defines values for BlobType -type BlobType = generated.BlobType - -const ( - BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob - BlobTypePageBlob BlobType = generated.BlobTypePageBlob - BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob -) - -// PossibleBlobTypeValues returns the possible values for the BlobType const type. -func PossibleBlobTypeValues() []BlobType { - return generated.PossibleBlobTypeValues() -} - -// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType -type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType - -const ( - DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude - DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly -) - -// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. -func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { - return generated.PossibleDeleteSnapshotsOptionTypeValues() -} - -// AccessTier defines values for Blob Access Tier. -type AccessTier = generated.AccessTier - -const ( - AccessTierArchive AccessTier = generated.AccessTierArchive - AccessTierCool AccessTier = generated.AccessTierCool - AccessTierCold AccessTier = generated.AccessTierCold - AccessTierHot AccessTier = generated.AccessTierHot - AccessTierP10 AccessTier = generated.AccessTierP10 - AccessTierP15 AccessTier = generated.AccessTierP15 - AccessTierP20 AccessTier = generated.AccessTierP20 - AccessTierP30 AccessTier = generated.AccessTierP30 - AccessTierP4 AccessTier = generated.AccessTierP4 - AccessTierP40 AccessTier = generated.AccessTierP40 - AccessTierP50 AccessTier = generated.AccessTierP50 - AccessTierP6 AccessTier = generated.AccessTierP6 - AccessTierP60 AccessTier = generated.AccessTierP60 - AccessTierP70 AccessTier = generated.AccessTierP70 - AccessTierP80 AccessTier = generated.AccessTierP80 - AccessTierPremium AccessTier = generated.AccessTierPremium -) - -// PossibleAccessTierValues returns the possible values for the AccessTier const type. -func PossibleAccessTierValues() []AccessTier { - return generated.PossibleAccessTierValues() -} - -// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. -// Valid values are High and Standard. -type RehydratePriority = generated.RehydratePriority - -const ( - RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh - RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard -) - -// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. 
-func PossibleRehydratePriorityValues() []RehydratePriority { - return generated.PossibleRehydratePriorityValues() -} - -// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode -type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode - -const ( - ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable - ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked - ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked -) - -// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. -func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { - return generated.PossibleImmutabilityPolicyModeValues() -} - -// ImmutabilityPolicySetting returns the possible values for the ImmutabilityPolicySetting const type. -type ImmutabilityPolicySetting = generated.ImmutabilityPolicySetting - -const ( - ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingUnlocked - ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingLocked -) - -// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. -func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { - return generated.PossibleImmutabilityPolicySettingValues() -} - -// CopyStatusType defines values for CopyStatusType -type CopyStatusType = generated.CopyStatusType - -const ( - CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending - CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess - CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted - CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed -) - -// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. -func PossibleCopyStatusTypeValues() []CopyStatusType { - return generated.PossibleCopyStatusTypeValues() -} - -// EncryptionAlgorithmType defines values for EncryptionAlgorithmType. -type EncryptionAlgorithmType = generated.EncryptionAlgorithmType - -const ( - EncryptionAlgorithmTypeNone EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone - EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256 -) - -// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. -func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { - return generated.PossibleEncryptionAlgorithmTypeValues() -} - -// ArchiveStatus defines values for ArchiveStatus. -type ArchiveStatus = generated.ArchiveStatus - -const ( - ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool - ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot - ArchiveStatusRehydratePendingToCold ArchiveStatus = generated.ArchiveStatusRehydratePendingToCold -) - -// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. -func PossibleArchiveStatusValues() []ArchiveStatus { - return generated.PossibleArchiveStatusValues() -} - -// DeleteType defines values for DeleteType. 
-type DeleteType = generated.DeleteType - -const ( - DeleteTypeNone DeleteType = generated.DeleteTypeNone - DeleteTypePermanent DeleteType = generated.DeleteTypePermanent -) - -// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. -func PossibleDeleteTypeValues() []DeleteType { - return generated.PossibleDeleteTypeValues() -} - -// QueryFormatType - The quick query format type. -type QueryFormatType = generated.QueryFormatType - -const ( - QueryFormatTypeDelimited QueryFormatType = generated.QueryFormatTypeDelimited - QueryFormatTypeJSON QueryFormatType = generated.QueryFormatTypeJSON - QueryFormatTypeArrow QueryFormatType = generated.QueryFormatTypeArrow - QueryFormatTypeParquet QueryFormatType = generated.QueryFormatTypeParquet -) - -// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. -func PossibleQueryFormatTypeValues() []QueryFormatType { - return generated.PossibleQueryFormatTypeValues() -} - -// TransferValidationType abstracts the various mechanisms used to verify a transfer. -type TransferValidationType = exported.TransferValidationType - -// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. -type TransferValidationTypeCRC64 = exported.TransferValidationTypeCRC64 - -// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. -func TransferValidationTypeComputeCRC64() TransferValidationType { - return exported.TransferValidationTypeComputeCRC64() -} - -// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. -type TransferValidationTypeMD5 = exported.TransferValidationTypeMD5 - -// SourceContentValidationType abstracts the various mechanisms used to validate source content. -// This interface is not publicly implementable. -type SourceContentValidationType interface { - Apply(generated.SourceContentSetter) - notPubliclyImplementable() -} - -// SourceContentValidationTypeCRC64 is a SourceContentValidationType used to provide a precomputed CRC64. -type SourceContentValidationTypeCRC64 []byte - -// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeCRC64. -func (s SourceContentValidationTypeCRC64) Apply(src generated.SourceContentSetter) { - src.SetSourceContentCRC64(s) -} - -func (SourceContentValidationTypeCRC64) notPubliclyImplementable() {} - -var _ SourceContentValidationType = (SourceContentValidationTypeCRC64)(nil) - -// SourceContentValidationTypeMD5 is a SourceContentValidationType used to provide a precomputed MD5. -type SourceContentValidationTypeMD5 []byte - -// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeMD5. -func (s SourceContentValidationTypeMD5) Apply(src generated.SourceContentSetter) { - src.SetSourceContentMD5(s) -} - -func (SourceContentValidationTypeMD5) notPubliclyImplementable() {} - -var _ SourceContentValidationType = (SourceContentValidationTypeMD5)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go deleted file mode 100644 index d73346889..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go +++ /dev/null @@ -1,580 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
See License.txt in the project root for license information. - -package blob - -import ( - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// SharedKeyCredential contains an account's name and its primary or secondary key. -type SharedKeyCredential = exported.SharedKeyCredential - -// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the -// storage account's name and either its primary or secondary key. -func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { - return exported.NewSharedKeyCredential(accountName, accountKey) -} - -// Type Declarations --------------------------------------------------------------------- - -// AccessConditions identifies blob-specific access conditions which you optionally set. -type AccessConditions = exported.BlobAccessConditions - -// LeaseAccessConditions contains optional parameters to access leased entity. -type LeaseAccessConditions = exported.LeaseAccessConditions - -// ModifiedAccessConditions contains a group of parameters for specifying access conditions. -type ModifiedAccessConditions = exported.ModifiedAccessConditions - -// CPKInfo contains a group of parameters for client provided encryption key. -type CPKInfo = generated.CPKInfo - -// CPKScopeInfo contains a group of parameters for client provided encryption scope. -type CPKScopeInfo = generated.CPKScopeInfo - -// HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -type HTTPHeaders = generated.BlobHTTPHeaders - -// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. -type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions - -// Tags represent map of blob index tags -type Tags = generated.BlobTag - -// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and -// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset and zero value count indicates from the offset to the resource's end. -type HTTPRange = exported.HTTPRange - -// Request Model Declaration ------------------------------------------------------------------------------------------- - -// DownloadStreamOptions contains the optional parameters for the Client.Download method. -type DownloadStreamOptions struct { - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - - // Range specifies a range of bytes. The default value is all bytes. 
- Range HTTPRange - - AccessConditions *AccessConditions - CPKInfo *CPKInfo - CPKScopeInfo *CPKScopeInfo -} - -func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - basics := generated.BlobClientDownloadOptions{ - RangeGetContentMD5: o.RangeGetContentMD5, - Range: exported.FormatHTTPRange(o.Range), - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// downloadOptions contains common options used by the DownloadBuffer and DownloadFile functions. -type downloadOptions struct { - // Range specifies a range of bytes. The default value is all bytes. - Range HTTPRange - - // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are received. - Progress func(bytesTransferred int64) - - // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. - AccessConditions *AccessConditions - - // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CPKInfo *CPKInfo - CPKScopeInfo *CPKScopeInfo - - // Concurrency indicates the maximum number of blocks to download in parallel (0=default). - Concurrency uint16 - - // RetryReaderOptionsPerBlock is used when downloading each block. - RetryReaderOptionsPerBlock RetryReaderOptions -} - -func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions { - if o == nil { - return nil - } - return &GetPropertiesOptions{ - AccessConditions: o.AccessConditions, - CPKInfo: o.CPKInfo, - } -} - -func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContentMD5 *bool) *DownloadStreamOptions { - if o == nil { - return nil - } - return &DownloadStreamOptions{ - AccessConditions: o.AccessConditions, - CPKInfo: o.CPKInfo, - CPKScopeInfo: o.CPKScopeInfo, - Range: rnge, - RangeGetContentMD5: rangeGetContentMD5, - } -} - -// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method. -type DownloadBufferOptions struct { - // Range specifies a range of bytes. The default value is all bytes. - Range HTTPRange - - // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are received. - Progress func(bytesTransferred int64) - - // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. - AccessConditions *AccessConditions - - // CPKInfo contains a group of parameters for client provided encryption key. - CPKInfo *CPKInfo - - // CPKScopeInfo contains a group of parameters for client provided encryption scope. - CPKScopeInfo *CPKScopeInfo - - // Concurrency indicates the maximum number of blocks to download in parallel (0=default). - Concurrency uint16 - - // RetryReaderOptionsPerBlock is used when downloading each block. 
- RetryReaderOptionsPerBlock RetryReaderOptions -} - -// DownloadFileOptions contains the optional parameters for the DownloadFile method. -type DownloadFileOptions struct { - // Range specifies a range of bytes. The default value is all bytes. - Range HTTPRange - - // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are received. - Progress func(bytesTransferred int64) - - // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. - AccessConditions *AccessConditions - - // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CPKInfo *CPKInfo - CPKScopeInfo *CPKScopeInfo - - // Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5. - Concurrency uint16 - - // RetryReaderOptionsPerBlock is used when downloading each block. - RetryReaderOptionsPerBlock RetryReaderOptions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DeleteOptions contains the optional parameters for the Client.Delete method. -type DeleteOptions struct { - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. - DeleteSnapshots *DeleteSnapshotsOptionType - AccessConditions *AccessConditions - // Setting DeleteType to DeleteTypePermanent will permanently delete soft-delete snapshot and/or version blobs. - // WARNING: This is a dangerous operation and should not be used unless you know the implications. Please proceed - // with caution. - // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob - BlobDeleteType *DeleteType -} - -func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := generated.BlobClientDeleteOptions{ - DeleteSnapshots: o.DeleteSnapshots, - DeleteType: o.BlobDeleteType, // None by default - } - - if o.AccessConditions == nil { - return &basics, nil, nil - } - - return &basics, o.AccessConditions.LeaseAccessConditions, o.AccessConditions.ModifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UndeleteOptions contains the optional parameters for the Client.Undelete method. -type UndeleteOptions struct { - // placeholder for future options -} - -func (o *UndeleteOptions) format() *generated.BlobClientUndeleteOptions { - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetTierOptions contains the optional parameters for the Client.SetTier method. -type SetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. 
- RehydratePriority *RehydratePriority - - AccessConditions *AccessConditions -} - -func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &generated.BlobClientSetTierOptions{RehydratePriority: o.RehydratePriority}, leaseAccessConditions, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method -type GetPropertiesOptions struct { - AccessConditions *AccessConditions - CPKInfo *CPKInfo -} - -func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, - *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return nil, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. -type SetHTTPHeadersOptions struct { - AccessConditions *AccessConditions -} - -func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return nil, leaseAccessConditions, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetMetadataOptions provides set of configurations for Set Metadata on blob operation -type SetMetadataOptions struct { - AccessConditions *AccessConditions - CPKInfo *CPKInfo - CPKScopeInfo *CPKScopeInfo -} - -func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CPKInfo, - *CPKScopeInfo, *ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. 
-type CreateSnapshotOptions struct { - Metadata map[string]*string - AccessConditions *AccessConditions - CPKInfo *CPKInfo - CPKScopeInfo *CPKScopeInfo -} - -func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CPKInfo, - *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - - return &generated.BlobClientCreateSnapshotOptions{ - Metadata: o.Metadata, - }, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method. -type StartCopyFromURLOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Used to set blob tags in various blob operations. - BlobTags map[string]string - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs - // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source - // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. - // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]*string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - - AccessConditions *AccessConditions -} - -func (o *StartCopyFromURLOptions) format() (*generated.BlobClientStartCopyFromURLOptions, - *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - basics := generated.BlobClientStartCopyFromURLOptions{ - BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), - Metadata: o.Metadata, - RehydratePriority: o.RehydratePriority, - SealBlob: o.SealBlob, - Tier: o.Tier, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - LegalHold: o.LegalHold, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// AbortCopyFromURLOptions contains the optional parameters for the Client.AbortCopyFromURL method. 
-type AbortCopyFromURLOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *AbortCopyFromURLOptions) format() (*generated.BlobClientAbortCopyFromURLOptions, *generated.LeaseAccessConditions) { - if o == nil { - return nil, nil - } - return nil, o.LeaseAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetTagsOptions contains the optional parameters for the Client.SetTags method. -type SetTagsOptions struct { - // The version id parameter is an opaque DateTime value that, when present, - // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - VersionID *string - // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Optional header, Specifies the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - - AccessConditions *AccessConditions -} - -func (o *SetTagsOptions) format() (*generated.BlobClientSetTagsOptions, *ModifiedAccessConditions, *generated.LeaseAccessConditions) { - if o == nil { - return nil, nil, nil - } - - options := &generated.BlobClientSetTagsOptions{ - TransactionalContentMD5: o.TransactionalContentMD5, - TransactionalContentCRC64: o.TransactionalContentCRC64, - VersionID: o.VersionID, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, modifiedAccessConditions, leaseAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetTagsOptions contains the optional parameters for the Client.GetTags method. -type GetTagsOptions struct { - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. - Snapshot *string - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string - - BlobAccessConditions *AccessConditions -} - -func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { - if o == nil { - return nil, nil, nil - } - - options := &generated.BlobClientGetTagsOptions{ - Snapshot: o.Snapshot, - VersionID: o.VersionID, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) - return options, modifiedAccessConditions, leaseAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetImmutabilityPolicyOptions contains the parameter for Client.SetImmutabilityPolicy -type SetImmutabilityPolicyOptions struct { - // Specifies the immutability policy mode to set on the blob. Possible values to set include: "Locked", "Unlocked". - // "Mutable" can only be returned by service, don't set to "Mutable". If mode is not set - it will default to Unlocked. 
- Mode *ImmutabilityPolicySetting - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) { - if o == nil { - return &generated.BlobClientSetImmutabilityPolicyOptions{}, nil - } - ac := &exported.BlobAccessConditions{ - ModifiedAccessConditions: o.ModifiedAccessConditions, - } - _, modifiedAccessConditions := exported.FormatBlobAccessConditions(ac) - - options := &generated.BlobClientSetImmutabilityPolicyOptions{ - ImmutabilityPolicyMode: o.Mode, - } - - return options, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DeleteImmutabilityPolicyOptions contains the optional parameters for the Client.DeleteImmutabilityPolicy method. -type DeleteImmutabilityPolicyOptions struct { - // placeholder for future options -} - -func (o *DeleteImmutabilityPolicyOptions) format() *generated.BlobClientDeleteImmutabilityPolicyOptions { - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetLegalHoldOptions contains the optional parameters for the Client.SetLegalHold method. -type SetLegalHoldOptions struct { - // placeholder for future options -} - -func (o *SetLegalHoldOptions) format() *generated.BlobClientSetLegalHoldOptions { - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. -type GetSASURLOptions struct { - StartTime *time.Time -} - -func (o *GetSASURLOptions) format() time.Time { - if o == nil { - return time.Time{} - } - - var st time.Time - if o.StartTime != nil { - st = o.StartTime.UTC() - } else { - st = time.Time{} - } - return st -} - -// --------------------------------------------------------------------------------------------------------------------- - -// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method. -type CopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTags map[string]string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. 
Indicates the tier to be set on the blob. - Tier *AccessTier - - SourceModifiedAccessConditions *SourceModifiedAccessConditions - - BlobAccessConditions *AccessConditions - - CPKScopeInfo *CPKScopeInfo -} - -func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.CPKScopeInfo) { - if o == nil { - return nil, nil, nil, nil, nil - } - - options := &generated.BlobClientCopyFromURLOptions{ - BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), - CopySourceAuthorization: o.CopySourceAuthorization, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - LegalHold: o.LegalHold, - Metadata: o.Metadata, - SourceContentMD5: o.SourceContentMD5, - Tier: o.Tier, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) - return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, o.CPKScopeInfo -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetAccountInfoOptions provides set of options for Client.GetAccountInfo -type GetAccountInfoOptions struct { - // placeholder for future options -} - -func (o *GetAccountInfoOptions) format() *generated.BlobClientGetAccountInfoOptions { - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go deleted file mode 100644 index 352d97526..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go +++ /dev/null @@ -1,119 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blob - -import ( - "context" - "io" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// DownloadResponse contains the response from method BlobClient.Download. -type DownloadResponse = generated.BlobClientDownloadResponse - -// DownloadStreamResponse contains the response from the DownloadStream method. -// To read from the stream, read from the Body field, or call the NewRetryReader method. -type DownloadStreamResponse struct { - DownloadResponse - ObjectReplicationRules []ObjectReplicationPolicy - - client *Client - getInfo httpGetterInfo - cpkInfo *CPKInfo - cpkScope *CPKScopeInfo -} - -// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while -// reading, it will make additional requests to reestablish a connection and continue reading. -// Pass nil for options to accept the default options. -// Callers of this method should not access the DownloadStreamResponse.Body field. 
-func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { - if options == nil { - options = &RetryReaderOptions{} - } - - return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) { - accessConditions := &AccessConditions{ - ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag}, - } - options := DownloadStreamOptions{ - Range: getInfo.Range, - AccessConditions: accessConditions, - CPKInfo: r.cpkInfo, - CPKScopeInfo: r.cpkScope, - } - resp, err := r.client.DownloadStream(ctx, &options) - if err != nil { - return nil, err - } - return resp.Body, err - }, *options) -} - -// DeleteResponse contains the response from method BlobClient.Delete. -type DeleteResponse = generated.BlobClientDeleteResponse - -// UndeleteResponse contains the response from method BlobClient.Undelete. -type UndeleteResponse = generated.BlobClientUndeleteResponse - -// SetTierResponse contains the response from method BlobClient.SetTier. -type SetTierResponse = generated.BlobClientSetTierResponse - -// GetPropertiesResponse contains the response from method BlobClient.GetProperties. -type GetPropertiesResponse = generated.BlobClientGetPropertiesResponse - -// SetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. -type SetHTTPHeadersResponse = generated.BlobClientSetHTTPHeadersResponse - -// SetMetadataResponse contains the response from method BlobClient.SetMetadata. -type SetMetadataResponse = generated.BlobClientSetMetadataResponse - -// CreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. -type CreateSnapshotResponse = generated.BlobClientCreateSnapshotResponse - -// StartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. -type StartCopyFromURLResponse = generated.BlobClientStartCopyFromURLResponse - -// AbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. -type AbortCopyFromURLResponse = generated.BlobClientAbortCopyFromURLResponse - -// SetTagsResponse contains the response from method BlobClient.SetTags. -type SetTagsResponse = generated.BlobClientSetTagsResponse - -// GetTagsResponse contains the response from method BlobClient.GetTags. -type GetTagsResponse = generated.BlobClientGetTagsResponse - -// SetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. -type SetImmutabilityPolicyResponse = generated.BlobClientSetImmutabilityPolicyResponse - -// DeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicyResponse. -type DeleteImmutabilityPolicyResponse = generated.BlobClientDeleteImmutabilityPolicyResponse - -// SetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. -type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse - -// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL. -type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse - -// GetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. -type GetAccountInfoResponse = generated.BlobClientGetAccountInfoResponse - -// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. -type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse - -// BreakLeaseResponse contains the response from method BlobClient.BreakLease. 
-type BreakLeaseResponse = generated.BlobClientBreakLeaseResponse - -// ChangeLeaseResponse contains the response from method BlobClient.ChangeLease. -type ChangeLeaseResponse = generated.BlobClientChangeLeaseResponse - -// ReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. -type ReleaseLeaseResponse = generated.BlobClientReleaseLeaseResponse - -// RenewLeaseResponse contains the response from method BlobClient.RenewLease. -type RenewLeaseResponse = generated.BlobClientRenewLeaseResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go deleted file mode 100644 index 1deedb590..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go +++ /dev/null @@ -1,192 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blob - -import ( - "context" - "io" - "net" - "strings" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" -) - -// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. -type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error) - -// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters -// that should be used to make an HTTP GET request. -type httpGetterInfo struct { - Range HTTPRange - - // ETag specifies the resource's etag that should be used when creating - // the HTTP GET request's If-Match header - ETag *azcore.ETag -} - -// RetryReaderOptions configures the retry reader's behavior. -// Zero-value fields will have their specified default values applied during use. -// This allows for modification of a subset of fields. -type RetryReaderOptions struct { - // MaxRetries specifies the maximum number of attempts a failed read will be retried - // before producing an error. - // The default value is three. - MaxRetries int32 - - // OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging. - OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) - - // EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default, - // retryReader has the following special behaviour: closing the response body before it is all read is treated as a - // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the = - // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If - // TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead - // treated as a fatal (non-retryable) error. - // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens - // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors - // which will be retried. - // The default value is false. 
- EarlyCloseAsError bool - - doInjectError bool - doInjectErrorRound int32 - injectedError error -} - -// RetryReader attempts to read from response, and if there is a retry-able network error -// returned during reading, it will retry according to retry reader option through executing -// user defined action with provided data to get a new response, and continue the overall reading process -// through reading from the new response. -// RetryReader implements the io.ReadCloser interface. -type RetryReader struct { - ctx context.Context - info httpGetterInfo - retryReaderOptions RetryReaderOptions - getter httpGetter - countWasBounded bool - - // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response - responseMu *sync.Mutex - response io.ReadCloser -} - -// newRetryReader creates a retry reader. -func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader { - if o.MaxRetries < 1 { - o.MaxRetries = 3 - } - return &RetryReader{ - ctx: ctx, - getter: getter, - info: info, - countWasBounded: info.Range.Count != CountToEnd, - response: initialResponse, - responseMu: &sync.Mutex{}, - retryReaderOptions: o, - } -} - -// setResponse function -func (s *RetryReader) setResponse(r io.ReadCloser) { - s.responseMu.Lock() - defer s.responseMu.Unlock() - s.response = r -} - -// Read from retry reader -func (s *RetryReader) Read(p []byte) (n int, err error) { - for try := int32(0); ; try++ { - //fmt.Println(try) // Comment out for debugging. - if s.countWasBounded && s.info.Range.Count == CountToEnd { - // User specified an original count and the remaining bytes are 0, return 0, EOF - return 0, io.EOF - } - - s.responseMu.Lock() - resp := s.response - s.responseMu.Unlock() - if resp == nil { // We don't have a response stream to read from, try to get one. - newResponse, err := s.getter(s.ctx, s.info) - if err != nil { - return 0, err - } - // Successful GET; this is the network stream we'll read from. - s.setResponse(newResponse) - resp = newResponse - } - n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) - - // Injection mechanism for testing. - if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound { - if s.retryReaderOptions.injectedError != nil { - err = s.retryReaderOptions.injectedError - } else { - err = &net.DNSError{IsTemporary: true} - } - } - - // We successfully read data or end EOF. - if err == nil || err == io.EOF { - s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future - if s.info.Range.Count != CountToEnd { - s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future - } - return n, err // Return the return to the caller - } - _ = s.Close() - - s.setResponse(nil) // Our stream is no longer good - - // Check the retry count and error code, and decide whether to retry. 
- retriesExhausted := try >= s.retryReaderOptions.MaxRetries - _, isNetError := err.(net.Error) - isUnexpectedEOF := err == io.ErrUnexpectedEOF - willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted - - // Notify, for logging purposes, of any failures - if s.retryReaderOptions.OnFailedRead != nil { - failureCount := try + 1 // because try is zero-based - s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry) - } - - if willRetry { - continue - // Loop around and try to get and read from new stream. - } - return n, err // Not retryable, or retries exhausted, so just return - } -} - -// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry -// Is this safe, to close early from another goroutine? Early close ultimately ends up calling -// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" -// which is exactly the behaviour we want. -// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) -// then there are two different types of error that may happen - either the one we check for here, -// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine -// to check for one, since the other is a net.Error, which our main Read retry loop is already handing. -func (s *RetryReader) wasRetryableEarlyClose(err error) bool { - if s.retryReaderOptions.EarlyCloseAsError { - return false // user wants all early closes to be errors, and so not retryable - } - // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text - return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) -} - -// ReadOnClosedBodyMessage of retry reader -const ReadOnClosedBodyMessage = "read on closed response body" - -// Close retry reader -func (s *RetryReader) Close() error { - s.responseMu.Lock() - defer s.responseMu.Unlock() - if s.response != nil { - return s.response.Close() - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go deleted file mode 100644 index c2d517d8a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go +++ /dev/null @@ -1,79 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blob - -import ( - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" -) - -// ObjectReplicationRules struct -type ObjectReplicationRules struct { - RuleID string - Status string -} - -// ObjectReplicationPolicy are deserialized attributes. -type ObjectReplicationPolicy struct { - PolicyID *string - Rules *[]ObjectReplicationRules -} - -// deserializeORSPolicies is utility function to deserialize ORS Policies. -func deserializeORSPolicies(policies map[string]*string) (objectReplicationPolicies []ObjectReplicationPolicy) { - if policies == nil { - return nil - } - // For source blobs (blobs that have policy ids and rule ids applied to them), - // the header will be formatted as "x-ms-or-_: {Complete, Failed}". - // The value of this header is the status of the replication. 
- orPolicyStatusHeader := make(map[string]*string) - for key, value := range policies { - if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { - orPolicyStatusHeader[key] = value - } - } - - parsedResult := make(map[string][]ObjectReplicationRules) - for key, value := range orPolicyStatusHeader { - policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") - policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] - - parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleID: ruleId, Status: *value}) - } - - for policyId, rules := range parsedResult { - objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ - PolicyID: &policyId, - Rules: &rules, - }) - } - return -} - -// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders. -func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders { - return HTTPHeaders{ - BlobContentType: resp.ContentType, - BlobContentEncoding: resp.ContentEncoding, - BlobContentLanguage: resp.ContentLanguage, - BlobContentDisposition: resp.ContentDisposition, - BlobCacheControl: resp.CacheControl, - BlobContentMD5: resp.ContentMD5, - } -} - -// URLParts object represents the components that make up an Azure Storage Container/Blob URL. -// NOTE: Changing any SAS-related field requires computing a new SAS signature. -type URLParts = sas.URLParts - -// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other -// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. -func ParseURL(u string) (URLParts, error) { - return sas.ParseURL(u) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go deleted file mode 100644 index 07fad6061..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go +++ /dev/null @@ -1,159 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package bloberror - -import ( - "errors" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// HasCode returns true if the provided error is an *azcore.ResponseError -// with its ErrorCode field equal to one of the specified Codes. 
-func HasCode(err error, codes ...Code) bool { - var respErr *azcore.ResponseError - if !errors.As(err, &respErr) { - return false - } - - for _, code := range codes { - if respErr.ErrorCode == string(code) { - return true - } - } - - return false -} - -// Code - Error codes returned by the service -type Code = generated.StorageErrorCode - -const ( - AccountAlreadyExists Code = "AccountAlreadyExists" - AccountBeingCreated Code = "AccountBeingCreated" - AccountIsDisabled Code = "AccountIsDisabled" - AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet" - AuthenticationFailed Code = "AuthenticationFailed" - AuthorizationFailure Code = "AuthorizationFailure" - AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" - AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" - AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" - AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" - AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" - BlobAlreadyExists Code = "BlobAlreadyExists" - BlobArchived Code = "BlobArchived" - BlobBeingRehydrated Code = "BlobBeingRehydrated" - BlobImmutableDueToPolicy Code = "BlobImmutableDueToPolicy" - BlobNotArchived Code = "BlobNotArchived" - BlobNotFound Code = "BlobNotFound" - BlobOverwritten Code = "BlobOverwritten" - BlobTierInadequateForContentLength Code = "BlobTierInadequateForContentLength" - BlobUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption" - BlockCountExceedsLimit Code = "BlockCountExceedsLimit" - BlockListTooLong Code = "BlockListTooLong" - CannotChangeToLowerTier Code = "CannotChangeToLowerTier" - CannotVerifyCopySource Code = "CannotVerifyCopySource" - ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" - ConditionNotMet Code = "ConditionNotMet" - ContainerAlreadyExists Code = "ContainerAlreadyExists" - ContainerBeingDeleted Code = "ContainerBeingDeleted" - ContainerDisabled Code = "ContainerDisabled" - ContainerNotFound Code = "ContainerNotFound" - ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit" - CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported" - CopyIDMismatch Code = "CopyIdMismatch" - EmptyMetadataKey Code = "EmptyMetadataKey" - FeatureVersionMismatch Code = "FeatureVersionMismatch" - ImmutabilityPolicyDeleteOnLockedPolicy Code = "ImmutabilityPolicyDeleteOnLockedPolicy" - IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" - IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" - InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired" - InsufficientAccountPermissions Code = "InsufficientAccountPermissions" - InternalError Code = "InternalError" - InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" - InvalidBlobOrBlock Code = "InvalidBlobOrBlock" - InvalidBlobTier Code = "InvalidBlobTier" - InvalidBlobType Code = "InvalidBlobType" - InvalidBlockID Code = "InvalidBlockId" - InvalidBlockList Code = "InvalidBlockList" - InvalidHTTPVerb Code = "InvalidHttpVerb" - InvalidHeaderValue Code = "InvalidHeaderValue" - InvalidInput Code = "InvalidInput" - InvalidMD5 Code = "InvalidMd5" - InvalidMetadata Code = "InvalidMetadata" - InvalidOperation Code = "InvalidOperation" - InvalidPageRange Code = "InvalidPageRange" - InvalidQueryParameterValue Code = "InvalidQueryParameterValue" - InvalidRange Code = 
"InvalidRange" - InvalidResourceName Code = "InvalidResourceName" - InvalidSourceBlobType Code = "InvalidSourceBlobType" - InvalidSourceBlobURL Code = "InvalidSourceBlobUrl" - InvalidURI Code = "InvalidUri" - InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation" - InvalidXMLDocument Code = "InvalidXmlDocument" - InvalidXMLNodeValue Code = "InvalidXmlNodeValue" - LeaseAlreadyBroken Code = "LeaseAlreadyBroken" - LeaseAlreadyPresent Code = "LeaseAlreadyPresent" - LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation" - LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation" - LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation" - LeaseIDMissing Code = "LeaseIdMissing" - LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired" - LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged" - LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed" - LeaseLost Code = "LeaseLost" - LeaseNotPresentWithBlobOperation Code = "LeaseNotPresentWithBlobOperation" - LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" - LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" - MD5Mismatch Code = "Md5Mismatch" - CRC64Mismatch Code = "Crc64Mismatch" - MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" - MetadataTooLarge Code = "MetadataTooLarge" - MissingContentLengthHeader Code = "MissingContentLengthHeader" - MissingRequiredHeader Code = "MissingRequiredHeader" - MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" - MissingRequiredXMLNode Code = "MissingRequiredXmlNode" - MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" - NoAuthenticationInformation Code = "NoAuthenticationInformation" - NoPendingCopyOperation Code = "NoPendingCopyOperation" - OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" - OperationNotAllowedOnRootBlob Code = "OperationNotAllowedOnRootBlob" - OperationTimedOut Code = "OperationTimedOut" - OutOfRangeInput Code = "OutOfRangeInput" - OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" - PendingCopyOperation Code = "PendingCopyOperation" - PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer" - PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound" - PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported" - RequestBodyTooLarge Code = "RequestBodyTooLarge" - RequestURLFailedToParse Code = "RequestUrlFailedToParse" - ResourceAlreadyExists Code = "ResourceAlreadyExists" - ResourceNotFound Code = "ResourceNotFound" - ResourceTypeMismatch Code = "ResourceTypeMismatch" - SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet" - SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge" - ServerBusy Code = "ServerBusy" - SnapshotCountExceeded Code = "SnapshotCountExceeded" - SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded" - SnapshotsPresent Code = "SnapshotsPresent" - SourceConditionNotMet Code = "SourceConditionNotMet" - SystemInUse Code = "SystemInUse" - TargetConditionNotMet Code = "TargetConditionNotMet" - UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite" - UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" - UnsupportedHeader Code = "UnsupportedHeader" - UnsupportedQueryParameter Code = "UnsupportedQueryParameter" - UnsupportedXMLNode Code = "UnsupportedXmlNode" -) - 
-var ( - // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. - MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") - UnsupportedChecksum = errors.New("for multi-part uploads, user generated checksums cannot be validated") -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go deleted file mode 100644 index 24df42c75..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go +++ /dev/null @@ -1,249 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blockblob - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/binary" - "errors" - "io" - "sync" - "sync/atomic" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// blockWriter provides methods to upload blocks that represent a file to a server and commit them. -// This allows us to provide a local implementation that fakes the server for hermetic testing. -type blockWriter interface { - StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error) - Upload(context.Context, io.ReadSeekCloser, *UploadOptions) (UploadResponse, error) - CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) -} - -// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. -func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) shared.BufferManager[T]) (CommitBlockListResponse, error) { - options.setDefaults() - - wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing - errCh := make(chan error, 1) // contains the first error encountered during processing - - buffers := getBufferManager(options.Concurrency, options.BlockSize) - defer buffers.Free() - - // this controls the lifetime of the uploading goroutines. - // if an error is encountered, cancel() is called which will terminate all uploads. - // NOTE: the ordering is important here. cancel MUST execute before - // cleaning up the buffers so that any uploading goroutines exit first, - // releasing their buffers back to the pool for cleanup. - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // all blocks have IDs that start with a random UUID - blockIDPrefix, err := uuid.New() - if err != nil { - return CommitBlockListResponse{}, err - } - tracker := blockTracker{ - blockIDPrefix: blockIDPrefix, - options: options, - } - - // This goroutine grabs a buffer, reads from the stream into the buffer, - // then creates a goroutine to upload/stage the block. 
- for blockNum := uint32(0); true; blockNum++ { - var buffer T - select { - case buffer = <-buffers.Acquire(): - // got a buffer - default: - // no buffer available; allocate a new buffer if possible - if _, err := buffers.Grow(); err != nil { - return CommitBlockListResponse{}, err - } - - // either grab the newly allocated buffer or wait for one to become available - buffer = <-buffers.Acquire() - } - - var n int - n, err = shared.ReadAtLeast(src, buffer, len(buffer)) - - if n > 0 { - // some data was read, upload it - wg.Add(1) // We're posting a buffer to be sent - - // NOTE: we must pass blockNum as an arg to our goroutine else - // it's captured by reference and can change underneath us! - go func(blockNum uint32) { - // Upload the outgoing block, matching the number of bytes read - err := tracker.uploadBlock(ctx, dst, blockNum, buffer[:n]) - if err != nil { - select { - case errCh <- err: - // error was set - default: - // some other error is already set - } - cancel() - } - buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now - - // signal that the block has been staged. - // we MUST do this after attempting to write to errCh - // to avoid it racing with the reading goroutine. - wg.Done() - }(blockNum) - } else { - // nothing was read so the buffer is empty, send it back for reuse/clean-up. - buffers.Release(buffer) - } - - if err != nil { // The reader is done, no more outgoing buffers - if errors.Is(err, io.EOF) { - // these are expected errors, we don't surface those - err = nil - } else { - // some other error happened, terminate any outstanding uploads - cancel() - } - break - } - } - - wg.Wait() // Wait for all outgoing blocks to complete - - if err != nil { - // there was an error reading from src, favor this error over any error during staging - return CommitBlockListResponse{}, err - } - - select { - case err = <-errCh: - // there was an error during staging - return CommitBlockListResponse{}, err - default: - // no error was encountered - } - - // If no error, after all blocks uploaded, commit them to the blob & return the result - return tracker.commitBlocks(ctx, dst) -} - -// used to manage the uploading and committing of blocks -type blockTracker struct { - blockIDPrefix uuid.UUID // UUID used with all blockIDs - maxBlockNum uint32 // defaults to 0 - firstBlock []byte // Used only if maxBlockNum is 0 - options UploadStreamOptions -} - -func (bt *blockTracker) uploadBlock(ctx context.Context, to blockWriter, num uint32, buffer []byte) error { - if num == 0 { - bt.firstBlock = buffer - - // If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation - // If the payload is exactly the same size as the buffer, there may be more content coming in. - if len(buffer) < int(bt.options.BlockSize) { - return nil - } - } else { - // Else, upload a staged block... 
- atomicMorphUint32(&bt.maxBlockNum, func(startVal uint32) (val uint32, morphResult uint32) { - // Atomically remember (in t.numBlocks) the maximum block num we've ever seen - if startVal < num { - return num, 0 - } - return startVal, 0 - }) - } - - blockID := newUUIDBlockID(bt.blockIDPrefix).WithBlockNumber(num).ToBase64() - _, err := to.StageBlock(ctx, blockID, streaming.NopCloser(bytes.NewReader(buffer)), bt.options.getStageBlockOptions()) - return err -} - -func (bt *blockTracker) commitBlocks(ctx context.Context, to blockWriter) (CommitBlockListResponse, error) { - // If the first block had the exact same size as the buffer - // we would have staged it as a block thinking that there might be more data coming - if bt.maxBlockNum == 0 && len(bt.firstBlock) < int(bt.options.BlockSize) { - // If whole payload fits in 1 block (block #0), upload it with 1 I/O operation - up, err := to.Upload(ctx, streaming.NopCloser(bytes.NewReader(bt.firstBlock)), bt.options.getUploadOptions()) - if err != nil { - return CommitBlockListResponse{}, err - } - - // convert UploadResponse to CommitBlockListResponse - return CommitBlockListResponse{ - ClientRequestID: up.ClientRequestID, - ContentMD5: up.ContentMD5, - Date: up.Date, - ETag: up.ETag, - EncryptionKeySHA256: up.EncryptionKeySHA256, - EncryptionScope: up.EncryptionScope, - IsServerEncrypted: up.IsServerEncrypted, - LastModified: up.LastModified, - RequestID: up.RequestID, - Version: up.Version, - VersionID: up.VersionID, - //ContentCRC64: up.ContentCRC64, doesn't exist on UploadResponse - }, nil - } - - // Multiple blocks staged, commit them all now - blockID := newUUIDBlockID(bt.blockIDPrefix) - blockIDs := make([]string, bt.maxBlockNum+1) - for bn := uint32(0); bn < bt.maxBlockNum+1; bn++ { - blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64() - } - - return to.CommitBlockList(ctx, blockIDs, bt.options.getCommitBlockListOptions()) -} - -// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function. -// The AtomicMorpher callback is passed a startValue and based on this value it returns -// what the new value should be and the result that AtomicMorph should return to its caller. -type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult uint32) - -// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. 
-func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) uint32 { - for { - currentVal := atomic.LoadUint32(target) - desiredVal, morphResult := morpher(currentVal) - if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) { - return morphResult - } - } -} - -type blockID [64]byte - -func (blockID blockID) ToBase64() string { - return base64.StdEncoding.EncodeToString(blockID[:]) -} - -type uuidBlockID blockID - -func newUUIDBlockID(u uuid.UUID) uuidBlockID { - ubi := uuidBlockID{} // Create a new uuidBlockID - copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it - // Block number defaults to 0 - return ubi -} - -func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { - binary.BigEndian.PutUint32(ubi[len(uuid.UUID{}):], blockNumber) // Put block number after UUID - return ubi // Return the passed-in copy -} - -func (ubi uuidBlockID) ToBase64() string { - return blockID(ubi).ToBase64() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go deleted file mode 100644 index 9acc74b90..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ /dev/null @@ -1,597 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blockblob - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" - "io" - "math" - "os" - "reflect" - "sync" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// ClientOptions contains the optional parameters when creating a Client. -type ClientOptions base.ClientOptions - -// Client defines a set of operations applicable to block blobs. -type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] - -// NewClient creates an instance of Client with the specified values. -// - blobURL - the URL of the blob e.g. 
https://.blob.core.windows.net/container/blob.txt -// - cred - an Azure AD credential, typically obtained via the azidentity module -// - options - client options; pass nil to accept the default values -func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - audience := base.GetAudience((*base.ClientOptions)(options)) - authPolicy := shared.NewStorageChallengePolicy(cred, audience) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil -} - -// NewClientWithNoCredential creates an instance of Client with the specified values. -// This is used to anonymously access a blob or with a shared access signature (SAS) token. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? -// - options - client options; pass nil to accept the default values -func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { - conOptions := shared.GetClientOptions(options) - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - - return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil -} - -// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt -// - cred - a SharedKeyCredential created with the matching blob's storage account and access key -// - options - client options; pass nil to accept the default values -func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { - authPolicy := exported.NewSharedKeyCredPolicy(cred) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - - return (*Client)(base.NewBlockBlobClient(blobURL, azClient, cred)), nil -} - -// NewClientFromConnectionString creates an instance of Client with the specified values. 
-// - connectionString - a connection string for the desired storage account -// - containerName - the name of the container within the storage account -// - blobName - the name of the blob within the container -// - options - client options; pass nil to accept the default values -func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { - parsed, err := shared.ParseConnectionString(connectionString) - if err != nil { - return nil, err - } - parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) - - if parsed.AccountKey != "" && parsed.AccountName != "" { - credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) - if err != nil { - return nil, err - } - return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) - } - - return NewClientWithNoCredential(parsed.ServiceURL, options) -} - -func (bb *Client) sharedKey() *blob.SharedKeyCredential { - return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) -} - -func (bb *Client) generated() *generated.BlockBlobClient { - _, blockBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) - return blockBlob -} - -func (bb *Client) innerBlobGenerated() *generated.BlobClient { - b := bb.BlobClient() - return base.InnerClient((*base.Client[generated.BlobClient])(b)) -} - -// URL returns the URL endpoint used by the Client object. -func (bb *Client) URL() string { - return bb.generated().Endpoint() -} - -// BlobClient returns the embedded blob client for this BlockBlob client. -func (bb *Client) BlobClient() *blob.Client { - blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) - return (*blob.Client)(blobClient) -} - -// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (bb *Client) WithSnapshot(snapshot string) (*Client, error) { - p, err := blob.ParseURL(bb.URL()) - if err != nil { - return nil, err - } - p.Snapshot = snapshot - - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil -} - -// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the versionID returning a URL to the base blob. -func (bb *Client) WithVersionID(versionID string) (*Client, error) { - p, err := blob.ParseURL(bb.URL()) - if err != nil { - return nil, err - } - p.VersionID = versionID - - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil -} - -// Upload creates a new block blob or overwrites an existing block blob. -// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not -// supported with Upload; the content of the existing blob is overwritten with the new content. To -// perform a partial update of a block blob, use StageBlock and CommitBlockList. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. 
-func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadOptions) (UploadResponse, error) { - count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) - if err != nil { - return UploadResponse{}, err - } - - opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() - - if options != nil && options.TransactionalValidation != nil { - body, err = options.TransactionalValidation.Apply(body, opts) - if err != nil { - return UploadResponse{}, err - } - } - - resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) - return resp, err -} - -// UploadBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from -// a given URL. Partial updates are not supported with Put Blob from URL; the content of an existing blob is overwritten -// with the content of the new blob. To perform partial updates to a block blob’s contents using a source URL, use the Put -// Block from URL API in conjunction with Put Block List. -// For more information, see https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url -func (bb *Client) UploadBlobFromURL(ctx context.Context, copySource string, options *UploadBlobFromURLOptions) (UploadBlobFromURLResponse, error) { - opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions := options.format() - - resp, err := bb.generated().PutBlobFromURL(ctx, int64(0), copySource, opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions) - - return resp, err -} - -// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. -func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (StageBlockResponse, error) { - count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) - if err != nil { - return StageBlockResponse{}, err - } - - opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format() - - if options != nil && options.TransactionalValidation != nil { - body, err = options.TransactionalValidation.Apply(body, opts) - if err != nil { - return StageBlockResponse{}, nil - } - } - - resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo) - return resp, err -} - -// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. -// If count is CountToEnd (0), then data is read from specified offset to the end. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. 
-func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) { - - stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format() - - resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, 0, sourceURL, stageBlockFromURLOptions, - cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) - - return resp, err -} - -// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. -// In order to be written as part of a blob, a block must have been successfully written -// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob -// by uploading only those blocks that have changed, then committing the new and existing -// blocks together. Any blocks not specified in the block list and permanently deleted. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. -func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (CommitBlockListResponse, error) { - // this is a code smell in the generated code - blockIds := make([]*string, len(base64BlockIDs)) - for k, v := range base64BlockIDs { - blockIds[k] = to.Ptr(v) - } - - blockLookupList := generated.BlockLookupList{Latest: blockIds} - - var commitOptions *generated.BlockBlobClientCommitBlockListOptions - var headers *generated.BlobHTTPHeaders - var leaseAccess *blob.LeaseAccessConditions - var cpkInfo *generated.CPKInfo - var cpkScope *generated.CPKScopeInfo - var modifiedAccess *generated.ModifiedAccessConditions - - if options != nil { - commitOptions = &generated.BlockBlobClientCommitBlockListOptions{ - BlobTagsString: shared.SerializeBlobTagsToStrPtr(options.Tags), - Metadata: options.Metadata, - RequestID: options.RequestID, - Tier: options.Tier, - Timeout: options.Timeout, - TransactionalContentCRC64: options.TransactionalContentCRC64, - TransactionalContentMD5: options.TransactionalContentMD5, - LegalHold: options.LegalHold, - ImmutabilityPolicyMode: options.ImmutabilityPolicyMode, - ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime, - } - - // If user attempts to pass in their own checksum, errors out. - if options.TransactionalContentMD5 != nil || options.TransactionalContentCRC64 != nil { - return CommitBlockListResponse{}, bloberror.UnsupportedChecksum - } - - headers = options.HTTPHeaders - leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) - cpkInfo = options.CPKInfo - cpkScope = options.CPKScopeInfo - } - - resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) - return resp, err -} - -// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. -func (bb *Client) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (GetBlockListResponse, error) { - o, lac, mac := options.format() - - resp, err := bb.generated().GetBlockList(ctx, listType, o, lac, mac) - - return resp, err -} - -// Redeclared APIs ----- Copy over to Append blob and Page blob as well. - -// Delete marks the specified blob or snapshot for deletion. 
The blob is later deleted during garbage collection. -// Note that deleting a blob also deletes all its snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (bb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { - return bb.BlobClient().Delete(ctx, o) -} - -// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. -func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { - return bb.BlobClient().Undelete(ctx, o) -} - -// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (bb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { - return bb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) -} - -// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (bb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { - return bb.BlobClient().DeleteImmutabilityPolicy(ctx, options) -} - -// SetLegalHold operation enables users to set legal hold on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (bb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { - return bb.BlobClient().SetLegalHold(ctx, legalHold, options) -} - -// SetTier operation sets the tier on a blob. The operation is allowed on a page -// blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and -// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation -// does not update the blob's ETag. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. -func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { - return bb.BlobClient().SetTier(ctx, tier, o) -} - -// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. -// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry -func (bb *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) { - if expiryType == nil { - expiryType = ExpiryTypeNever{} - } - et, opts := expiryType.Format(o) - resp, err := bb.innerBlobGenerated().SetExpiry(ctx, et, opts) - return resp, err -} - -// GetProperties returns the blob's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. 
-func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { - return bb.BlobClient().GetProperties(ctx, o) -} - -// GetAccountInfo provides account level information -// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. -func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { - return bb.BlobClient().GetAccountInfo(ctx, o) -} - -// SetHTTPHeaders changes a blob's HTTP headers. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) -} - -// SetMetadata changes a blob's metadata. -// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { - return bb.BlobClient().SetMetadata(ctx, metadata, o) -} - -// CreateSnapshot creates a read-only snapshot of a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. -func (bb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { - return bb.BlobClient().CreateSnapshot(ctx, o) -} - -// StartCopyFromURL copies the data at the source URL to a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. -func (bb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { - return bb.BlobClient().StartCopyFromURL(ctx, copySource, o) -} - -// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. -func (bb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { - return bb.BlobClient().AbortCopyFromURL(ctx, copyID, o) -} - -// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. -// Each call to this operation replaces all existing tags attached to the blob. -// To remove all tags from the blob, call this operation with no tags set. -// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags -func (bb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { - return bb.BlobClient().SetTags(ctx, tags, o) -} - -// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. -// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags -func (bb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { - return bb.BlobClient().GetTags(ctx, o) -} - -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
-func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { - return bb.BlobClient().CopyFromURL(ctx, copySource, o) -} - -// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { - return bb.BlobClient().GetSASURL(permissions, expiry, o) -} - -// Concurrent Upload Functions ----------------------------------------------------------------------------------------- - -// uploadFromReader uploads a buffer in blocks to a block blob. -func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { - if o.BlockSize == 0 { - // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error - if actualSize > MaxStageBlockBytes*MaxBlocks { - return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") - } - // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request - if actualSize <= MaxUploadBlobBytes { - o.BlockSize = MaxUploadBlobBytes // Default if unspecified - } else { - o.BlockSize = int64(math.Ceil(float64(actualSize) / MaxBlocks)) // ceil(buffer / max blocks) = block size to use all 50,000 blocks - if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB - o.BlockSize = blob.DefaultDownloadBlockSize - } - // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). - } - } - - if actualSize <= MaxUploadBlobBytes { - // If the size can fit in 1 Upload call, do it this way - var body io.ReadSeeker = io.NewSectionReader(reader, 0, actualSize) - if o.Progress != nil { - body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) - } - - uploadBlockBlobOptions := o.getUploadBlockBlobOptions() - resp, err := bb.Upload(ctx, shared.NopCloser(body), uploadBlockBlobOptions) - - return toUploadReaderAtResponseFromUploadResponse(resp), err - } - - var numBlocks = uint16(((actualSize - 1) / o.BlockSize) + 1) - if numBlocks > MaxBlocks { - // prevent any math bugs from attempting to upload too many blocks which will always fail - return uploadFromReaderResponse{}, errors.New("block limit exceeded") - } - - if log.Should(exported.EventUpload) { - urlparts, err := blob.ParseURL(bb.generated().Endpoint()) - if err == nil { - log.Writef(exported.EventUpload, "blob name %s actual size %v block-size %v block-count %v", - urlparts.BlobName, actualSize, o.BlockSize, numBlocks) - } - } - - blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs - progress := int64(0) - progressLock := &sync.Mutex{} - - err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ - OperationName: "uploadFromReader", - TransferSize: actualSize, - ChunkSize: o.BlockSize, - NumChunks: uint64(((actualSize - 1) / o.BlockSize) + 1), - Concurrency: o.Concurrency, - Operation: func(ctx context.Context, offset int64, chunkSize int64) error { - // This function is called once per block. - // It is passed this block's offset within the buffer and its count of bytes - // Prepare to read the proper block/section of the buffer - if chunkSize < o.BlockSize { - // this is the last block. 
its actual size might be less - // than the calculated size due to rounding up of the payload - // size to fit in a whole number of blocks. - chunkSize = (actualSize - offset) - } - var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize) - blockNum := offset / o.BlockSize - if o.Progress != nil { - blockProgress := int64(0) - body = streaming.NewRequestProgress(shared.NopCloser(body), - func(bytesTransferred int64) { - diff := bytesTransferred - blockProgress - blockProgress = bytesTransferred - progressLock.Lock() // 1 goroutine at a time gets progress report - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - - // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks - // at the same time causing PutBlockList to get a mix of blocks from all the clients. - generatedUuid, err := uuid.New() - if err != nil { - return err - } - blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String())) - stageBlockOptions := o.getStageBlockOptions() - _, err = bb.StageBlock(ctx, blockIDList[blockNum], shared.NopCloser(body), stageBlockOptions) - return err - }, - }) - if err != nil { - return uploadFromReaderResponse{}, err - } - // All put blocks were successful, call Put Block List to finalize the blob - commitBlockListOptions := o.getCommitBlockListOptions() - resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions) - - return toUploadReaderAtResponseFromCommitBlockListResponse(resp), err -} - -// UploadBuffer uploads a buffer in blocks to a block blob. -func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) { - uploadOptions := uploadFromReaderOptions{} - if o != nil { - uploadOptions = *o - } - - // If user attempts to pass in their own checksum, errors out. - if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { - return UploadBufferResponse{}, bloberror.UnsupportedChecksum - } - - return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) -} - -// UploadFile uploads a file in blocks to a block blob. -func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) { - stat, err := file.Stat() - if err != nil { - return uploadFromReaderResponse{}, err - } - uploadOptions := uploadFromReaderOptions{} - if o != nil { - uploadOptions = *o - } - - // If user attempts to pass in their own checksum, errors out. - if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { - return UploadFileResponse{}, bloberror.UnsupportedChecksum - } - - return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) -} - -// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. -// A Context deadline or cancellation will cause this to error. -func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { - if o == nil { - o = &UploadStreamOptions{} - } - - // If user attempts to pass in their own checksum, errors out. 
- if o.TransactionalValidation != nil && reflect.TypeOf(o.TransactionalValidation).Kind() != reflect.Func { - return UploadStreamResponse{}, bloberror.UnsupportedChecksum - } - - result, err := copyFromReader(ctx, body, bb, *o, shared.NewMMBPool) - if err != nil { - return CommitBlockListResponse{}, err - } - - return result, nil -} - -// Concurrent Download Functions ----------------------------------------------------------------------------------------- - -// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. -func (bb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { - return bb.BlobClient().DownloadStream(ctx, o) -} - -// DownloadBuffer downloads an Azure blob to a buffer with parallel. -func (bb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { - return bb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) -} - -// DownloadFile downloads an Azure blob to a local file. -// The file would be truncated if the size doesn't match. -func (bb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { - return bb.BlobClient().DownloadFile(ctx, file, o) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go deleted file mode 100644 index ce3a5d8de..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go +++ /dev/null @@ -1,52 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blockblob - -import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - -const ( - // CountToEnd specifies the end of the file. - CountToEnd = 0 - - _1MiB = 1024 * 1024 - - // MaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. - MaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB - - // MaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. - MaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB - - // MaxBlocks indicates the maximum number of blocks allowed in a block blob. - MaxBlocks = 50000 -) - -// BlockListType defines values for BlockListType -type BlockListType = generated.BlockListType - -const ( - BlockListTypeCommitted BlockListType = generated.BlockListTypeCommitted - BlockListTypeUncommitted BlockListType = generated.BlockListTypeUncommitted - BlockListTypeAll BlockListType = generated.BlockListTypeAll -) - -// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. -func PossibleBlockListTypeValues() []BlockListType { - return generated.PossibleBlockListTypeValues() -} - -// BlobCopySourceTags - can be 'COPY' or 'REPLACE' -type BlobCopySourceTags = generated.BlobCopySourceTags - -const ( - BlobCopySourceTagsCopy = generated.BlobCopySourceTagsCOPY - BlobCopySourceTagsReplace = generated.BlobCopySourceTagsREPLACE -) - -// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. 
-func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { - return generated.PossibleBlobCopySourceTagsValues() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go deleted file mode 100644 index 453d569e5..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go +++ /dev/null @@ -1,411 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blockblob - -import ( - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// Type Declarations --------------------------------------------------------------------- - -// Block - Represents a single block in a block blob. It describes the block's ID and size. -type Block = generated.Block - -// BlockList - can be uncommitted or committed blocks (committed/uncommitted) -type BlockList = generated.BlockList - -// Request Model Declaration ------------------------------------------------------------------------------------------- - -// UploadOptions contains the optional parameters for the Client.Upload method. -type UploadOptions struct { - // Optional. Used to set blob tags in various blob operations. - Tags map[string]string - - // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]*string - - // Optional. Indicates the tier to be set on the blob. - Tier *blob.AccessTier - - // TransactionalValidation specifies the transfer validation type to use. - // The default is nil (no transfer validation). - TransactionalValidation blob.TransferValidationType - - HTTPHeaders *blob.HTTPHeaders - CPKInfo *blob.CPKInfo - CPKScopeInfo *blob.CPKScopeInfo - AccessConditions *blob.AccessConditions - LegalHold *bool - ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting - ImmutabilityPolicyExpiryTime *time.Time - - // Deprecated: TransactionalContentMD5 can be set by using TransactionalValidation instead - TransactionalContentMD5 []byte -} - -func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, - *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - basics := generated.BlockBlobClientUploadOptions{ - BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), - Metadata: o.Metadata, - Tier: o.Tier, - TransactionalContentMD5: o.TransactionalContentMD5, - LegalHold: o.LegalHold, - ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, - ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiryTime, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UploadBlobFromURLOptions contains the optional parameters for the Client.UploadBlobFromURL method. 
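UploadOptions above bundles tags, metadata, tier, and transfer-validation settings for a single-shot upload. A hedged sketch of populating it — the blockblob Client.Upload method and the blob.AccessTierCool constant live in parts of the SDK outside this diff, and to.Ptr comes from azcore:

package example

import (
	"context"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// uploadWithOptions performs a one-shot upload carrying tags, metadata and an
// access tier.
func uploadWithOptions(ctx context.Context, bb *blockblob.Client) error {
	opts := &blockblob.UploadOptions{
		Tags:     map[string]string{"env": "test"},
		Metadata: map[string]*string{"origin": to.Ptr("example")},
		Tier:     to.Ptr(blob.AccessTierCool),
	}
	body := streaming.NopCloser(strings.NewReader("hello, block blob"))
	_, err := bb.Upload(ctx, body, opts)
	return err
}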
-type UploadBlobFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - Tags map[string]string - - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - - // Optional, default is true. Indicates if properties from the source blob should be copied. - CopySourceBlobProperties *bool - - // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. - CopySourceTags *BlobCopySourceTags - - // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]*string - - // Optional. Specifies the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - - // Optional. Indicates the tier to be set on the blob. - Tier *blob.AccessTier - - // Additional optional headers - HTTPHeaders *blob.HTTPHeaders - AccessConditions *blob.AccessConditions - CPKInfo *blob.CPKInfo - CPKScopeInfo *blob.CPKScopeInfo - SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions -} - -func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFromURLOptions, *generated.BlobHTTPHeaders, - *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, - *generated.SourceModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil, nil - } - - options := generated.BlockBlobClientPutBlobFromURLOptions{ - BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), - CopySourceAuthorization: o.CopySourceAuthorization, - CopySourceBlobProperties: o.CopySourceBlobProperties, - CopySourceTags: o.CopySourceTags, - Metadata: o.Metadata, - SourceContentMD5: o.SourceContentMD5, - Tier: o.Tier, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, o.SourceModifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// StageBlockOptions contains the optional parameters for the Client.StageBlock method. -type StageBlockOptions struct { - CPKInfo *blob.CPKInfo - - CPKScopeInfo *blob.CPKScopeInfo - - LeaseAccessConditions *blob.LeaseAccessConditions - - // TransactionalValidation specifies the transfer validation type to use. - // The default is nil (no transfer validation). - TransactionalValidation blob.TransferValidationType -} - -// StageBlockOptions contains the optional parameters for the Client.StageBlock method. -func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo) { - if o == nil { - return nil, nil, nil, nil - } - - return &generated.BlockBlobClientStageBlockOptions{}, o.LeaseAccessConditions, o.CPKInfo, o.CPKScopeInfo -} - -// --------------------------------------------------------------------------------------------------------------------- - -// StageBlockFromURLOptions contains the optional parameters for the Client.StageBlockFromURL method. -type StageBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
- CopySourceAuthorization *string - - LeaseAccessConditions *blob.LeaseAccessConditions - - SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions - - // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. - SourceContentValidation blob.SourceContentValidationType - - // Range specifies a range of bytes. The default value is all bytes. - Range blob.HTTPRange - - CPKInfo *blob.CPKInfo - - CPKScopeInfo *blob.CPKScopeInfo -} - -func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - options := &generated.BlockBlobClientStageBlockFromURLOptions{ - CopySourceAuthorization: o.CopySourceAuthorization, - SourceRange: exported.FormatHTTPRange(o.Range), - } - - if o.SourceContentValidation != nil { - o.SourceContentValidation.Apply(options) - } - - return options, o.CPKInfo, o.CPKScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method. -type CommitBlockListOptions struct { - Tags map[string]string - Metadata map[string]*string - RequestID *string - Tier *blob.AccessTier - Timeout *int32 - HTTPHeaders *blob.HTTPHeaders - CPKInfo *blob.CPKInfo - CPKScopeInfo *blob.CPKScopeInfo - AccessConditions *blob.AccessConditions - LegalHold *bool - ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting - ImmutabilityPolicyExpiryTime *time.Time - - // Deprecated: TransactionalContentCRC64 cannot be generated - TransactionalContentCRC64 []byte - - // Deprecated: TransactionalContentMD5 cannot be generated - TransactionalContentMD5 []byte -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetBlockListOptions contains the optional parameters for the Client.GetBlockList method. -type GetBlockListOptions struct { - Snapshot *string - AccessConditions *blob.AccessConditions -} - -func (o *GetBlockListOptions) format() (*generated.BlockBlobClientGetBlockListOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &generated.BlockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions -} - -// ------------------------------------------------------------ - -// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions. -type uploadFromReaderOptions struct { - // BlockSize specifies the block size to use; the default (and maximum size) is MaxStageBlockBytes. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. - // Note that the progress reporting is not always increasing; it can go down when retrying a request. - Progress func(bytesTransferred int64) - - // HTTPHeaders indicates the HTTP headers to be associated with the blob. - HTTPHeaders *blob.HTTPHeaders - - // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. 
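GetBlockListOptions above pairs with the client's GetBlockList method (defined earlier in the deleted blockblob client, outside this excerpt), and the committed/uncommitted split mirrors the BlockList alias from the deleted models. A sketch that inspects a blob's block list; the CommittedBlocks/UncommittedBlocks/Name/Size field names are taken from the deleted generated types and are assumptions of this example:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// printBlockList shows which blocks are already committed and which are still
// only staged on the service.
func printBlockList(ctx context.Context, bb *blockblob.Client) error {
	resp, err := bb.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
	if err != nil {
		return err
	}
	for _, b := range resp.BlockList.CommittedBlocks {
		fmt.Printf("committed:   %s (%d bytes)\n", *b.Name, *b.Size)
	}
	for _, b := range resp.BlockList.UncommittedBlocks {
		fmt.Printf("uncommitted: %s (%d bytes)\n", *b.Name, *b.Size)
	}
	return nil
}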
- Metadata map[string]*string - - // AccessConditions indicates the access conditions for the block blob. - AccessConditions *blob.AccessConditions - - // AccessTier indicates the tier of blob - AccessTier *blob.AccessTier - - // BlobTags - Tags map[string]string - - // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CPKInfo *blob.CPKInfo - CPKScopeInfo *blob.CPKScopeInfo - - // Concurrency indicates the maximum number of blocks to upload in parallel (0=default) - Concurrency uint16 - - TransactionalValidation blob.TransferValidationType - - // Deprecated: TransactionalContentCRC64 cannot be generated at block level - TransactionalContentCRC64 uint64 - - // Deprecated: TransactionalContentMD5 cannot be generated at block level - TransactionalContentMD5 []byte -} - -// UploadBufferOptions provides set of configurations for UploadBuffer operation. -type UploadBufferOptions = uploadFromReaderOptions - -// UploadFileOptions provides set of configurations for UploadFile operation. -type UploadFileOptions = uploadFromReaderOptions - -func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions { - leaseAccessConditions, _ := exported.FormatBlobAccessConditions(o.AccessConditions) - return &StageBlockOptions{ - CPKInfo: o.CPKInfo, - CPKScopeInfo: o.CPKScopeInfo, - LeaseAccessConditions: leaseAccessConditions, - - TransactionalValidation: o.TransactionalValidation, - } -} - -func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions { - return &UploadOptions{ - Tags: o.Tags, - Metadata: o.Metadata, - Tier: o.AccessTier, - HTTPHeaders: o.HTTPHeaders, - AccessConditions: o.AccessConditions, - CPKInfo: o.CPKInfo, - CPKScopeInfo: o.CPKScopeInfo, - } -} - -func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOptions { - return &CommitBlockListOptions{ - Tags: o.Tags, - Metadata: o.Metadata, - Tier: o.AccessTier, - HTTPHeaders: o.HTTPHeaders, - CPKInfo: o.CPKInfo, - CPKScopeInfo: o.CPKScopeInfo, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UploadStreamOptions provides set of configurations for UploadStream operation. -type UploadStreamOptions struct { - // BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB. - BlockSize int64 - - // Concurrency defines the max number of concurrent uploads to be performed to upload the file. - // Each concurrent upload will create a buffer of size BlockSize. The default value is one. 
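BlockSize, Progress, and Concurrency above are the knobs exposed through the UploadFileOptions/UploadBufferOptions aliases that follow. A sketch of a chunked file upload with a progress callback, assuming an already constructed *blockblob.Client; the block size and concurrency values are illustrative:

package example

import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// uploadLargeFile splits the file into 8 MiB blocks, uploads four blocks at a
// time, and logs progress as bytes are transferred (the callback can regress
// when a block is retried).
func uploadLargeFile(ctx context.Context, bb *blockblob.Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = bb.UploadFile(ctx, f, &blockblob.UploadFileOptions{
		BlockSize:   8 * 1024 * 1024,
		Concurrency: 4,
		Progress: func(bytesTransferred int64) {
			log.Printf("uploaded %d bytes", bytesTransferred)
		},
	})
	return err
}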
- Concurrency int - - TransactionalValidation blob.TransferValidationType - - HTTPHeaders *blob.HTTPHeaders - Metadata map[string]*string - AccessConditions *blob.AccessConditions - AccessTier *blob.AccessTier - Tags map[string]string - CPKInfo *blob.CPKInfo - CPKScopeInfo *blob.CPKScopeInfo -} - -func (u *UploadStreamOptions) setDefaults() { - if u.Concurrency == 0 { - u.Concurrency = 1 - } - - if u.BlockSize < _1MiB { - u.BlockSize = _1MiB - } -} - -func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions { - if u == nil { - return nil - } - - leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions) - return &StageBlockOptions{ - TransactionalValidation: u.TransactionalValidation, - CPKInfo: u.CPKInfo, - CPKScopeInfo: u.CPKScopeInfo, - LeaseAccessConditions: leaseAccessConditions, - } -} - -func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions { - if u == nil { - return nil - } - - return &CommitBlockListOptions{ - Tags: u.Tags, - Metadata: u.Metadata, - Tier: u.AccessTier, - HTTPHeaders: u.HTTPHeaders, - CPKInfo: u.CPKInfo, - CPKScopeInfo: u.CPKScopeInfo, - AccessConditions: u.AccessConditions, - } -} - -func (u *UploadStreamOptions) getUploadOptions() *UploadOptions { - if u == nil { - return nil - } - - return &UploadOptions{ - Tags: u.Tags, - Metadata: u.Metadata, - Tier: u.AccessTier, - HTTPHeaders: u.HTTPHeaders, - CPKInfo: u.CPKInfo, - CPKScopeInfo: u.CPKScopeInfo, - AccessConditions: u.AccessConditions, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ExpiryType defines values for ExpiryType. -type ExpiryType = exported.ExpiryType - -// ExpiryTypeAbsolute defines the absolute time for the blob expiry. -type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute - -// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry. -type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow - -// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry. -type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation - -// ExpiryTypeNever defines that the blob will be set to never expire. -type ExpiryTypeNever = exported.ExpiryTypeNever - -// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. -type SetExpiryOptions = exported.SetExpiryOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go deleted file mode 100644 index 917f71809..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go +++ /dev/null @@ -1,117 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package blockblob - -import ( - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// UploadResponse contains the response from method Client.Upload. -type UploadResponse = generated.BlockBlobClientUploadResponse - -// UploadBlobFromURLResponse contains the response from the method Client.UploadBlobFromURL -type UploadBlobFromURLResponse = generated.BlockBlobClientPutBlobFromURLResponse - -// StageBlockResponse contains the response from method Client.StageBlock. 
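setDefaults above pins UploadStream to a single 1 MiB buffer unless told otherwise, and getCommitBlockListOptions carries the metadata and tags through to the final commit. A sketch of streaming an io.Reader of unknown length with larger buffers, assuming a ready *blockblob.Client:

package example

import (
	"context"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// streamUpload copies body to the blob without knowing its size up front.
// Each of the four concurrent staging goroutines buffers BlockSize bytes.
func streamUpload(ctx context.Context, bb *blockblob.Client, body io.Reader) error {
	_, err := bb.UploadStream(ctx, body, &blockblob.UploadStreamOptions{
		BlockSize:   4 * 1024 * 1024,
		Concurrency: 4,
	})
	return err
}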
-type StageBlockResponse = generated.BlockBlobClientStageBlockResponse - -// CommitBlockListResponse contains the response from method Client.CommitBlockList. -type CommitBlockListResponse = generated.BlockBlobClientCommitBlockListResponse - -// StageBlockFromURLResponse contains the response from method Client.StageBlockFromURL. -type StageBlockFromURLResponse = generated.BlockBlobClientStageBlockFromURLResponse - -// GetBlockListResponse contains the response from method Client.GetBlockList. -type GetBlockListResponse = generated.BlockBlobClientGetBlockListResponse - -// uploadFromReaderResponse contains the response from method Client.UploadBuffer/Client.UploadFile. -type uploadFromReaderResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - // Will be a part of response only if uploading data >= internal.MaxUploadBlobBytes (= 256 * 1024 * 1024 // 256MB) - ContentCRC64 []byte -} - -func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse { - return uploadFromReaderResponse{ - ClientRequestID: resp.ClientRequestID, - ContentMD5: resp.ContentMD5, - Date: resp.Date, - ETag: resp.ETag, - EncryptionKeySHA256: resp.EncryptionKeySHA256, - EncryptionScope: resp.EncryptionScope, - IsServerEncrypted: resp.IsServerEncrypted, - LastModified: resp.LastModified, - RequestID: resp.RequestID, - Version: resp.Version, - VersionID: resp.VersionID, - } -} - -func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListResponse) uploadFromReaderResponse { - return uploadFromReaderResponse{ - ClientRequestID: resp.ClientRequestID, - ContentMD5: resp.ContentMD5, - Date: resp.Date, - ETag: resp.ETag, - EncryptionKeySHA256: resp.EncryptionKeySHA256, - EncryptionScope: resp.EncryptionScope, - IsServerEncrypted: resp.IsServerEncrypted, - LastModified: resp.LastModified, - RequestID: resp.RequestID, - Version: resp.Version, - VersionID: resp.VersionID, - ContentCRC64: resp.ContentCRC64, - } -} - -// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. 
-type UploadFileResponse = uploadFromReaderResponse - -// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile. -type UploadBufferResponse = uploadFromReaderResponse - -// UploadStreamResponse contains the response from method Client.CommitBlockList. -type UploadStreamResponse = CommitBlockListResponse - -// SetExpiryResponse contains the response from method Client.SetExpiry. -type SetExpiryResponse = generated.BlobClientSetExpiryResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml deleted file mode 100644 index 030350338..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml +++ /dev/null @@ -1,34 +0,0 @@ -trigger: - branches: - include: - - main - - feature/* - - hotfix/* - - release/* - paths: - include: - - sdk/storage/azblob - -pr: - branches: - include: - - main - - feature/* - - hotfix/* - - release/* - paths: - include: - - sdk/storage/azblob - - -stages: - - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml - parameters: - ServiceDirectory: 'storage/azblob' - RunLiveTests: true - UsePipelineProxy: false - EnvVars: - AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID) - AZURE_TENANT_ID: $(AZBLOB_TENANT_ID) - AZURE_CLIENT_SECRET: $(AZBLOB_CLIENT_SECRET) - AZURE_SUBSCRIPTION_ID: $(AZBLOB_SUBSCRIPTION_ID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go deleted file mode 100644 index c511d8a79..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go +++ /dev/null @@ -1,164 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package azblob - -import ( - "context" - "io" - "os" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" -) - -// ClientOptions contains the optional parameters when creating a Client. -type ClientOptions base.ClientOptions - -// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. -type Client struct { - svc *service.Client -} - -// NewClient creates an instance of Client with the specified values. -// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ -// - cred - an Azure AD credential, typically obtained via the azidentity module -// - options - client options; pass nil to accept the default values -func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - svcClient, err := service.NewClient(serviceURL, cred, (*service.ClientOptions)(options)) - if err != nil { - return nil, err - } - - return &Client{ - svc: svcClient, - }, nil -} - -// NewClientWithNoCredential creates an instance of Client with the specified values. -// This is used to anonymously access a storage account or with a shared access signature (SAS) token. -// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? 
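The deleted azblob client.go wires every constructor through the service client. With the dependency now resolved from the module cache instead of vendor/, a typical construction with an Azure AD credential still looks like this sketch; the account URL is a placeholder and azidentity is assumed to be available:

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// newBlobClient authenticates with whatever the environment provides
// (managed identity, workload identity, CLI login, environment variables).
func newBlobClient() (*azblob.Client, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return nil, err
	}
	// Placeholder account URL; substitute the real storage account endpoint.
	return azblob.NewClient("https://<account>.blob.core.windows.net/", cred, nil)
}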
-// - options - client options; pass nil to accept the default values -func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { - svcClient, err := service.NewClientWithNoCredential(serviceURL, (*service.ClientOptions)(options)) - if err != nil { - return nil, err - } - - return &Client{ - svc: svcClient, - }, nil -} - -// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ -// - cred - a SharedKeyCredential created with the matching storage account and access key -// - options - client options; pass nil to accept the default values -func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { - svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options)) - if err != nil { - return nil, err - } - - return &Client{ - svc: svcClient, - }, nil -} - -// NewClientFromConnectionString creates an instance of Client with the specified values. -// - connectionString - a connection string for the desired storage account -// - options - client options; pass nil to accept the default values -func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { - svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) - if err != nil { - return nil, err - } - return &Client{ - svc: svcClient, - }, nil -} - -// URL returns the URL endpoint used by the BlobClient object. -func (c *Client) URL() string { - return c.svc.URL() -} - -// ServiceClient returns the embedded service client for this client. -func (c *Client) ServiceClient() *service.Client { - return c.svc -} - -// CreateContainer is a lifecycle method to creates a new container under the specified account. -// If the container with the same name already exists, a ResourceExistsError will be raised. -// This method returns a client with which to interact with the newly created container. -func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) { - return c.svc.CreateContainer(ctx, containerName, o) -} - -// DeleteContainer is a lifecycle method that marks the specified container for deletion. -// The container and any blobs contained within it are later deleted during garbage collection. -// If the container is not found, a ResourceNotFoundError will be raised. -func (c *Client) DeleteContainer(ctx context.Context, containerName string, o *DeleteContainerOptions) (DeleteContainerResponse, error) { - return c.svc.DeleteContainer(ctx, containerName, o) -} - -// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. -// Note that deleting a blob also deletes all its snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (c *Client) DeleteBlob(ctx context.Context, containerName string, blobName string, o *DeleteBlobOptions) (DeleteBlobResponse, error) { - return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).Delete(ctx, o) -} - -// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. 
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -func (c *Client) NewListBlobsFlatPager(containerName string, o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] { - return c.svc.NewContainerClient(containerName).NewListBlobsFlatPager(o) -} - -// NewListContainersPager operation returns a pager of the containers under the specified account. -// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2. -func (c *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] { - return c.svc.NewListContainersPager(o) -} - -// UploadBuffer uploads a buffer in blocks to a block blob. -func (c *Client) UploadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) { - return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadBuffer(ctx, buffer, o) -} - -// UploadFile uploads a file in blocks to a block blob. -func (c *Client) UploadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) { - return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadFile(ctx, file, o) -} - -// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. -// A Context deadline or cancellation will cause this to error. -func (c *Client) UploadStream(ctx context.Context, containerName string, blobName string, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { - return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadStream(ctx, body, o) -} - -// DownloadBuffer downloads an Azure blob to a buffer with parallel. -func (c *Client) DownloadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *DownloadBufferOptions) (int64, error) { - return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) -} - -// DownloadFile downloads an Azure blob to a local file. -// The file would be truncated if the size doesn't match. -func (c *Client) DownloadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *DownloadFileOptions) (int64, error) { - return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadFile(ctx, file, o) -} - -// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. -func (c *Client) DownloadStream(ctx context.Context, containerName string, blobName string, o *DownloadStreamOptions) (DownloadStreamResponse, error) { - o = shared.CopyOptions(o) - return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadStream(ctx, o) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go deleted file mode 100644 index 48771e8c9..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
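NewListBlobsFlatPager above returns an azcore runtime.Pager, so enumeration is a More/NextPage loop. A sketch of the usual iteration; the Segment, BlobItems, and Name field names follow the deleted generated models and are assumptions of this example:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// listBlobs prints every blob name in the container, one page at a time.
func listBlobs(ctx context.Context, client *azblob.Client, containerName string) error {
	pager := client.NewListBlobsFlatPager(containerName, nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, item := range page.Segment.BlobItems {
			fmt.Println(*item.Name)
		}
	}
	return nil
}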
See License.txt in the project root for license information. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" -) - -// SharedKeyCredential contains an account's name and its primary or secondary key. -type SharedKeyCredential = exported.SharedKeyCredential - -// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the -// storage account's name and either its primary or secondary key. -func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { - return exported.NewSharedKeyCredential(accountName, accountKey) -} - -// URLParts object represents the components that make up an Azure Storage Container/Blob URL. -// NOTE: Changing any SAS-related field requires computing a new SAS signature. -type URLParts = sas.URLParts - -// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other -// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. -func ParseURL(u string) (URLParts, error) { - return sas.ParseURL(u) -} - -// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and -// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset and zero value count indicates from the offset to the resource's end. -type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go deleted file mode 100644 index c42fcdec7..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// PublicAccessType defines values for AccessType - private (default) or blob or container. -type PublicAccessType = generated.PublicAccessType - -const ( - PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob - PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer -) - -// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. -func PossiblePublicAccessTypeValues() []PublicAccessType { - return generated.PossiblePublicAccessTypeValues() -} - -// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType. -type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType - -const ( - DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude - DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly -) - -// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. 
-func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { - return generated.PossibleDeleteSnapshotsOptionTypeValues() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go deleted file mode 100644 index 83edea72b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go +++ /dev/null @@ -1,94 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package container - -import ( - "context" - "fmt" - "net/url" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" -) - -// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. -// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. -type BatchBuilder struct { - endpoint string - authPolicy policy.Policy - subRequests []*policy.Request - operationType *exported.BlobBatchOperationType -} - -func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { - if bb.operationType == nil { - bb.operationType = &operationType - return nil - } - if *bb.operationType != operationType { - return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) - } - return nil -} - -// Delete operation is used to add delete sub-request to the batch builder. -func (bb *BatchBuilder) Delete(blobName string, options *BatchDeleteOptions) error { - err := bb.checkOperationType(exported.BatchDeleteOperationType) - if err != nil { - return err - } - - blobName = url.PathEscape(blobName) - blobURL := runtime.JoinPaths(bb.endpoint, blobName) - - blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) - if err != nil { - return err - } - - deleteOptions, leaseInfo, accessConditions := options.format() - req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) - if err != nil { - return err - } - - // remove x-ms-version header - exported.UpdateSubRequestHeaders(req) - - bb.subRequests = append(bb.subRequests, req) - return nil -} - -// SetTier operation is used to add set tier sub-request to the batch builder. 
-func (bb *BatchBuilder) SetTier(blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { - err := bb.checkOperationType(exported.BatchSetTierOperationType) - if err != nil { - return err - } - - blobName = url.PathEscape(blobName) - blobURL := runtime.JoinPaths(bb.endpoint, blobName) - - blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) - if err != nil { - return err - } - - setTierOptions, leaseInfo, accessConditions := options.format() - req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) - if err != nil { - return err - } - - // remove x-ms-version header - exported.UpdateSubRequestHeaders(req) - - bb.subRequests = append(bb.subRequests, req) - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go deleted file mode 100644 index b23798cb1..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go +++ /dev/null @@ -1,437 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package container - -import ( - "bytes" - "context" - "errors" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "net/http" - "net/url" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" -) - -// ClientOptions contains the optional parameters when creating a Client. -type ClientOptions base.ClientOptions - -// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs. -type Client base.Client[generated.ContainerClient] - -// NewClient creates an instance of Client with the specified values. -// - containerURL - the URL of the container e.g. 
https://.blob.core.windows.net/container -// - cred - an Azure AD credential, typically obtained via the azidentity module -// - options - client options; pass nil to accept the default values -func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - audience := base.GetAudience((*base.ClientOptions)(options)) - authPolicy := shared.NewStorageChallengePolicy(cred, audience) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewContainerClient(containerURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientWithNoCredential creates an instance of Client with the specified values. -// This is used to anonymously access a container or with a shared access signature (SAS) token. -// - containerURL - the URL of the container e.g. https://.blob.core.windows.net/container? -// - options - client options; pass nil to accept the default values -func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { - conOptions := shared.GetClientOptions(options) - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewContainerClient(containerURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - containerURL - the URL of the container e.g. https://.blob.core.windows.net/container -// - cred - a SharedKeyCredential created with the matching container's storage account and access key -// - options - client options; pass nil to accept the default values -func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { - authPolicy := exported.NewSharedKeyCredPolicy(cred) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewContainerClient(containerURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientFromConnectionString creates an instance of Client with the specified values. 
-// - connectionString - a connection string for the desired storage account -// - containerName - the name of the container within the storage account -// - options - client options; pass nil to accept the default values -func NewClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*Client, error) { - parsed, err := shared.ParseConnectionString(connectionString) - if err != nil { - return nil, err - } - parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName) - - if parsed.AccountKey != "" && parsed.AccountName != "" { - credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) - if err != nil { - return nil, err - } - return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) - } - - return NewClientWithNoCredential(parsed.ServiceURL, options) -} - -func (c *Client) generated() *generated.ContainerClient { - return base.InnerClient((*base.Client[generated.ContainerClient])(c)) -} - -func (c *Client) sharedKey() *SharedKeyCredential { - return base.SharedKey((*base.Client[generated.ContainerClient])(c)) -} - -func (c *Client) credential() any { - return base.Credential((*base.Client[generated.ContainerClient])(c)) -} - -// helper method to return the generated.BlobClient which is used for creating the sub-requests -func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { - return base.InnerClient((*base.Client[generated.BlobClient])(b)) -} - -func (c *Client) getClientOptions() *base.ClientOptions { - return base.GetClientOptions((*base.Client[generated.ContainerClient])(c)) -} - -// URL returns the URL endpoint used by the Client object. -func (c *Client) URL() string { - return c.generated().Endpoint() -} - -// NewBlobClient creates a new blob.Client object by concatenating blobName to the end of -// Client's URL. The blob name will be URL-encoded. -// The new blob.Client uses the same request policy pipeline as this Client. -func (c *Client) NewBlobClient(blobName string) *blob.Client { - blobName = url.PathEscape(blobName) - blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.credential(), c.getClientOptions())) -} - -// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of -// this Client's URL. The blob name will be URL-encoded. -// The new appendblob.Client uses the same request policy pipeline as this Client. -func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { - blobName = url.PathEscape(blobName) - blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) -} - -// NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of -// this Client's URL. The blob name will be URL-encoded. -// The new blockblob.Client uses the same request policy pipeline as this Client. -func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { - blobName = url.PathEscape(blobName) - blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) -} - -// NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of -// this Client's URL. 
The blob name will be URL-encoded. -// The new pageblob.Client uses the same request policy pipeline as this Client. -func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { - blobName = url.PathEscape(blobName) - blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) -} - -// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. -func (c *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { - var opts *generated.ContainerClientCreateOptions - var cpkScopes *generated.ContainerCPKScopeInfo - if options != nil { - opts = &generated.ContainerClientCreateOptions{ - Access: options.Access, - Metadata: options.Metadata, - } - cpkScopes = options.CPKScopeInfo - } - resp, err := c.generated().Create(ctx, opts, cpkScopes) - - return resp, err -} - -// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. -func (c *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { - opts, leaseAccessConditions, modifiedAccessConditions := options.format() - resp, err := c.generated().Delete(ctx, opts, leaseAccessConditions, modifiedAccessConditions) - - return resp, err -} - -// Restore operation restore the contents and properties of a soft deleted container to a specified container. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/restore-container. -func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, options *RestoreOptions) (RestoreResponse, error) { - urlParts, err := blob.ParseURL(c.URL()) - if err != nil { - return RestoreResponse{}, err - } - - opts := &generated.ContainerClientRestoreOptions{ - DeletedContainerName: &urlParts.ContainerName, - DeletedContainerVersion: &deletedContainerVersion, - } - resp, err := c.generated().Restore(ctx, opts) - - return resp, err -} - -// GetProperties returns the container's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. -func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { - // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. - // This allows us to not expose a GetMetadata method at all simplifying the API. - // The optionals are nil, like they were in track 1.5 - opts, leaseAccessConditions := o.format() - - resp, err := c.generated().GetProperties(ctx, opts, leaseAccessConditions) - return resp, err -} - -// SetMetadata sets the container's metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. -func (c *Client) SetMetadata(ctx context.Context, o *SetMetadataOptions) (SetMetadataResponse, error) { - metadataOptions, lac, mac := o.format() - resp, err := c.generated().SetMetadata(ctx, metadataOptions, lac, mac) - - return resp, err -} - -// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly. 
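Create, GetProperties, and SetMetadata above are thin pass-throughs to the generated container client. A small lifecycle sketch against a *container.Client; the metadata key is illustrative, the Metadata field on the properties response is taken from the deleted models, and to.Ptr comes from azcore:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// createWithMetadata creates the container with one metadata entry and reads
// the properties back to confirm it.
func createWithMetadata(ctx context.Context, c *container.Client) error {
	if _, err := c.Create(ctx, &container.CreateOptions{
		Metadata: map[string]*string{"owner": to.Ptr("example")},
	}); err != nil {
		return err
	}
	props, err := c.GetProperties(ctx, nil)
	if err != nil {
		return err
	}
	fmt.Printf("container has %d metadata entries\n", len(props.Metadata))
	return nil
}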
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. -func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) { - options, ac := o.format() - resp, err := c.generated().GetAccessPolicy(ctx, options, ac) - return resp, err -} - -// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. -func (c *Client) SetAccessPolicy(ctx context.Context, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { - accessPolicy, mac, lac, acl, err := o.format() - if err != nil { - return SetAccessPolicyResponse{}, err - } - resp, err := c.generated().SetAccessPolicy(ctx, acl, accessPolicy, mac, lac) - return resp, err -} - -// GetAccountInfo provides account level information -// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. -func (c *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { - getAccountInfoOptions := o.format() - resp, err := c.generated().GetAccountInfo(ctx, getAccountInfoOptions) - return resp, err -} - -// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] { - listOptions := generated.ContainerClientListBlobFlatSegmentOptions{} - if o != nil { - listOptions.Include = o.Include.format() - listOptions.Marker = o.Marker - listOptions.Maxresults = o.MaxResults - listOptions.Prefix = o.Prefix - } - return runtime.NewPager(runtime.PagingHandler[ListBlobsFlatResponse]{ - More: func(page ListBlobsFlatResponse) bool { - return page.NextMarker != nil && len(*page.NextMarker) > 0 - }, - Fetcher: func(ctx context.Context, page *ListBlobsFlatResponse) (ListBlobsFlatResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions) - } else { - listOptions.Marker = page.NextMarker - req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions) - } - if err != nil { - return ListBlobsFlatResponse{}, err - } - resp, err := c.generated().InternalClient().Pipeline().Do(req) - if err != nil { - return ListBlobsFlatResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - // TOOD: storage error? - return ListBlobsFlatResponse{}, runtime.NewResponseError(resp) - } - return c.generated().ListBlobFlatSegmentHandleResponse(resp) - }, - }) -} - -// NewListBlobsHierarchyPager returns a channel of blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the -// previously-returned Marker) to get the next segment. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. 
-func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierarchyOptions) *runtime.Pager[ListBlobsHierarchyResponse] { - listOptions := o.format() - return runtime.NewPager(runtime.PagingHandler[ListBlobsHierarchyResponse]{ - More: func(page ListBlobsHierarchyResponse) bool { - return page.NextMarker != nil && len(*page.NextMarker) > 0 - }, - Fetcher: func(ctx context.Context, page *ListBlobsHierarchyResponse) (ListBlobsHierarchyResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions) - } else { - listOptions.Marker = page.NextMarker - req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions) - } - if err != nil { - return ListBlobsHierarchyResponse{}, err - } - resp, err := c.generated().InternalClient().Pipeline().Do(req) - if err != nil { - return ListBlobsHierarchyResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ListBlobsHierarchyResponse{}, runtime.NewResponseError(resp) - } - return c.generated().ListBlobHierarchySegmentHandleResponse(resp) - }, - }) -} - -// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { - if c.sharedKey() == nil { - return "", bloberror.MissingSharedKeyCredential - } - st := o.format() - urlParts, err := blob.ParseURL(c.URL()) - if err != nil { - return "", err - } - // Containers do not have snapshots, nor versions. - qps, err := sas.BlobSignatureValues{ - Version: sas.Version, - Protocol: sas.ProtocolHTTPS, - ContainerName: urlParts.ContainerName, - Permissions: permissions.String(), - StartTime: st, - ExpiryTime: expiry.UTC(), - }.SignWithSharedKey(c.sharedKey()) - if err != nil { - return "", err - } - - endpoint := c.URL() + "?" + qps.Encode() - - return endpoint, nil -} - -// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. -// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. -// All sub-requests in the batch must be of the same type, either delete or set tier. -func (c *Client) NewBatchBuilder() (*BatchBuilder, error) { - var authPolicy policy.Policy - - switch cred := c.credential().(type) { - case *azcore.TokenCredential: - authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(c.getClientOptions())) - case *SharedKeyCredential: - authPolicy = exported.NewSharedKeyCredPolicy(cred) - case nil: - // for authentication using SAS - authPolicy = nil - default: - return nil, fmt.Errorf("unrecognised authentication type %T", cred) - } - - return &BatchBuilder{ - endpoint: c.URL(), - authPolicy: authPolicy, - }, nil -} - -// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. -// It builds the request body using the BatchBuilder object passed. -// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. 
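GetSASURL above signs a container-scoped SAS locally and only works when the client was built with a shared key credential. A sketch producing a read/list SAS valid for one hour; the permission fields come from the sas package and the client construction is assumed:

package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

// containerReadSAS returns a URL granting read+list access for one hour.
// The *container.Client must have been created with a SharedKeyCredential,
// otherwise GetSASURL returns bloberror.MissingSharedKeyCredential.
func containerReadSAS(c *container.Client) (string, error) {
	perms := sas.ContainerPermissions{Read: true, List: true}
	return c.GetSASURL(perms, time.Now().Add(time.Hour), nil)
}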
-func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { - if bb == nil || len(bb.subRequests) == 0 { - return SubmitBatchResponse{}, errors.New("batch builder is empty") - } - - // create the request body - batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ - AuthPolicy: bb.authPolicy, - SubRequests: bb.subRequests, - }) - if err != nil { - return SubmitBatchResponse{}, err - } - - reader := bytes.NewReader(batchReq) - rsc := streaming.NopCloser(reader) - multipartContentType := "multipart/mixed; boundary=" + batchID - - resp, err := c.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) - if err != nil { - return SubmitBatchResponse{}, err - } - - batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) - if err != nil { - return SubmitBatchResponse{}, err - } - - return SubmitBatchResponse{ - Responses: batchResponses, - ContentType: resp.ContentType, - RequestID: resp.RequestID, - Version: resp.Version, - }, nil -} - -// FilterBlobs operation finds all blobs in the container whose tags match a given search expression. -// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags-container -// eg. "dog='germanshepherd' and penguin='emperorpenguin'" -func (c *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { - containerClientFilterBlobsOptions := o.format() - resp, err := c.generated().FilterBlobs(ctx, where, containerClientFilterBlobsOptions) - return resp, err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go deleted file mode 100644 index 09a8e8ed3..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go +++ /dev/null @@ -1,150 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package container - -import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - -// AccessTier defines values for blob access tiers. -type AccessTier = generated.AccessTier - -const ( - AccessTierArchive AccessTier = generated.AccessTierArchive - AccessTierCool AccessTier = generated.AccessTierCool - AccessTierHot AccessTier = generated.AccessTierHot - AccessTierP10 AccessTier = generated.AccessTierP10 - AccessTierP15 AccessTier = generated.AccessTierP15 - AccessTierP20 AccessTier = generated.AccessTierP20 - AccessTierP30 AccessTier = generated.AccessTierP30 - AccessTierP4 AccessTier = generated.AccessTierP4 - AccessTierP40 AccessTier = generated.AccessTierP40 - AccessTierP50 AccessTier = generated.AccessTierP50 - AccessTierP6 AccessTier = generated.AccessTierP6 - AccessTierP60 AccessTier = generated.AccessTierP60 - AccessTierP70 AccessTier = generated.AccessTierP70 - AccessTierP80 AccessTier = generated.AccessTierP80 - AccessTierPremium AccessTier = generated.AccessTierPremium -) - -// PossibleAccessTierValues returns the possible values for the AccessTier const type. -func PossibleAccessTierValues() []AccessTier { - return generated.PossibleAccessTierValues() -} - -// PublicAccessType defines values for AccessType - private (default) or blob or container. 
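NewBatchBuilder and SubmitBatch above implement the blob-batch protocol: up to 256 sub-requests of a single kind per call. A sketch that deletes several blobs in one round trip; the blob names come from the caller and all options are left nil:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// batchDelete sends one HTTP request containing a delete sub-request per blob.
func batchDelete(ctx context.Context, c *container.Client, blobNames []string) error {
	bb, err := c.NewBatchBuilder()
	if err != nil {
		return err
	}
	for _, name := range blobNames {
		if err := bb.Delete(name, nil); err != nil {
			return err
		}
	}
	resp, err := c.SubmitBatch(ctx, bb, nil)
	if err != nil {
		return err
	}
	fmt.Printf("submitted %d sub-requests\n", len(resp.Responses))
	return nil
}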
-type PublicAccessType = generated.PublicAccessType - -const ( - PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob - PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer -) - -// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. -func PossiblePublicAccessTypeValues() []PublicAccessType { - return generated.PossiblePublicAccessTypeValues() -} - -// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS. -type SKUName = generated.SKUName - -const ( - SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS - SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS - SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS - SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS - SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS -) - -// PossibleSKUNameValues returns the possible values for the SKUName const type. -func PossibleSKUNameValues() []SKUName { - return generated.PossibleSKUNameValues() -} - -// AccountKind defines values for AccountKind -type AccountKind = generated.AccountKind - -const ( - AccountKindStorage AccountKind = generated.AccountKindStorage - AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage - AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 - AccountKindFileStorage AccountKind = generated.AccountKindFileStorage - AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage -) - -// PossibleAccountKindValues returns the possible values for the AccountKind const type. -func PossibleAccountKindValues() []AccountKind { - return generated.PossibleAccountKindValues() -} - -// BlobType defines values for BlobType -type BlobType = generated.BlobType - -const ( - BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob - BlobTypePageBlob BlobType = generated.BlobTypePageBlob - BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob -) - -// PossibleBlobTypeValues returns the possible values for the BlobType const type. -func PossibleBlobTypeValues() []BlobType { - return generated.PossibleBlobTypeValues() -} - -// ArchiveStatus defines values for ArchiveStatus -type ArchiveStatus = generated.ArchiveStatus - -const ( - ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool - ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot -) - -// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. -func PossibleArchiveStatusValues() []ArchiveStatus { - return generated.PossibleArchiveStatusValues() -} - -// CopyStatusType defines values for CopyStatusType -type CopyStatusType = generated.CopyStatusType - -const ( - CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending - CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess - CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted - CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed -) - -// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. 
-func PossibleCopyStatusTypeValues() []CopyStatusType { - return generated.PossibleCopyStatusTypeValues() -} - -// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode -type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode - -const ( - ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable - ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked - ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked -) - -// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. -func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { - return generated.PossibleImmutabilityPolicyModeValues() -} - -// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. -// Valid values are High and Standard. -type RehydratePriority = generated.RehydratePriority - -const ( - RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh - RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard -) - -// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. -func PossibleRehydratePriorityValues() []RehydratePriority { - return generated.PossibleRehydratePriorityValues() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go deleted file mode 100644 index 61d936ab7..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go +++ /dev/null @@ -1,427 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package container - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "reflect" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// SharedKeyCredential contains an account's name and its primary or secondary key. -type SharedKeyCredential = exported.SharedKeyCredential - -// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the -// storage account's name and either its primary or secondary key. -func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { - return exported.NewSharedKeyCredential(accountName, accountKey) -} - -// Request Model Declaration ------------------------------------------------------------------------------------------- - -// CPKScopeInfo contains a group of parameters for the ContainerClient.Create method. -type CPKScopeInfo = generated.ContainerCPKScopeInfo - -// BlobFlatListSegment - List of BlobItem. -type BlobFlatListSegment = generated.BlobFlatListSegment - -// BlobHierarchyListSegment - List of BlobItem and BlobPrefix. -type BlobHierarchyListSegment = generated.BlobHierarchyListSegment - -// BlobProperties - Properties of a blob. -type BlobProperties = generated.BlobProperties - -// BlobItem - An Azure Storage blob. -type BlobItem = generated.BlobItem - -// BlobTags - Blob tags. -type BlobTags = generated.BlobTags - -// BlobPrefix is a blob's prefix when hierarchically listing blobs. 
-type BlobPrefix = generated.BlobPrefix - -// BlobTag - a key/value pair on a blob. -type BlobTag = generated.BlobTag - -// AccessConditions identifies container-specific access conditions which you optionally set. -type AccessConditions = exported.ContainerAccessConditions - -// LeaseAccessConditions contains optional parameters to access leased entity. -type LeaseAccessConditions = exported.LeaseAccessConditions - -// ModifiedAccessConditions contains a group of parameters for specifying access conditions. -type ModifiedAccessConditions = exported.ModifiedAccessConditions - -// AccessPolicy - An Access policy. -type AccessPolicy = generated.AccessPolicy - -// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. -// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. -type AccessPolicyPermission = exported.AccessPolicyPermission - -// SignedIdentifier - signed identifier. -type SignedIdentifier = generated.SignedIdentifier - -// Request Model Declaration ------------------------------------------------------------------------------------------- - -// CreateOptions contains the optional parameters for the Client.Create method. -type CreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access. - Access *PublicAccessType - - // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]*string - - // Optional. Specifies the encryption scope settings to set on the container. - CPKScopeInfo *CPKScopeInfo -} - -// --------------------------------------------------------------------------------------------------------------------- - -// DeleteOptions contains the optional parameters for the Client.Delete method. -type DeleteOptions struct { - AccessConditions *AccessConditions -} - -func (o *DeleteOptions) format() (*generated.ContainerClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatContainerAccessConditions(o.AccessConditions) - return nil, leaseAccessConditions, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// RestoreOptions contains the optional parameters for the Client.Restore method. -type RestoreOptions struct { - // placeholder for future options -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -type GetPropertiesOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetPropertiesOptions, *generated.LeaseAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.LeaseAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ListBlobsInclude indicates what additional information the service should return with each blob. 
-type ListBlobsInclude struct { - Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool -} - -func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem { - if reflect.ValueOf(l).IsZero() { - return nil - } - - include := []generated.ListBlobsIncludeItem{} - - if l.Copy { - include = append(include, generated.ListBlobsIncludeItemCopy) - } - if l.Deleted { - include = append(include, generated.ListBlobsIncludeItemDeleted) - } - if l.DeletedWithVersions { - include = append(include, generated.ListBlobsIncludeItemDeletedwithversions) - } - if l.ImmutabilityPolicy { - include = append(include, generated.ListBlobsIncludeItemImmutabilitypolicy) - } - if l.LegalHold { - include = append(include, generated.ListBlobsIncludeItemLegalhold) - } - if l.Metadata { - include = append(include, generated.ListBlobsIncludeItemMetadata) - } - if l.Snapshots { - include = append(include, generated.ListBlobsIncludeItemSnapshots) - } - if l.Tags { - include = append(include, generated.ListBlobsIncludeItemTags) - } - if l.UncommittedBlobs { - include = append(include, generated.ListBlobsIncludeItemUncommittedblobs) - } - if l.Versions { - include = append(include, generated.ListBlobsIncludeItemVersions) - } - - return include -} - -// ListBlobsFlatOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment method. -type ListBlobsFlatOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include ListBlobsInclude - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by MaxResults, or than the default of 5000. - MaxResults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ListBlobsHierarchyOptions provides set of configurations for Client.NewListBlobsHierarchyPager -type ListBlobsHierarchyOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include ListBlobsInclude - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. 
The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by MaxResults, or than the default of 5000. - MaxResults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string -} - -// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment method. -func (o *ListBlobsHierarchyOptions) format() generated.ContainerClientListBlobHierarchySegmentOptions { - if o == nil { - return generated.ContainerClientListBlobHierarchySegmentOptions{} - } - - return generated.ContainerClientListBlobHierarchySegmentOptions{ - Include: o.Include.format(), - Marker: o.Marker, - Maxresults: o.MaxResults, - Prefix: o.Prefix, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. -type GetSASURLOptions struct { - StartTime *time.Time -} - -func (o *GetSASURLOptions) format() time.Time { - if o == nil { - return time.Time{} - } - - var st time.Time - if o.StartTime != nil { - st = o.StartTime.UTC() - } else { - st = time.Time{} - } - return st -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. -type SetMetadataOptions struct { - Metadata map[string]*string - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func (o *SetMetadataOptions) format() (*generated.ContainerClientSetMetadataOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - return &generated.ContainerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method. -type GetAccessPolicyOptions struct { - LeaseAccessConditions *LeaseAccessConditions -} - -func (o *GetAccessPolicyOptions) format() (*generated.ContainerClientGetAccessPolicyOptions, *LeaseAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.LeaseAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation. -type SetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access. - // If this header is not included in the request, container data is private to the account owner. 
- Access *PublicAccessType - AccessConditions *AccessConditions - ContainerACL []*SignedIdentifier -} - -func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions, []*SignedIdentifier, error) { - if o == nil { - return nil, nil, nil, nil, nil - } - if o.ContainerACL != nil { - for _, c := range o.ContainerACL { - err := formatTime(c) - if err != nil { - return nil, nil, nil, nil, err - } - } - } - lac, mac := exported.FormatContainerAccessConditions(o.AccessConditions) - return &generated.ContainerClientSetAccessPolicyOptions{ - Access: o.Access, - }, lac, mac, o.ContainerACL, nil -} - -func formatTime(c *SignedIdentifier) error { - if c.AccessPolicy == nil { - return nil - } - - if c.AccessPolicy.Start != nil { - st, err := time.Parse(time.RFC3339, c.AccessPolicy.Start.UTC().Format(time.RFC3339)) - if err != nil { - return err - } - c.AccessPolicy.Start = &st - } - if c.AccessPolicy.Expiry != nil { - et, err := time.Parse(time.RFC3339, c.AccessPolicy.Expiry.UTC().Format(time.RFC3339)) - if err != nil { - return err - } - c.AccessPolicy.Expiry = &et - } - - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetAccountInfoOptions provides set of options for Client.GetAccountInfo -type GetAccountInfoOptions struct { - // placeholder for future options -} - -func (o *GetAccountInfoOptions) format() *generated.ContainerClientGetAccountInfoOptions { - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. -type BatchDeleteOptions struct { - blob.DeleteOptions - VersionID *string - Snapshot *string -} - -func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := generated.BlobClientDeleteOptions{ - DeleteSnapshots: o.DeleteSnapshots, - DeleteType: o.BlobDeleteType, // None by default - Snapshot: o.Snapshot, - VersionID: o.VersionID, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, leaseAccessConditions, modifiedAccessConditions -} - -// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. -type BatchSetTierOptions struct { - blob.SetTierOptions - VersionID *string - Snapshot *string -} - -func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := generated.BlobClientSetTierOptions{ - RehydratePriority: o.RehydratePriority, - Snapshot: o.Snapshot, - VersionID: o.VersionID, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, leaseAccessConditions, modifiedAccessConditions -} - -// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. 
-type SubmitBatchOptions struct { - // placeholder for future options -} - -func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions { - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// FilterBlobsOptions provides set of options for Client.FilterBlobs. -type FilterBlobsOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - MaxResults *int32 -} - -func (o *FilterBlobsOptions) format() *generated.ContainerClientFilterBlobsOptions { - if o == nil { - return nil - } - return &generated.ContainerClientFilterBlobsOptions{ - Marker: o.Marker, - Maxresults: o.MaxResults, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go deleted file mode 100644 index 9aaefe277..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package container - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// CreateResponse contains the response from method Client.Create. -type CreateResponse = generated.ContainerClientCreateResponse - -// DeleteResponse contains the response from method Client.Delete. -type DeleteResponse = generated.ContainerClientDeleteResponse - -// RestoreResponse contains the response from method Client.Restore. -type RestoreResponse = generated.ContainerClientRestoreResponse - -// GetPropertiesResponse contains the response from method Client.GetProperties. -type GetPropertiesResponse = generated.ContainerClientGetPropertiesResponse - -// ListBlobsFlatResponse contains the response from method Client.ListBlobFlatSegment. -type ListBlobsFlatResponse = generated.ContainerClientListBlobFlatSegmentResponse - -// ListBlobsFlatSegmentResponse - An enumeration of blobs -type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse - -// ListBlobsHierarchyResponse contains the response from method Client.ListBlobHierarchySegment. 
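[Editor's note: the Marker/MaxResults knobs on FilterBlobsOptions above pair with Client.FilterBlobs shown earlier in this diff. A minimal paging sketch, assuming the response embeds the generated FilterBlobSegment fields (Blobs, NextMarker) and using a made-up tag expression.]

    var marker *string
    for {
        resp, err := containerClient.FilterBlobs(context.TODO(),
            "backuptype='logical' and retain='false'", // hypothetical tag expression
            &container.FilterBlobsOptions{Marker: marker, MaxResults: to.Ptr(int32(100))})
        if err != nil {
            log.Fatal(err)
        }
        for _, b := range resp.Blobs {
            fmt.Println(*b.Name)
        }
        if resp.NextMarker == nil || *resp.NextMarker == "" {
            break
        }
        marker = resp.NextMarker
    }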
-type ListBlobsHierarchyResponse = generated.ContainerClientListBlobHierarchySegmentResponse - -// ListBlobsHierarchySegmentResponse - An enumeration of blobs -type ListBlobsHierarchySegmentResponse = generated.ListBlobsHierarchySegmentResponse - -// SetMetadataResponse contains the response from method Client.SetMetadata. -type SetMetadataResponse = generated.ContainerClientSetMetadataResponse - -// GetAccessPolicyResponse contains the response from method Client.GetAccessPolicy. -type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse - -// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy. -type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse - -// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. -type GetAccountInfoResponse = generated.ContainerClientGetAccountInfoResponse - -// SubmitBatchResponse contains the response from method Client.SubmitBatch. -type SubmitBatchResponse struct { - // Responses contains the responses of the sub-requests in the batch - Responses []*BatchResponseItem - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BatchResponseItem contains the response for the individual sub-requests. -type BatchResponseItem = exported.BatchResponseItem - -// FilterBlobsResponse contains the response from method Client.FilterBlobs. -type FilterBlobsResponse = generated.ContainerClientFilterBlobsResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go deleted file mode 100644 index 9a4806c57..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go +++ /dev/null @@ -1,210 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -/* - -Package azblob can access an Azure Blob Storage. - -The azblob package is capable of :- - - Creating, deleting, and querying containers in an account - - Creating, deleting, and querying blobs in a container - - Creating Shared Access Signature for authentication - -Types of Resources - -The azblob package allows you to interact with three types of resources :- - -* Azure storage accounts. -* Containers within those storage accounts. -* Blobs (block blobs/ page blobs/ append blobs) within those containers. - -The Azure Blob Storage (azblob) client library for Go allows you to interact with each of these components through the use of a dedicated client object. -To create a client object, you will need the account's blob service endpoint URL and a credential that allows you to access the account. - -Types of Credentials - -The clients support different forms of authentication. -The azblob library supports any of the `azcore.TokenCredential` interfaces, authorization via a Connection String, -or authorization with a Shared Access Signature token. - -Using a Shared Key - -To use an account shared key (aka account key or access key), provide the key as a string. -This can be found in your storage account in the Azure Portal under the "Access Keys" section. 
- -Use the key as the credential parameter to authenticate the client: - - accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") - if !ok { - panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") - } - accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") - if !ok { - panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") - } - - serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) - - cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) - handle(err) - - serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) - handle(err) - - fmt.Println(serviceClient.URL()) - -Using a Connection String - -Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. -To do this, pass the connection string to the service client's `NewClientFromConnectionString` method. -The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. - - connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" - serviceClient, err := azblob.NewClientFromConnectionString(connStr, nil) - handle(err) - -Using a Shared Access Signature (SAS) Token - -To use a shared access signature (SAS) token, provide the token at the end of your service URL. -You can generate a SAS token from the Azure Portal under Shared Access Signature or use the ServiceClient.GetSASToken() functions. - - accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") - if !ok { - panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") - } - accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") - if !ok { - panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") - } - serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) - - cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) - handle(err) - serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) - handle(err) - fmt.Println(serviceClient.URL()) - - // Alternatively, you can create SAS on the fly - - resources := sas.AccountResourceTypes{Service: true} - permission := sas.AccountPermissions{Read: true} - start := time.Now() - expiry := start.AddDate(0, 0, 1) - serviceURLWithSAS, err := serviceClient.ServiceClient().GetSASURL(resources, permission, expiry, &service.GetSASURLOptions{StartTime: &start}) - handle(err) - - serviceClientWithSAS, err := azblob.NewClientWithNoCredential(serviceURLWithSAS, nil) - handle(err) - - fmt.Println(serviceClientWithSAS.URL()) - -Types of Clients - -There are three different clients provided to interact with the various components of the Blob Service: - -1. **`ServiceClient`** - * Get and set account settings. - * Query, create, and delete containers within the account. - -2. **`ContainerClient`** - * Get and set container access settings, properties, and metadata. - * Create, delete, and query blobs within the container. - * `ContainerLeaseClient` to support container lease management. - -3. **`BlobClient`** - * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient` - * Get and set blob properties. - * Perform CRUD operations on a given blob. - * `BlobLeaseClient` to support blob lease management. - -Examples - - // Your account name and key can be obtained from the Azure Portal. 
- accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") - if !ok { - panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") - } - - accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") - if !ok { - panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") - } - cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) - handle(err) - - // The service URL for blob endpoints is usually in the form: http(s)://.blob.core.windows.net/ - serviceClient, err := azblob.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) - handle(err) - - // ===== 1. Create a container ===== - - // First, create a container client, and use the Create method to create a new container in your account - containerClient := serviceClient.ServiceClient().NewContainerClient("testcontainer") - handle(err) - - // All APIs have an options' bag struct as a parameter. - // The options' bag struct allows you to specify optional parameters such as metadata, public access types, etc. - // If you want to use the default options, pass in nil. - _, err = containerClient.Create(context.TODO(), nil) - handle(err) - - // ===== 2. Upload and Download a block blob ===== - uploadData := "Hello world!" - - // Create a new blockBlobClient from the containerClient - blockBlobClient := containerClient.NewBlockBlobClient("HelloWorld.txt") - handle(err) - - // Upload data to the block blob - blockBlobUploadOptions := blockblob.UploadOptions{ - Metadata: map[string]*string{"Foo": to.Ptr("Bar")}, - Tags: map[string]string{"Year": "2022"}, - } - _, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions) - handle(err) - - // Download the blob's contents and ensure that the download worked properly - blobDownloadResponse, err := blockBlobClient.DownloadStream(context.TODO(), nil) - handle(err) - - // Use the bytes.Buffer object to read the downloaded data. - // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. - reader := blobDownloadResponse.Body(nil) - downloadData, err := io.ReadAll(reader) - handle(err) - if string(downloadData) != uploadData { - handle(errors.New("uploaded data should be same as downloaded data")) - } - - if err = reader.Close(); err != nil { - handle(err) - return - } - - // ===== 3. List blobs ===== - // List methods returns a pager object which can be used to iterate over the results of a paging operation. - // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. - // PageResponse() can be used to iterate over the results of the specific page. - // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. - pager := containerClient.NewListBlobsFlatPager(nil) - for pager.More() { - resp, err := pager.NextPage(context.TODO()) - handle(err) - for _, v := range resp.Segment.BlobItems { - fmt.Println(*v.Name) - } - } - - // Delete the blob. - _, err = blockBlobClient.Delete(context.TODO(), nil) - handle(err) - - // Delete the container. 
- _, err = containerClient.Delete(context.TODO(), nil) - handle(err) -*/ - -package azblob diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go deleted file mode 100644 index 073de855b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ /dev/null @@ -1,129 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package base - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" - "strings" -) - -// ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions - - // Audience to use when requesting tokens for Azure Active Directory authentication. - // Only has an effect when credential is of type TokenCredential. The value could be - // https://storage.azure.com/ (default) or https://.blob.core.windows.net. - Audience string -} - -type Client[T any] struct { - inner *T - credential any - options *ClientOptions -} - -func InnerClient[T any](client *Client[T]) *T { - return client.inner -} - -func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { - switch cred := client.credential.(type) { - case *exported.SharedKeyCredential: - return cred - default: - return nil - } -} - -func Credential[T any](client *Client[T]) any { - return client.credential -} - -func GetClientOptions[T any](client *Client[T]) *ClientOptions { - return client.options -} - -func GetAudience(clOpts *ClientOptions) string { - if clOpts == nil || len(strings.TrimSpace(clOpts.Audience)) == 0 { - return shared.TokenScope - } else { - return strings.TrimRight(clOpts.Audience, "/") + "/.default" - } -} - -func NewClient[T any](inner *T) *Client[T] { - return &Client[T]{inner: inner} -} - -func NewServiceClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ServiceClient] { - return &Client[generated.ServiceClient]{ - inner: generated.NewServiceClient(containerURL, azClient), - credential: credential, - options: options, - } -} - -func NewContainerClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ContainerClient] { - return &Client[generated.ContainerClient]{ - inner: generated.NewContainerClient(containerURL, azClient), - credential: credential, - options: options, - } -} - -func NewBlobClient(blobURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.BlobClient] { - return &Client[generated.BlobClient]{ - inner: generated.NewBlobClient(blobURL, azClient), - credential: credential, - options: options, - } -} - -type CompositeClient[T, U any] struct { - innerT *T - innerU *U - sharedKey *exported.SharedKeyCredential -} - -func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { - return &Client[T]{ - inner: client.innerT, - credential: client.sharedKey, - }, client.innerU -} - -func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) 
*CompositeClient[generated.BlobClient, generated.AppendBlobClient] { - return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ - innerT: generated.NewBlobClient(blobURL, azClient), - innerU: generated.NewAppendBlobClient(blobURL, azClient), - sharedKey: sharedKey, - } -} - -func NewBlockBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { - return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ - innerT: generated.NewBlobClient(blobURL, azClient), - innerU: generated.NewBlockBlobClient(blobURL, azClient), - sharedKey: sharedKey, - } -} - -func NewPageBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { - return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ - innerT: generated.NewBlobClient(blobURL, azClient), - innerU: generated.NewPageBlobClient(blobURL, azClient), - sharedKey: sharedKey, - } -} - -func SharedKeyComposite[T, U any](client *CompositeClient[T, U]) *exported.SharedKeyCredential { - return client.sharedKey -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go deleted file mode 100644 index 96d188fa5..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package exported - -import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - -const SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" - -// ContainerAccessConditions identifies container-specific access conditions which you optionally set. -type ContainerAccessConditions struct { - ModifiedAccessConditions *ModifiedAccessConditions - LeaseAccessConditions *LeaseAccessConditions -} - -func FormatContainerAccessConditions(b *ContainerAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { - if b == nil { - return nil, nil - } - return b.LeaseAccessConditions, b.ModifiedAccessConditions -} - -// BlobAccessConditions identifies blob-specific access conditions which you optionally set. -type BlobAccessConditions struct { - LeaseAccessConditions *LeaseAccessConditions - ModifiedAccessConditions *ModifiedAccessConditions -} - -func FormatBlobAccessConditions(b *BlobAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { - if b == nil { - return nil, nil - } - return b.LeaseAccessConditions, b.ModifiedAccessConditions -} - -// LeaseAccessConditions contains optional parameters to access leased entity. -type LeaseAccessConditions = generated.LeaseAccessConditions - -// ModifiedAccessConditions contains a group of parameters for specifying access conditions. 
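[Editor's note: the ContainerAccessConditions/BlobAccessConditions wrappers above are what callers hang lease and modified-since preconditions on. A hedged sketch of a conditional container delete; the timestamp is made up, and a lease condition could be attached the same way.]

    notModifiedSince := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
    _, err := containerClient.Delete(context.TODO(), &container.DeleteOptions{
        AccessConditions: &container.AccessConditions{
            ModifiedAccessConditions: &container.ModifiedAccessConditions{
                // service rejects the delete (412) if the container changed after this time
                IfUnmodifiedSince: &notModifiedSince,
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }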
-type ModifiedAccessConditions = generated.ModifiedAccessConditions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go deleted file mode 100644 index 14c293cf6..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go +++ /dev/null @@ -1,67 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package exported - -import ( - "bytes" - "fmt" -) - -// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. -// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. -type AccessPolicyPermission struct { - Read, Add, Create, Write, Delete, List bool -} - -// String produces the access policy permission string for an Azure Storage container. -// Call this method to set AccessPolicy's Permission field. -func (p *AccessPolicyPermission) String() string { - var b bytes.Buffer - if p.Read { - b.WriteRune('r') - } - if p.Add { - b.WriteRune('a') - } - if p.Create { - b.WriteRune('c') - } - if p.Write { - b.WriteRune('w') - } - if p.Delete { - b.WriteRune('d') - } - if p.List { - b.WriteRune('l') - } - return b.String() -} - -// Parse initializes the AccessPolicyPermission's fields from a string. -func (p *AccessPolicyPermission) Parse(s string) error { - *p = AccessPolicyPermission{} // Clear the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'l': - p.List = true - default: - return fmt.Errorf("invalid permission: '%v'", r) - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go deleted file mode 100644 index c26c62aa8..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go +++ /dev/null @@ -1,280 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package exported - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "mime" - "mime/multipart" - "net/http" - "net/textproto" - "strconv" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -const ( - batchIdPrefix = "batch_" - httpVersion = "HTTP/1.1" - httpNewline = "\r\n" -) - -// createBatchID is used for creating a new batch id which is used as batch boundary in the request body -func createBatchID() (string, error) { - batchID, err := uuid.New() - if err != nil { - return "", err - } - - return batchIdPrefix + batchID.String(), nil -} - -// buildSubRequest is used for building the sub-request. 
Example: -// DELETE /container0/blob0 HTTP/1.1 -// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT -// Authorization: SharedKey account: -// Content-Length: 0 -func buildSubRequest(req *policy.Request) []byte { - var batchSubRequest strings.Builder - blobPath := req.Raw().URL.EscapedPath() - if len(req.Raw().URL.RawQuery) > 0 { - blobPath += "?" + req.Raw().URL.RawQuery - } - - batchSubRequest.WriteString(fmt.Sprintf("%s %s %s%s", req.Raw().Method, blobPath, httpVersion, httpNewline)) - - for k, v := range req.Raw().Header { - if strings.EqualFold(k, shared.HeaderXmsVersion) { - continue - } - if len(v) > 0 { - batchSubRequest.WriteString(fmt.Sprintf("%v: %v%v", k, v[0], httpNewline)) - } - } - - batchSubRequest.WriteString(httpNewline) - return []byte(batchSubRequest.String()) -} - -// CreateBatchRequest creates a new batch request using the sub-requests present in the BlobBatchBuilder. -// -// Example of a sub-request in the batch request body: -// -// --batch_357de4f7-6d0b-4e02-8cd2-6361411a9525 -// Content-Type: application/http -// Content-Transfer-Encoding: binary -// Content-ID: 0 -// -// DELETE /container0/blob0 HTTP/1.1 -// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT -// Authorization: SharedKey account: -// Content-Length: 0 -func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { - batchID, err := createBatchID() - if err != nil { - return nil, "", err - } - - // Create a new multipart buffer - reqBody := &bytes.Buffer{} - writer := multipart.NewWriter(reqBody) - - // Set the boundary - err = writer.SetBoundary(batchID) - if err != nil { - return nil, "", err - } - - partHeaders := make(textproto.MIMEHeader) - partHeaders["Content-Type"] = []string{"application/http"} - partHeaders["Content-Transfer-Encoding"] = []string{"binary"} - var partWriter io.Writer - - for i, req := range bb.SubRequests { - if bb.AuthPolicy != nil { - _, err := bb.AuthPolicy.Do(req) - if err != nil && !strings.EqualFold(err.Error(), "no more policies") { - if log.Should(EventSubmitBatch) { - log.Writef(EventSubmitBatch, "failed to authorize sub-request for %v.\nError: %v", req.Raw().URL.Path, err.Error()) - } - return nil, "", err - } - } - - partHeaders["Content-ID"] = []string{fmt.Sprintf("%v", i)} - partWriter, err = writer.CreatePart(partHeaders) - if err != nil { - return nil, "", err - } - - _, err = partWriter.Write(buildSubRequest(req)) - if err != nil { - return nil, "", err - } - } - - // Close the multipart writer - err = writer.Close() - if err != nil { - return nil, "", err - } - - return reqBody.Bytes(), batchID, nil -} - -// UpdateSubRequestHeaders updates the sub-request headers. -// Removes x-ms-version header. -func UpdateSubRequestHeaders(req *policy.Request) { - // remove x-ms-version header from the request header - for k := range req.Raw().Header { - if strings.EqualFold(k, shared.HeaderXmsVersion) { - delete(req.Raw().Header, k) - } - } -} - -// BatchResponseItem contains the response for the individual sub-requests. 
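[Editor's note: CreateBatchRequest above leans entirely on the standard library's multipart machinery. A self-contained sketch of the same body shape; the boundary value is taken from the comment above, and the single DELETE sub-request is made up.]

    package main

    import (
        "bytes"
        "fmt"
        "mime/multipart"
        "net/textproto"
    )

    func main() {
        body := &bytes.Buffer{}
        w := multipart.NewWriter(body)
        _ = w.SetBoundary("batch_357de4f7-6d0b-4e02-8cd2-6361411a9525")

        h := make(textproto.MIMEHeader)
        h.Set("Content-Type", "application/http")
        h.Set("Content-Transfer-Encoding", "binary")
        h.Set("Content-ID", "0")

        // one authorized sub-request per part; x-ms-version is stripped from sub-requests
        part, _ := w.CreatePart(h)
        fmt.Fprint(part, "DELETE /container0/blob0 HTTP/1.1\r\nContent-Length: 0\r\n\r\n")
        _ = w.Close()

        // the assembled body is sent with:
        //   Content-Type: multipart/mixed; boundary=batch_357de4f7-6d0b-4e02-8cd2-6361411a9525
        fmt.Println(body.String())
    }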
-type BatchResponseItem struct { - ContentID *int - ContainerName *string - BlobName *string - RequestID *string - Version *string - Error error // nil error indicates that the batch sub-request operation is successful -} - -func getResponseBoundary(contentType *string) (string, error) { - if contentType == nil { - return "", fmt.Errorf("Content-Type returned in SubmitBatch response is nil") - } - - _, params, err := mime.ParseMediaType(*contentType) - if err != nil { - return "", err - } - - if val, ok := params["boundary"]; ok { - return val, nil - } else { - return "", fmt.Errorf("batch boundary not present in Content-Type header of the SubmitBatch response.\nContent-Type: %v", *contentType) - } -} - -func getContentID(part *multipart.Part) (*int, error) { - contentID := part.Header.Get("Content-ID") - if contentID == "" { - return nil, nil - } - - val, err := strconv.Atoi(strings.TrimSpace(contentID)) - if err != nil { - return nil, err - } - return &val, nil -} - -func getResponseHeader(key string, resp *http.Response) *string { - val := resp.Header.Get(key) - if val == "" { - return nil - } - return &val -} - -// ParseBlobBatchResponse is used for parsing the batch response body into individual sub-responses for each item in the batch. -func ParseBlobBatchResponse(respBody io.ReadCloser, contentType *string, subRequests []*policy.Request) ([]*BatchResponseItem, error) { - boundary, err := getResponseBoundary(contentType) - if err != nil { - return nil, err - } - - respReader := multipart.NewReader(respBody, boundary) - var responses []*BatchResponseItem - - for { - part, err := respReader.NextPart() - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return nil, err - } - - batchSubResponse := &BatchResponseItem{} - batchSubResponse.ContentID, err = getContentID(part) - if err != nil { - return nil, err - } - - if batchSubResponse.ContentID != nil { - path := strings.Trim(subRequests[*batchSubResponse.ContentID].Raw().URL.Path, "/") - p := strings.Split(path, "/") - batchSubResponse.ContainerName = to.Ptr(p[0]) - batchSubResponse.BlobName = to.Ptr(strings.Join(p[1:], "/")) - } - - respBytes, err := io.ReadAll(part) - if err != nil { - return nil, err - } - respBytes = append(respBytes, byte('\n')) - buf := bytes.NewBuffer(respBytes) - resp, err := http.ReadResponse(bufio.NewReader(buf), nil) - // sub-response parsing error - if err != nil { - return nil, err - } - - batchSubResponse.RequestID = getResponseHeader(shared.HeaderXmsRequestID, resp) - batchSubResponse.Version = getResponseHeader(shared.HeaderXmsVersion, resp) - - // sub-response failure - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - if len(responses) == 0 && batchSubResponse.ContentID == nil { - // this case can happen when the parent request fails. - // For example, batch request having more than 256 sub-requests. 
- return nil, fmt.Errorf("%v", string(respBytes)) - } - - resp.Request = subRequests[*batchSubResponse.ContentID].Raw() - batchSubResponse.Error = runtime.NewResponseError(resp) - } - - responses = append(responses, batchSubResponse) - } - - if len(responses) != len(subRequests) { - return nil, fmt.Errorf("expected %v responses, got %v for the batch ID: %v", len(subRequests), len(responses), boundary) - } - - return responses, nil -} - -// not exported but used for batch request creation - -// BlobBatchBuilder is used for creating the blob batch request -type BlobBatchBuilder struct { - AuthPolicy policy.Policy - SubRequests []*policy.Request -} - -// BlobBatchOperationType defines the operation of the blob batch sub-requests. -type BlobBatchOperationType string - -const ( - BatchDeleteOperationType BlobBatchOperationType = "delete" - BatchSetTierOperationType BlobBatchOperationType = "set tier" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go deleted file mode 100644 index d0355727c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package exported - -import ( - "fmt" - "strconv" -) - -// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and -// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset and zero value count indicates from the offset to the resource's end. -type HTTPRange struct { - Offset int64 - Count int64 -} - -// FormatHTTPRange converts an HTTPRange to its string format. -func FormatHTTPRange(r HTTPRange) *string { - if r.Offset == 0 && r.Count == 0 { - return nil // No specified range - } - endOffset := "" // if count == CountToEnd (0) - if r.Count > 0 { - endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) - } - dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) - return &dataRange -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go deleted file mode 100644 index d775fb5c8..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package exported - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" -) - -// NOTE: these are publicly exported via type-aliasing in azblob/log.go -const ( - // EventUpload is used when we compute number of blocks to upload and size of each block. - EventUpload log.Event = "azblob.Upload" - - // EventSubmitBatch is used for logging events related to submit blob batch operation. 
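[Editor's note: EventUpload and EventSubmitBatch above are re-exported by the azblob package and can be wired into a log listener to surface the batch diagnostics written via log.Writef. A sketch, assuming the azcore/log SetEvents/SetListener API and the public azblob.EventSubmitBatch alias.]

    import (
        "fmt"

        azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
        "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
    )

    func init() {
        // only emit the events of interest, then print them
        azlog.SetEvents(azblob.EventSubmitBatch, azlog.EventResponse)
        azlog.SetListener(func(ev azlog.Event, msg string) {
            fmt.Printf("[%s] %s\n", ev, msg)
        })
    }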
- EventSubmitBatch log.Event = "azblob.SubmitBatch" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go deleted file mode 100644 index 71473deca..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go +++ /dev/null @@ -1,71 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package exported - -import ( - "net/http" - "strconv" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// ExpiryType defines values for ExpiryType -type ExpiryType interface { - Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) - notPubliclyImplementable() -} - -// ExpiryTypeAbsolute defines the absolute time for the blob expiry -type ExpiryTypeAbsolute time.Time - -// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry -type ExpiryTypeRelativeToNow time.Duration - -// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry -type ExpiryTypeRelativeToCreation time.Duration - -// ExpiryTypeNever defines that the blob will be set to never expire -type ExpiryTypeNever struct { - // empty struct since NeverExpire expiry type does not require expiry time -} - -// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. -type SetExpiryOptions struct { - // placeholder for future options -} - -func (e ExpiryTypeAbsolute) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { - return generated.ExpiryOptionsAbsolute, &generated.BlobClientSetExpiryOptions{ - ExpiresOn: to.Ptr(time.Time(e).UTC().Format(http.TimeFormat)), - } -} - -func (e ExpiryTypeAbsolute) notPubliclyImplementable() {} - -func (e ExpiryTypeRelativeToNow) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { - return generated.ExpiryOptionsRelativeToNow, &generated.BlobClientSetExpiryOptions{ - ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)), - } -} - -func (e ExpiryTypeRelativeToNow) notPubliclyImplementable() {} - -func (e ExpiryTypeRelativeToCreation) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { - return generated.ExpiryOptionsRelativeToCreation, &generated.BlobClientSetExpiryOptions{ - ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)), - } -} - -func (e ExpiryTypeRelativeToCreation) notPubliclyImplementable() {} - -func (e ExpiryTypeNever) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { - return generated.ExpiryOptionsNeverExpire, nil -} - -func (e ExpiryTypeNever) notPubliclyImplementable() {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go deleted file mode 100644 index adf46b068..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go +++ /dev/null @@ -1,225 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) 
Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package exported - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "sort" - "strings" - "sync/atomic" - "time" - - azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the -// storage account's name and either its primary or secondary key. -func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) { - c := SharedKeyCredential{accountName: accountName} - if err := c.SetAccountKey(accountKey); err != nil { - return nil, err - } - return &c, nil -} - -// SharedKeyCredential contains an account's name and its primary or secondary key. -type SharedKeyCredential struct { - // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only - accountName string - accountKey atomic.Value // []byte -} - -// AccountName returns the Storage account's name. -func (c *SharedKeyCredential) AccountName() string { - return c.accountName -} - -// SetAccountKey replaces the existing account key with the specified account key. -func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { - _bytes, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return fmt.Errorf("decode account key: %w", err) - } - c.accountKey.Store(_bytes) - return nil -} - -// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. 
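[Editor's note: the computeHMACSHA256 helper that follows is plain standard-library crypto: HMAC-SHA256 keyed with the base64-decoded account key, with the result re-encoded as base64. A minimal sketch using crypto/hmac, crypto/sha256, and encoding/base64; accountKey and stringToSign are placeholders.]

    key, err := base64.StdEncoding.DecodeString(accountKey) // same decode as SetAccountKey above
    if err != nil {
        log.Fatal(err)
    }
    mac := hmac.New(sha256.New, key)
    mac.Write([]byte(stringToSign))
    signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))
    // used as: Authorization: SharedKey <accountName>:<signature>
    fmt.Println(signature)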
-func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { - h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) - _, err := h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)), err -} - -func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services - headers := req.Header - contentLength := getHeader(shared.HeaderContentLength, headers) - if contentLength == "0" { - contentLength = "" - } - - canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) - if err != nil { - return "", err - } - - stringToSign := strings.Join([]string{ - req.Method, - getHeader(shared.HeaderContentEncoding, headers), - getHeader(shared.HeaderContentLanguage, headers), - contentLength, - getHeader(shared.HeaderContentMD5, headers), - getHeader(shared.HeaderContentType, headers), - "", // Empty date because x-ms-date is expected (as per web page above) - getHeader(shared.HeaderIfModifiedSince, headers), - getHeader(shared.HeaderIfMatch, headers), - getHeader(shared.HeaderIfNoneMatch, headers), - getHeader(shared.HeaderIfUnmodifiedSince, headers), - getHeader(shared.HeaderRange, headers), - c.buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - return stringToSign, nil -} - -func getHeader(key string, headers map[string][]string) string { - if headers == nil { - return "" - } - if v, ok := headers[key]; ok { - if len(v) > 0 { - return v[0] - } - } - - return "" -} - -func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { - cm := map[string][]string{} - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = v // NOTE: the value must not have any whitespace around it. - } - } - if len(cm) == 0 { - return "" - } - - keys := make([]string, 0, len(cm)) - for key := range cm { - keys = append(keys, key) - } - sort.Strings(keys) - ch := bytes.NewBufferString("") - for i, key := range keys { - if i > 0 { - ch.WriteRune('\n') - } - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(strings.Join(cm[key], ",")) - } - return ch.String() -} - -func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services - cr := bytes.NewBufferString("/") - cr.WriteString(c.accountName) - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. 
- // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) - } else { - // a slash is required to indicate the root path - cr.WriteString("/") - } - - // params is a map[string][]string; param name is key; params values is []string - params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values - if err != nil { - return "", fmt.Errorf("failed to parse query params: %w", err) - } - - if len(params) > 0 { // There is at least 1 query parameter - var paramNames []string // We use this to sort the parameter key names - for paramName := range params { - paramNames = append(paramNames, paramName) // paramNames must be lowercase - } - sort.Strings(paramNames) - - for _, paramName := range paramNames { - paramValues := params[paramName] - sort.Strings(paramValues) - - // Join the sorted key values separated by ',' - // Then prepend "keyName:"; then add this string to the buffer - cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ",")) - } - } - return cr.String(), nil -} - -// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. -func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { - return cred.computeHMACSHA256(message) -} - -// the following content isn't actually exported but must live -// next to SharedKeyCredential as it uses its unexported methods - -type SharedKeyCredPolicy struct { - cred *SharedKeyCredential -} - -func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { - return &SharedKeyCredPolicy{cred: cred} -} - -func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { - // skip adding the authorization header if no SharedKeyCredential was provided. - // this prevents a panic that might be hard to diagnose and allows testing - // against http endpoints that don't require authentication. - if s.cred == nil { - return req.Next() - } - - if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { - req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) - } - stringToSign, err := s.cred.buildStringToSign(req.Raw()) - if err != nil { - return nil, err - } - signature, err := s.cred.computeHMACSHA256(stringToSign) - if err != nil { - return nil, err - } - authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") - req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) - - response, err := req.Next() - if err != nil && response != nil && response.StatusCode == http.StatusForbidden { - // Service failed to authenticate request, log it - log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") - } - return response, err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go deleted file mode 100644 index f3e571fa6..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go +++ /dev/null @@ -1,67 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package exported - -import ( - "bytes" - "encoding/binary" - "hash/crc64" - "io" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// TransferValidationType abstracts the various mechanisms used to verify a transfer. -type TransferValidationType interface { - Apply(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) - notPubliclyImplementable() -} - -// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. -type TransferValidationTypeCRC64 uint64 - -func (c TransferValidationTypeCRC64) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, uint64(c)) - cfg.SetCRC64(buf) - return rsc, nil -} - -func (TransferValidationTypeCRC64) notPubliclyImplementable() {} - -// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. -func TransferValidationTypeComputeCRC64() TransferValidationType { - return transferValidationTypeFn(func(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { - buf, err := io.ReadAll(rsc) - if err != nil { - return nil, err - } - - crc := crc64.Checksum(buf, shared.CRC64Table) - return TransferValidationTypeCRC64(crc).Apply(streaming.NopCloser(bytes.NewReader(buf)), cfg) - }) -} - -// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. -type TransferValidationTypeMD5 []byte - -func (c TransferValidationTypeMD5) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { - cfg.SetMD5(c) - return rsc, nil -} - -func (TransferValidationTypeMD5) notPubliclyImplementable() {} - -type transferValidationTypeFn func(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) - -func (t transferValidationTypeFn) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { - return t(rsc, cfg) -} - -func (transferValidationTypeFn) notPubliclyImplementable() {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go deleted file mode 100644 index 2e2dd16e4..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go +++ /dev/null @@ -1,64 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package exported - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's Name and a user delegation Key from it -func NewUserDelegationCredential(accountName string, udk UserDelegationKey) *UserDelegationCredential { - return &UserDelegationCredential{ - accountName: accountName, - userDelegationKey: udk, - } -} - -// UserDelegationKey contains UserDelegationKey. -type UserDelegationKey = generated.UserDelegationKey - -// UserDelegationCredential contains an account's name and its user delegation key. 
-type UserDelegationCredential struct { - accountName string - userDelegationKey UserDelegationKey -} - -// getAccountName returns the Storage account's Name -func (f *UserDelegationCredential) getAccountName() string { - return f.accountName -} - -// GetAccountName is a helper method for accessing the user delegation key parameters outside this package. -func GetAccountName(udc *UserDelegationCredential) string { - return udc.getAccountName() -} - -// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. -func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, error) { - bytes, _ := base64.StdEncoding.DecodeString(*f.userDelegationKey.Value) - h := hmac.New(sha256.New, bytes) - _, err := h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)), err -} - -// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside this package. -func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) { - return udc.computeHMACSHA256(message) -} - -// getUDKParams returns UserDelegationKey -func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey { - return &f.userDelegationKey -} - -// GetUDKParams is a helper method for accessing the user delegation key parameters outside this package. -func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey { - return udc.getUDKParams() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go deleted file mode 100644 index 2c7912dd7..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package exported - -const ( - ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" - ModuleVersion = "v1.3.1" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go deleted file mode 100644 index 288df7edd..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" -) - -func (client *AppendBlobClient) Endpoint() string { - return client.endpoint -} - -func (client *AppendBlobClient) InternalClient() *azcore.Client { - return client.internal -} - -// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
-func NewAppendBlobClient(endpoint string, azClient *azcore.Client) *AppendBlobClient { - client := &AppendBlobClient{ - internal: azClient, - endpoint: endpoint, - } - return client -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md deleted file mode 100644 index 92dc7e2d3..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ /dev/null @@ -1,475 +0,0 @@ -# Code Generation - Azure Blob SDK for Golang - -### Settings - -```yaml -go: true -clear-output-folder: false -version: "^3.0.0" -license-header: MICROSOFT_MIT_NO_VERSION -input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json" -credential-scope: "https://storage.azure.com/.default" -output-folder: ../generated -file-prefix: "zz_" -openapi-type: "data-plane" -verbose: true -security: AzureKey -modelerfour: - group-parameters: false - seal-single-value-enum-by-default: true - lenient-model-deduplication: true -export-clients: true -use: "@autorest/go@4.0.0-preview.61" -``` - -### Updating service version to 2023-11-03 -```yaml -directive: -- from: - - zz_appendblob_client.go - - zz_blob_client.go - - zz_blockblob_client.go - - zz_container_client.go - - zz_pageblob_client.go - - zz_service_client.go - where: $ - transform: >- - return $. - replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`). - replaceAll(`2021-12-02`, `2023-11-03`); -``` - -### Undo breaking change with BlobName -``` yaml -directive: -- from: zz_models.go - where: $ - transform: >- - return $. - replace(/Name\s+\*BlobName/g, `Name *string`); -``` - -### Removing UnmarshalXML for BlobItems to create customer UnmarshalXML function -```yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - $.BlobItemInternal["x-ms-go-omit-serde-methods"] = true; -``` - -### Remove pager methods and export various generated methods in container client - -``` yaml -directive: - - from: zz_container_client.go - where: $ - transform: >- - return $. - replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `//\n// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). - replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`). - replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`); -``` - -### Remove pager methods and export various generated methods in service client - -``` yaml -directive: - - from: zz_service_client.go - where: $ - transform: >- - return $. - replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `//\n// listContainersSegmentCreateRequest creates the ListContainersSegment request`). - replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). - replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); -``` - -### Fix BlobMetadata. 
- -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.BlobMetadata["properties"]; - -``` - -### Don't include container name or blob in path - we have direct URIs. - -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"] - transform: > - for (const property in $) - { - if (property.includes('/{containerName}/{blob}')) - { - $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); - } - else if (property.includes('/{containerName}')) - { - $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); - } - } -``` - -### Remove DataLake stuff. - -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"] - transform: > - for (const property in $) - { - if (property.includes('filesystem')) - { - delete $[property]; - } - } -``` - -### Remove DataLakeStorageError - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.DataLakeStorageError; -``` - -### Fix 304s - -``` yaml -directive: -- from: swagger-document - where: $["x-ms-paths"]["/{containerName}/{blob}"] - transform: > - $.get.responses["304"] = { - "description": "The condition specified using HTTP conditional header(s) is not met.", - "x-az-response-name": "ConditionNotMetError", - "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } - }; -``` - -### Fix GeoReplication - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.GeoReplication.properties.Status["x-ms-enum"]; - $.GeoReplication.properties.Status["x-ms-enum"] = { - "name": "BlobGeoReplicationStatus", - "modelAsString": false - }; -``` - -### Fix RehydratePriority - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - delete $.RehydratePriority["x-ms-enum"]; - $.RehydratePriority["x-ms-enum"] = { - "name": "RehydratePriority", - "modelAsString": false - }; -``` - -### Fix BlobDeleteType - -``` yaml -directive: -- from: swagger-document - where: $.parameters - transform: > - delete $.BlobDeleteType.enum; - $.BlobDeleteType.enum = [ - "None", - "Permanent" - ]; -``` - -### Fix EncryptionAlgorithm - -``` yaml -directive: -- from: swagger-document - where: $.parameters - transform: > - delete $.EncryptionAlgorithm.enum; - $.EncryptionAlgorithm.enum = [ - "None", - "AES256" - ]; -``` - -### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" - -``` yaml -directive: -- from: swagger-document - where: $.definitions - transform: > - $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; - delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; -``` - -# Export various createRequest/HandleResponse methods - -``` yaml -directive: -- from: zz_container_client.go - where: $ - transform: >- - return $. - replace(/listBlobHierarchySegmentCreateRequest/g, function(_, s) { return `ListBlobHierarchySegmentCreateRequest` }). - replace(/listBlobHierarchySegmentHandleResponse/g, function(_, s) { return `ListBlobHierarchySegmentHandleResponse` }); - -- from: zz_pageblob_client.go - where: $ - transform: >- - return $. 
- replace(/getPageRanges(Diff)?CreateRequest/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}CreateRequest` }). - replace(/getPageRanges(Diff)?HandleResponse/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}HandleResponse` }); -``` - -### Clean up some const type names so they don't stutter - -``` yaml -directive: -- from: swagger-document - where: $.parameters['BlobDeleteType'] - transform: > - $["x-ms-enum"].name = "DeleteType"; - $["x-ms-client-name"] = "DeleteType"; - -- from: swagger-document - where: $.parameters['BlobExpiryOptions'] - transform: > - $["x-ms-enum"].name = "ExpiryOptions"; - $["x-ms-client-name"].name = "ExpiryOptions"; - -- from: swagger-document - where: $["x-ms-paths"][*].*.responses[*].headers["x-ms-immutability-policy-mode"] - transform: > - $["x-ms-client-name"].name = "ImmutabilityPolicyMode"; - $.enum = [ "Mutable", "Unlocked", "Locked"]; - $["x-ms-enum"] = { "name": "ImmutabilityPolicyMode", "modelAsString": false }; - -- from: swagger-document - where: $.parameters['ImmutabilityPolicyMode'] - transform: > - $["x-ms-enum"].name = "ImmutabilityPolicySetting"; - $["x-ms-client-name"].name = "ImmutabilityPolicySetting"; - -- from: swagger-document - where: $.definitions['BlobPropertiesInternal'] - transform: > - $.properties.ImmutabilityPolicyMode["x-ms-enum"].name = "ImmutabilityPolicyMode"; -``` - -### use azcore.ETag - -``` yaml -directive: -- from: - - zz_models.go - - zz_options.go - where: $ - transform: >- - return $. - replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). - replace(/Etag\s+\*string/g, `ETag *azcore.ETag`). - replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`). - replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`). - replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). - replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); - -- from: zz_response_types.go - where: $ - transform: >- - return $. - replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). - replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); - -- from: - - zz_appendblob_client.go - - zz_blob_client.go - - zz_blockblob_client.go - - zz_container_client.go - - zz_pageblob_client.go - where: $ - transform: >- - return $. - replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). - replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`). - replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`). - replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`). - replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`). - replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`); -``` - -### Unsure why this casing changed, but fixing it - -``` yaml -directive: -- from: zz_models.go - where: $ - transform: >- - return $. - replace(/SignedOid\s+\*string/g, `SignedOID *string`). - replace(/SignedTid\s+\*string/g, `SignedTID *string`); -``` - -### Fixing Typo with StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed - -``` yaml -directive: -- from: zz_constants.go - where: $ - transform: >- - return $. 
- replace(/IncrementalCopyOfEralierVersionSnapshotNotAllowed/g, "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"); -``` - -### Fix up x-ms-content-crc64 header response name - -``` yaml -directive: -- from: swagger-document - where: $.x-ms-paths.*.*.responses.*.headers.x-ms-content-crc64 - transform: > - $["x-ms-client-name"] = "ContentCRC64" -``` - -``` yaml -directive: -- rename-model: - from: BlobItemInternal - to: BlobItem -- rename-model: - from: BlobPropertiesInternal - to: BlobProperties -``` - -### Updating encoding URL, Golang adds '+' which disrupts encoding with service - -``` yaml -directive: - - from: zz_service_client.go - where: $ - transform: >- - return $. - replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`) -``` - -### Change `where` parameter in blob filtering to be required - -``` yaml -directive: -- from: swagger-document - where: $.parameters.FilterBlobsWhere - transform: > - $.required = true; -``` - -### Change `Duration` parameter in leases to be required - -``` yaml -directive: -- from: swagger-document - where: $.parameters.LeaseDuration - transform: > - $.required = true; -``` - -### Change CPK acronym to be all caps - -``` yaml -directive: - - from: source-file-go - where: $ - transform: >- - return $. - replace(/Cpk/g, "CPK"); -``` - -### Change CORS acronym to be all caps - -``` yaml -directive: - - from: source-file-go - where: $ - transform: >- - return $. - replace(/Cors/g, "CORS"); -``` - -### Change cors xml to be correct - -``` yaml -directive: - - from: source-file-go - where: $ - transform: >- - return $. - replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); -``` - -### Fix Content-Type header in submit batch request - -``` yaml -directive: -- from: - - zz_container_client.go - - zz_service_client.go - where: $ - transform: >- - return $. - replace (/req.SetBody\(body\,\s+\"application\/xml\"\)/g, `req.SetBody(body, multipartContentType)`); -``` - -### Fix response status code check in submit batch request - -``` yaml -directive: -- from: zz_service_client.go - where: $ - transform: >- - return $. - replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, - `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`); -``` - -### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers - -``` yaml -directive: -- from: - - zz_container_client.go - - zz_blob_client.go - - zz_appendblob_client.go - - zz_blockblob_client.go - - zz_pageblob_client.go - where: $ - transform: >- - return $. - replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g, - `req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`). - replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, - `req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). 
- replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g, - `req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`). - replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, - `req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). - replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g, - `req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`); - diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go deleted file mode 100644 index 343073b2e..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package generated - -import ( - "context" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "time" -) - -// used to convert times from UTC to GMT before sending across the wire -var gmt = time.FixedZone("GMT", 0) - -func (client *BlobClient) Endpoint() string { - return client.endpoint -} - -func (client *BlobClient) InternalClient() *azcore.Client { - return client.internal -} - -func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - return client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) -} - -func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) -} - -// NewBlobClient creates a new instance of BlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
-func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient { - client := &BlobClient{ - internal: azClient, - endpoint: endpoint, - } - return client -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go deleted file mode 100644 index 873d9a419..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" -) - -func (client *BlockBlobClient) Endpoint() string { - return client.endpoint -} - -func (client *BlockBlobClient) Internal() *azcore.Client { - return client.internal -} - -// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. -func NewBlockBlobClient(endpoint string, azClient *azcore.Client) *BlockBlobClient { - client := &BlockBlobClient{ - internal: azClient, - endpoint: endpoint, - } - return client -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go deleted file mode 100644 index 57f112001..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -//go:generate autorest ./autorest.md -//go:generate gofmt -w . - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package generated diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go deleted file mode 100644 index 8f2bbbb7c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package generated - -const ServiceVersion = "2023-11-03" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go deleted file mode 100644 index d43b2c782..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
See License.txt in the project root for license information. - -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" -) - -func (client *ContainerClient) Endpoint() string { - return client.endpoint -} - -func (client *ContainerClient) InternalClient() *azcore.Client { - return client.internal -} - -// NewContainerClient creates a new instance of ContainerClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient { - client := &ContainerClient{ - internal: azClient, - endpoint: endpoint, - } - return client -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go deleted file mode 100644 index aaef9f53b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go +++ /dev/null @@ -1,141 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package generated - -import ( - "encoding/xml" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "net/url" -) - -type TransactionalContentSetter interface { - SetCRC64([]byte) - SetMD5([]byte) -} - -func (a *AppendBlobClientAppendBlockOptions) SetCRC64(v []byte) { - a.TransactionalContentCRC64 = v -} - -func (a *AppendBlobClientAppendBlockOptions) SetMD5(v []byte) { - a.TransactionalContentMD5 = v -} - -func (b *BlockBlobClientStageBlockOptions) SetCRC64(v []byte) { - b.TransactionalContentCRC64 = v -} - -func (b *BlockBlobClientStageBlockOptions) SetMD5(v []byte) { - b.TransactionalContentMD5 = v -} - -func (p *PageBlobClientUploadPagesOptions) SetCRC64(v []byte) { - p.TransactionalContentCRC64 = v -} - -func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) { - p.TransactionalContentMD5 = v -} - -func (b *BlockBlobClientUploadOptions) SetCRC64(v []byte) { - b.TransactionalContentCRC64 = v -} - -func (b *BlockBlobClientUploadOptions) SetMD5(v []byte) { - b.TransactionalContentMD5 = v -} - -type SourceContentSetter interface { - SetSourceContentCRC64(v []byte) - SetSourceContentMD5(v []byte) -} - -func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentCRC64(v []byte) { - a.SourceContentcrc64 = v -} - -func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentMD5(v []byte) { - a.SourceContentMD5 = v -} - -func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentCRC64(v []byte) { - b.SourceContentcrc64 = v -} - -func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentMD5(v []byte) { - b.SourceContentMD5 = v -} - -func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte) { - p.SourceContentcrc64 = v -} - -func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) { - p.SourceContentMD5 = v -} - -// Custom UnmarshalXML functions for types that need special handling. - -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPrefix. 
-func (b *BlobPrefix) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias BlobPrefix - aux := &struct { - *alias - BlobName *BlobName `xml:"Name"` - }{ - alias: (*alias)(b), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - if aux.BlobName != nil { - if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { - name, err := url.QueryUnescape(*aux.BlobName.Content) - - // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) - if err != nil { - return err - } - b.Name = to.Ptr(string(name)) - } else { - b.Name = aux.BlobName.Content - } - } - return nil -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. -func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias BlobItem - aux := &struct { - *alias - BlobName *BlobName `xml:"Name"` - Metadata additionalProperties `xml:"Metadata"` - OrMetadata additionalProperties `xml:"OrMetadata"` - }{ - alias: (*alias)(b), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - b.Metadata = (map[string]*string)(aux.Metadata) - b.OrMetadata = (map[string]*string)(aux.OrMetadata) - if aux.BlobName != nil { - if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { - name, err := url.QueryUnescape(*aux.BlobName.Content) - - // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) - if err != nil { - return err - } - b.Name = to.Ptr(string(name)) - } else { - b.Name = aux.BlobName.Content - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go deleted file mode 100644 index a7c76208a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" -) - -func (client *PageBlobClient) Endpoint() string { - return client.endpoint -} - -func (client *PageBlobClient) InternalClient() *azcore.Client { - return client.internal -} - -// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. -func NewPageBlobClient(endpoint string, azClient *azcore.Client) *PageBlobClient { - client := &PageBlobClient{ - internal: azClient, - endpoint: endpoint, - } - return client -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go deleted file mode 100644 index 32c15a2b0..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" -) - -func (client *ServiceClient) Endpoint() string { - return client.endpoint -} - -func (client *ServiceClient) InternalClient() *azcore.Client { - return client.internal -} - -// NewServiceClient creates a new instance of ServiceClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. -func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient { - client := &ServiceClient{ - internal: azClient, - endpoint: endpoint, - } - return client -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go deleted file mode 100644 index 797318611..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ /dev/null @@ -1,662 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "io" - "net/http" - "strconv" - "time" -) - -// AppendBlobClient contains the methods for the AppendBlob group. -// Don't use this type directly, use a constructor function instead. -type AppendBlobClient struct { - internal *azcore.Client - endpoint string -} - -// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append -// Block operation is permitted only if the blob was created with x-ms-blob-type set to -// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. -// - body - Initial data -// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { - var err error - req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return AppendBlobClientAppendBlockResponse{}, err - } - resp, err := client.appendBlockHandleResponse(httpResp) - return resp, err -} - -// appendBlockCreateRequest creates the AppendBlock request. -func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "appendblock") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} - } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - 
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := req.SetBody(body, "application/octet-stream"); err != nil { - return nil, err - } - return req, nil -} - -// appendBlockHandleResponse handles the AppendBlock response. -func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { - result := AppendBlobClientAppendBlockResponse{} - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.LastModified = &lastModified 
- } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where -// the contents are read from a source url. The Append Block operation is permitted only if the blob was -// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - sourceURL - Specify a URL to the copy source. -// - contentLength - The length of the request. -// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL -// method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { - var err error - req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - resp, err := client.appendBlockFromURLHandleResponse(httpResp) - return resp, err -} - -// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
-func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "appendblock") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - if options != nil && options.SourceRange != nil { - req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} - } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} - } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
-func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { - result := AppendBlobClientAppendBlockFromURLResponse{} - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// Create - The Create Append Blob operation creates a new append blob. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. -// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. -// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { - var err error - req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return AppendBlobClientCreateResponse{}, err - } - resp, err := client.createHandleResponse(httpResp) - return resp, err -} - -// createCreateRequest creates the Create request. -func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - 
req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold != nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// createHandleResponse handles the Create response. 
-func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { - result := AppendBlobClientCreateResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version -// or later. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. -func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { - var err error - req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return AppendBlobClientSealResponse{}, err - } - resp, err := client.sealHandleResponse(httpResp) - return resp, err -} - -// sealCreateRequest creates the Seal request. 
-func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "seal") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// sealHandleResponse handles the Seal response. 
-func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { - result := AppendBlobClientSealResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { - isSealed, err := strconv.ParseBool(val) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - result.IsSealed = &isSealed - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go deleted file mode 100644 index fe568a96c..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ /dev/null @@ -1,2962 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "net/http" - "strconv" - "strings" - "time" -) - -// BlobClient contains the methods for the Blob group. -// Don't use this type directly, use a constructor function instead. -type BlobClient struct { - internal *azcore.Client - endpoint string -} - -// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination -// blob with zero length and full metadata. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. -// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
-func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { - var err error - req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) - if err != nil { - return BlobClientAbortCopyFromURLResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientAbortCopyFromURLResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { - err = runtime.NewResponseError(httpResp) - return BlobClientAbortCopyFromURLResponse{}, err - } - resp, err := client.abortCopyFromURLHandleResponse(httpResp) - return resp, err -} - -// abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. -func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "copy") - reqQP.Set("copyid", copyID) - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-copy-action"] = []string{"abort"} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. -func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (BlobClientAbortCopyFromURLResponse, error) { - result := BlobClientAbortCopyFromURLResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientAbortCopyFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite -// lease can be between 15 and 60 seconds. A lease duration cannot be changed using -// renew or change. -// - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { - var err error - req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return BlobClientAcquireLeaseResponse{}, err - } - resp, err := client.acquireLeaseHandleResponse(httpResp) - return resp, err -} - -// acquireLeaseCreateRequest creates the AcquireLease request. -func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} - if options != nil && options.ProposedLeaseID != nil { - req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// acquireLeaseHandleResponse handles the AcquireLease response. 
-func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { - result := BlobClientAcquireLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { - var err error - req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return BlobClientBreakLeaseResponse{}, err - } - resp, err := client.breakLeaseHandleResponse(httpResp) - return resp, err -} - -// breakLeaseCreateRequest creates the BreakLease request. 
-func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"break"} - if options != nil && options.BreakPeriod != nil { - req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// breakLeaseHandleResponse handles the BreakLease response. -func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { - result := BlobClientBreakLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-time"); val != "" { - leaseTime32, err := strconv.ParseInt(val, 10, 32) - leaseTime := int32(leaseTime32) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - result.LeaseTime = &leaseTime - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations -// If the operation fails it returns an *azcore.ResponseError type. 
-// -// Generated from API version 2023-11-03 -// - leaseID - Specifies the current lease ID on the resource. -// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed -// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID -// string formats. -// - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { - var err error - req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientChangeLeaseResponse{}, err - } - resp, err := client.changeLeaseHandleResponse(httpResp) - return resp, err -} - -// changeLeaseCreateRequest creates the ChangeLease request. -func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"change"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// changeLeaseHandleResponse handles the ChangeLease response. 
-func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { - result := BlobClientChangeLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response -// until the copy is complete. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// - options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. -// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { - var err error - req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) - if err != nil { - return BlobClientCopyFromURLResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientCopyFromURLResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return BlobClientCopyFromURLResponse{}, err - } - resp, err := client.copyFromURLHandleResponse(httpResp) - return resp, err -} - -// copyFromURLCreateRequest creates the CopyFromURL request. 
-func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-requires-sync"] = []string{"true"} - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} - } - if options != nil && 
options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold != nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if options != nil && options.CopySourceTags != nil { - req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// copyFromURLHandleResponse handles the CopyFromURL response. -func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { - result := BlobClientCopyFromURLResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientCopyFromURLResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientCopyFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCopyFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCopyFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
-// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { - var err error - req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return BlobClientCreateSnapshotResponse{}, err - } - resp, err := client.createSnapshotHandleResponse(httpResp) - return resp, err -} - -// createSnapshotCreateRequest creates the CreateSnapshot request. -func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "snapshot") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = 
[]string{*modifiedAccessConditions.IfTags} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// createSnapshotHandleResponse handles the CreateSnapshot response. -func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { - result := BlobClientCreateSnapshotResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-snapshot"); val != "" { - result.Snapshot = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed -// from the storage account. If the storage account's soft delete feature is enabled, -// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service -// retains the blob or snapshot for the number of days specified by the -// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number -// of days has passed, the blob's data is permanently removed from the storage -// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use -// the List Blobs API and specify the "include=deleted" query parameter to discover -// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. -// All other operations on a soft-deleted blob or snapshot causes the service to -// return an HTTP status code of 404 (ResourceNotFound). -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { - var err error - req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return BlobClientDeleteResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientDeleteResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return BlobClientDeleteResponse{}, err - } - resp, err := client.deleteHandleResponse(httpResp) - return resp, err -} - -// deleteCreateRequest creates the Delete request. -func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - if options != nil && options.DeleteType != nil { - reqQP.Set("deletetype", string(*options.DeleteType)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.DeleteSnapshots != nil { - req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// deleteHandleResponse handles the Delete response. 
-func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientDeleteResponse, error) { - result := BlobClientDeleteResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientDeleteResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy -// method. -func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { - var err error - req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) - if err != nil { - return BlobClientDeleteImmutabilityPolicyResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientDeleteImmutabilityPolicyResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientDeleteImmutabilityPolicyResponse{}, err - } - resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp) - return resp, err -} - -// deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. -func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "immutabilityPolicies") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response. -func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientDeleteImmutabilityPolicyResponse, error) { - result := BlobClientDeleteImmutabilityPolicyResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientDeleteImmutabilityPolicyResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. 
You -// can also call Download to read a snapshot. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { - var err error - req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) - if err != nil { - return BlobClientDownloadResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientDownloadResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { - err = runtime.NewResponseError(httpResp) - return BlobClientDownloadResponse{}, err - } - resp, err := client.downloadHandleResponse(httpResp) - return resp, err -} - -// downloadCreateRequest creates the Download request. -func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - runtime.SkipBodyDownload(req) - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.RangeGetContentMD5 != nil { - req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} - } - if options != nil && options.RangeGetContentCRC64 != nil { - req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = 
[]string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// downloadHandleResponse handles the Download response. -func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { - result := BlobClientDownloadResponse{Body: resp.Body} - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.BlobContentMD5 = blobContentMD5 - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-Disposition"); val != "" { - result.ContentDisposition = &val - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Content-Language"); val != "" { - result.ContentLanguage = &val - } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.ContentLength = &contentLength - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Range"); val != "" 
{ - result.ContentRange = &val - } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { - copyCompletionTime, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.CopyCompletionTime = &copyCompletionTime - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-progress"); val != "" { - result.CopyProgress = &val - } - if val := resp.Header.Get("x-ms-copy-source"); val != "" { - result.CopySource = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } - if val := resp.Header.Get("x-ms-creation-time"); val != "" { - creationTime, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.CreationTime = &creationTime - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-error-code"); val != "" { - result.ErrorCode = &val - } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn - } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) - } - if val := resp.Header.Get("x-ms-is-current-version"); val != "" { - isCurrentVersion, err := strconv.ParseBool(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.IsCurrentVersion = &isCurrentVersion - } - if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { - isSealed, err := strconv.ParseBool(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.IsSealed = &isSealed - } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-last-access-time"); val != "" { - lastAccessed, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.LastAccessed = &lastAccessed - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - }
- if val := resp.Header.Get("x-ms-legal-hold"); val != "" { - legalHold, err := strconv.ParseBool(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.LegalHold = &legalHold - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val - } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.TagCount = &tagCount - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// GetAccountInfo - Returns the sku name and account kind -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. -func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { - var err error - req, err := client.getAccountInfoCreateRequest(ctx, options) - if err != nil { - return BlobClientGetAccountInfoResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientGetAccountInfoResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientGetAccountInfoResponse{}, err - } - resp, err := client.getAccountInfoHandleResponse(httpResp) - return resp, err -} - -// getAccountInfoCreateRequest creates the GetAccountInfo request. -func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, options *BlobClientGetAccountInfoOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") - reqQP.Set("comp", "properties") - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getAccountInfoHandleResponse handles the GetAccountInfo response. 
-func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { - result := BlobClientGetAccountInfoResponse{} - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetAccountInfoResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties -// for the blob. It does not return the content of the blob. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { - var err error - req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientGetPropertiesResponse{}, err - } - resp, err := client.getPropertiesHandleResponse(httpResp) - return resp, err -} - -// getPropertiesCreateRequest creates the GetProperties request. 
-func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getPropertiesHandleResponse handles the GetProperties response. 
-func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { - result := BlobClientGetPropertiesResponse{} - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val - } - if val := resp.Header.Get("x-ms-access-tier"); val != "" { - result.AccessTier = &val - } - if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { - accessTierChangeTime, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.AccessTierChangeTime = &accessTierChangeTime - } - if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { - accessTierInferred, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.AccessTierInferred = &accessTierInferred - } - if val := resp.Header.Get("x-ms-archive-status"); val != "" { - result.ArchiveStatus = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Content-Disposition"); val != "" { - result.ContentDisposition = &val - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Content-Language"); val != "" { - result.ContentLanguage = &val - } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.ContentLength = &contentLength - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { - copyCompletionTime, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.CopyCompletionTime = &copyCompletionTime - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-progress"); val != "" { - result.CopyProgress = &val - } - if val := resp.Header.Get("x-ms-copy-source"); val != "" { - result.CopySource = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } - if val := resp.Header.Get("x-ms-creation-time"); val != "" { - creationTime, err := time.Parse(time.RFC1123, val) - if err != nil {
- return BlobClientGetPropertiesResponse{}, err - } - result.CreationTime = &creationTime - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { - result.DestinationSnapshot = &val - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-expiry-time"); val != "" { - expiresOn, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.ExpiresOn = &expiresOn - } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn - } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) - } - if val := resp.Header.Get("x-ms-is-current-version"); val != "" { - isCurrentVersion, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.IsCurrentVersion = &isCurrentVersion - } - if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { - isIncrementalCopy, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.IsIncrementalCopy = &isIncrementalCopy - } - if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { - isSealed, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.IsSealed = &isSealed - } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-last-access-time"); val != "" { - lastAccessed, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.LastAccessed = &lastAccessed - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-legal-hold"); val != "" { - legalHold, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.LegalHold = &legalHold - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = 
to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val - } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { - result.RehydratePriority = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.TagCount = &tagCount - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// GetTags - The Get Tags operation enables users to get the tags associated with a blob. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { - var err error - req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) - if err != nil { - return BlobClientGetTagsResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientGetTagsResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientGetTagsResponse{}, err - } - resp, err := client.getTagsHandleResponse(httpResp) - return resp, err -} - -// getTagsCreateRequest creates the GetTags request. 
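A short sketch combining the two read-only operations above, GetProperties and GetTags, assuming the same generated package and an already-constructed *BlobClient; it only touches response fields that the handlers above populate, and the helper name is illustrative:

package generated

import (
	"context"
	"fmt"
)

// inspectBlobExample reads blob properties (decoded from response headers by
// getPropertiesHandleResponse) and tags (unmarshalled from the XML body by
// getTagsHandleResponse).
func inspectBlobExample(ctx context.Context, client *BlobClient) error {
	props, err := client.GetProperties(ctx, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	if props.ContentLength != nil && props.LastModified != nil {
		fmt.Printf("size=%d last-modified=%v\n", *props.ContentLength, *props.LastModified)
	}
	tags, err := client.GetTags(ctx, nil, nil, nil)
	if err != nil {
		return err
	}
	_ = tags.BlobTags // tag set parsed from the response body
	return nil
}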
-func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "tags") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getTagsHandleResponse handles the GetTags response. -func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClientGetTagsResponse, error) { - result := BlobClientGetTagsResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetTagsResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { - return BlobClientGetTagsResponse{}, err - } - return result, nil -} - -// Query - The Query operation enables users to select/project on blob data by providing simple query expressions. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { - var err error - req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) - if err != nil { - return BlobClientQueryResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientQueryResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) { - err = runtime.NewResponseError(httpResp) - return BlobClientQueryResponse{}, err - } - resp, err := client.queryHandleResponse(httpResp) - return resp, err -} - -// queryCreateRequest creates the Query request. -func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "query") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - runtime.SkipBodyDownload(req) - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if options != nil && options.QueryRequest != nil { - if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { - return nil, err - } - return req, nil - } - return 
req, nil -} - -// queryHandleResponse handles the Query response. -func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { - result := BlobClientQueryResponse{Body: resp.Body} - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.BlobContentMD5 = blobContentMD5 - } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-Disposition"); val != "" { - result.ContentDisposition = &val - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Content-Language"); val != "" { - result.ContentLanguage = &val - } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.ContentLength = &contentLength - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val - } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { - copyCompletionTime, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.CopyCompletionTime = &copyCompletionTime - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-progress"); val != "" { - result.CopyProgress = &val - } - if val := resp.Header.Get("x-ms-copy-source"); val != "" { - result.CopySource = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag =
(*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - leaseID - Specifies the current lease ID on the resource. -// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { - var err error - req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientReleaseLeaseResponse{}, err - } - resp, err := client.releaseLeaseHandleResponse(httpResp) - return resp, err -} - -// releaseLeaseCreateRequest creates the ReleaseLease request. 
-func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"release"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// releaseLeaseHandleResponse handles the ReleaseLease response. -func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { - result := BlobClientReleaseLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - leaseID - Specifies the current lease ID on the resource. -// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { - var err error - req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientRenewLeaseResponse{}, err - } - resp, err := client.renewLeaseHandleResponse(httpResp) - return resp, err -} - -// renewLeaseCreateRequest creates the RenewLease request. -func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"renew"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// renewLeaseHandleResponse handles the RenewLease response. 
-func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { - result := BlobClientRenewLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SetExpiry - Sets the time a blob will expire and be deleted. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - expiryOptions - Required. Indicates mode of the expiry time -// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. -func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { - var err error - req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientSetExpiryResponse{}, err - } - resp, err := client.setExpiryHandleResponse(httpResp) - return resp, err -} - -// setExpiryCreateRequest creates the SetExpiry request. -func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "expiry") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["x-ms-expiry-option"] = []string{string(expiryOptions)} - if options != nil && options.ExpiresOn != nil { - req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// setExpiryHandleResponse handles the SetExpiry response. 
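A minimal sketch of the lease round trip formed by the RenewLease and ReleaseLease operations above, assuming the same generated package, an already-constructed *BlobClient, and a lease ID acquired elsewhere; the helper name is illustrative:

package generated

import "context"

// renewThenReleaseExample renews an existing lease and then releases it.
// Both operations send comp=lease with the appropriate x-ms-lease-action
// header, as built by their *CreateRequest helpers above.
func renewThenReleaseExample(ctx context.Context, client *BlobClient, leaseID string) error {
	if _, err := client.RenewLease(ctx, leaseID, nil, nil); err != nil {
		return err
	}
	_, err := client.ReleaseLease(ctx, leaseID, nil, nil)
	return err
}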
-func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { - result := BlobClientSetExpiryResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. -// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { - var err error - req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientSetHTTPHeadersResponse{}, err - } - resp, err := client.setHTTPHeadersHandleResponse(httpResp) - return resp, err -} - -// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
-func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "properties") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. 
-func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { - result := BlobClientSetHTTPHeadersResponse{} - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy -// method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { - var err error - req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) - if err != nil { - return BlobClientSetImmutabilityPolicyResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientSetImmutabilityPolicyResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientSetImmutabilityPolicyResponse{}, err - } - resp, err := client.setImmutabilityPolicyHandleResponse(httpResp) - return resp, err -} - -// setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
-func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "immutabilityPolicies") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response. -func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientSetImmutabilityPolicyResponse, error) { - result := BlobClientSetImmutabilityPolicyResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetImmutabilityPolicyResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetImmutabilityPolicyResponse{}, err - } - result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry - } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - legalHold - Specified if a legal hold should be set on the blob. -// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
-func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { - var err error - req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) - if err != nil { - return BlobClientSetLegalHoldResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientSetLegalHoldResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientSetLegalHoldResponse{}, err - } - resp, err := client.setLegalHoldHandleResponse(httpResp) - return resp, err -} - -// setLegalHoldCreateRequest creates the SetLegalHold request. -func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "legalhold") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// setLegalHoldHandleResponse handles the SetLegalHold response. -func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobClientSetLegalHoldResponse, error) { - result := BlobClientSetLegalHoldResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetLegalHoldResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-legal-hold"); val != "" { - legalHold, err := strconv.ParseBool(val) - if err != nil { - return BlobClientSetLegalHoldResponse{}, err - } - result.LegalHold = &legalHold - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value -// pairs -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { - var err error - req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientSetMetadataResponse{}, err - } - resp, err := client.setMetadataHandleResponse(httpResp) - return resp, err -} - -// setMetadataCreateRequest creates the SetMetadata request. -func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "metadata") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// setMetadataHandleResponse handles the SetMetadata response. -func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { - result := BlobClientSetMetadataResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// SetTags - The Set Tags operation enables users to set tags on a blob. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - tags - Blob tags -// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { - var err error - req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) - if err != nil { - return BlobClientSetTagsResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientSetTagsResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { - err = runtime.NewResponseError(httpResp) - return BlobClientSetTagsResponse{}, err - } - resp, err := client.setTagsHandleResponse(httpResp) - return resp, err -} - -// setTagsCreateRequest creates the SetTags request. 
-func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "tags") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} - } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := runtime.MarshalAsXML(req, tags); err != nil { - return nil, err - } - return req, nil -} - -// setTagsHandleResponse handles the SetTags response. -func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClientSetTagsResponse, error) { - result := BlobClientSetTagsResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetTagsResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage -// account and on a block blob in a blob storage account (locally redundant storage only). A -// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive -// storage type. This operation does not update the blob's ETag. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - tier - Indicates the tier to be set on the blob. -// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { - var err error - req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return BlobClientSetTierResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientSetTierResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return BlobClientSetTierResponse{}, err - } - resp, err := client.setTierHandleResponse(httpResp) - return resp, err -} - -// setTierCreateRequest creates the SetTier request. -func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "tier") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} - if options != nil && options.RehydratePriority != nil { - req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// setTierHandleResponse handles the SetTier response. -func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClientSetTierResponse, error) { - result := BlobClientSetTierResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. 
-// - options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. -// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { - var err error - req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return BlobClientStartCopyFromURLResponse{}, err - } - resp, err := client.startCopyFromURLHandleResponse(httpResp) - return resp, err -} - -// startCopyFromURLCreateRequest creates the StartCopyFromURL request. -func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if options != nil && options.RehydratePriority != nil { - req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = 
[]string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { - req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} - } - if options != nil && options.SealBlob != nil { - req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold != nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// startCopyFromURLHandleResponse handles the StartCopyFromURL response. 
-func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { - result := BlobClientStartCopyFromURLResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// Undelete - Undelete a blob that was previously soft deleted -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. -func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { - var err error - req, err := client.undeleteCreateRequest(ctx, options) - if err != nil { - return BlobClientUndeleteResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlobClientUndeleteResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlobClientUndeleteResponse{}, err - } - resp, err := client.undeleteHandleResponse(httpResp) - return resp, err -} - -// undeleteCreateRequest creates the Undelete request. -func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *BlobClientUndeleteOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "undelete") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// undeleteHandleResponse handles the Undelete response. 
-func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClientUndeleteResponse, error) { - result := BlobClientUndeleteResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientUndeleteResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go deleted file mode 100644 index b6115b50a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ /dev/null @@ -1,993 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "io" - "net/http" - "strconv" - "time" -) - -// BlockBlobClient contains the methods for the BlockBlob group. -// Don't use this type directly, use a constructor function instead. -type BlockBlobClient struct { - internal *azcore.Client - endpoint string -} - -// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. -// In order to be written as part of a blob, a block must have been successfully written to the -// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that -// have changed, then committing the new and existing blocks together. You can do -// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit -// the most recently uploaded version of the block, whichever list it may -// belong to. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - blocks - Blob Blocks. -// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList -// method. -// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { - var err error - req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return BlockBlobClientCommitBlockListResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlockBlobClientCommitBlockListResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return BlockBlobClientCommitBlockListResponse{}, err - } - resp, err := client.commitBlockListHandleResponse(httpResp) - return resp, err -} - -// commitBlockListCreateRequest creates the CommitBlockList request. -func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "blocklist") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} - } - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} - } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} - } - if 
cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold != nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := runtime.MarshalAsXML(req, blocks); err != nil { - return nil, err - } - return req, nil -} - -// commitBlockListHandleResponse handles the CommitBlockList response. 
-func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { - result := BlockBlobClientCommitBlockListResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientCommitBlockListResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientCommitBlockListResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientCommitBlockListResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlockBlobClientCommitBlockListResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientCommitBlockListResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. -// - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { - var err error - req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return BlockBlobClientGetBlockListResponse{}, err - } - resp, err := client.getBlockListHandleResponse(httpResp) - return resp, err -} - -// getBlockListCreateRequest creates the GetBlockList request. -func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "blocklist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - reqQP.Set("blocklisttype", string(listType)) - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getBlockListHandleResponse handles the GetBlockList response. 
-func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { - result := BlockBlobClientGetBlockListResponse{} - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - result.BlobContentLength = &blobContentLength - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - return result, nil -} - -// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from -// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not -// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform -// partial updates to a block blob’s contents using a source URL, use the Put -// Block from URL API in conjunction with Put Block List. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. -// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// - options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL -// method. -// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. 
-func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { - var err error - req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return BlockBlobClientPutBlobFromURLResponse{}, err - } - resp, err := client.putBlobFromURLHandleResponse(httpResp) - return resp, err -} - -// putBlobFromURLCreateRequest creates the PutBlobFromURL request. -func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} - } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != 
nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { - req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} - } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if options != nil 
&& options.CopySourceBlobProperties != nil { - req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} - } - if options != nil && options.CopySourceTags != nil { - req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// putBlobFromURLHandleResponse handles the PutBlobFromURL response. -func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { - result := BlockBlobClientPutBlobFromURLResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal -// to 64 bytes in size. For a given blob, the length of the value specified for the blockid -// parameter must be the same size for each block. -// - contentLength - The length of the request. -// - body - Initial data -// - options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
-func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) { - var err error - req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return BlockBlobClientStageBlockResponse{}, err - } - resp, err := client.stageBlockHandleResponse(httpResp) - return resp, err -} - -// stageBlockCreateRequest creates the StageBlock request. -func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "block") - reqQP.Set("blockid", blockID) - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} - } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := req.SetBody(body, "application/octet-stream"); err != nil { - return nil, err - } - return req, nil -} - -// stageBlockHandleResponse handles the StageBlock response. 
-func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { - result := BlockBlobClientStageBlockResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents -// are read from a URL. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal -// to 64 bytes in size. For a given blob, the length of the value specified for the blockid -// parameter must be the same size for each block. -// - contentLength - The length of the request. -// - sourceURL - Specify a URL to the copy source. -// - options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL -// method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. 
-func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { - var err error - req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return BlockBlobClientStageBlockFromURLResponse{}, err - } - resp, err := client.stageBlockFromURLHandleResponse(httpResp) - return resp, err -} - -// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. -func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "block") - reqQP.Set("blockid", blockID) - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - if options != nil && options.SourceRange != nil { - req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} - } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if 
sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. -func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { - result := BlockBlobClientStageBlockFromURLResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob -// overwrites any existing metadata on the blob. Partial updates are not supported with Put -// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of -// the content of a block blob, use the Put Block List operation. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. 
-// - body - Initial data -// - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. -// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { - var err error - req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return BlockBlobClientUploadResponse{}, err - } - resp, err := client.uploadHandleResponse(httpResp) - return resp, err -} - -// uploadCreateRequest creates the Upload request. -func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} - } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - 
req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold != nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := req.SetBody(body, "application/octet-stream"); err != nil { - return nil, err - } - return req, nil -} - -// uploadHandleResponse handles the Upload 
response. -func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { - result := BlockBlobClientUploadResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go deleted file mode 100644 index 95af9e154..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ /dev/null @@ -1,747 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -type AccessTier string - -const ( - AccessTierArchive AccessTier = "Archive" - AccessTierCold AccessTier = "Cold" - AccessTierCool AccessTier = "Cool" - AccessTierHot AccessTier = "Hot" - AccessTierP10 AccessTier = "P10" - AccessTierP15 AccessTier = "P15" - AccessTierP20 AccessTier = "P20" - AccessTierP30 AccessTier = "P30" - AccessTierP4 AccessTier = "P4" - AccessTierP40 AccessTier = "P40" - AccessTierP50 AccessTier = "P50" - AccessTierP6 AccessTier = "P6" - AccessTierP60 AccessTier = "P60" - AccessTierP70 AccessTier = "P70" - AccessTierP80 AccessTier = "P80" - AccessTierPremium AccessTier = "Premium" -) - -// PossibleAccessTierValues returns the possible values for the AccessTier const type. 
-func PossibleAccessTierValues() []AccessTier { - return []AccessTier{ - AccessTierArchive, - AccessTierCold, - AccessTierCool, - AccessTierHot, - AccessTierP10, - AccessTierP15, - AccessTierP20, - AccessTierP30, - AccessTierP4, - AccessTierP40, - AccessTierP50, - AccessTierP6, - AccessTierP60, - AccessTierP70, - AccessTierP80, - AccessTierPremium, - } -} - -type AccountKind string - -const ( - AccountKindBlobStorage AccountKind = "BlobStorage" - AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" - AccountKindFileStorage AccountKind = "FileStorage" - AccountKindStorage AccountKind = "Storage" - AccountKindStorageV2 AccountKind = "StorageV2" -) - -// PossibleAccountKindValues returns the possible values for the AccountKind const type. -func PossibleAccountKindValues() []AccountKind { - return []AccountKind{ - AccountKindBlobStorage, - AccountKindBlockBlobStorage, - AccountKindFileStorage, - AccountKindStorage, - AccountKindStorageV2, - } -} - -type ArchiveStatus string - -const ( - ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold" - ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" - ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" -) - -// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. -func PossibleArchiveStatusValues() []ArchiveStatus { - return []ArchiveStatus{ - ArchiveStatusRehydratePendingToCold, - ArchiveStatusRehydratePendingToCool, - ArchiveStatusRehydratePendingToHot, - } -} - -type BlobCopySourceTags string - -const ( - BlobCopySourceTagsCOPY BlobCopySourceTags = "COPY" - BlobCopySourceTagsREPLACE BlobCopySourceTags = "REPLACE" -) - -// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. -func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { - return []BlobCopySourceTags{ - BlobCopySourceTagsCOPY, - BlobCopySourceTagsREPLACE, - } -} - -// BlobGeoReplicationStatus - The status of the secondary location -type BlobGeoReplicationStatus string - -const ( - BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" - BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" - BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" -) - -// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. -func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { - return []BlobGeoReplicationStatus{ - BlobGeoReplicationStatusBootstrap, - BlobGeoReplicationStatusLive, - BlobGeoReplicationStatusUnavailable, - } -} - -type BlobType string - -const ( - BlobTypeAppendBlob BlobType = "AppendBlob" - BlobTypeBlockBlob BlobType = "BlockBlob" - BlobTypePageBlob BlobType = "PageBlob" -) - -// PossibleBlobTypeValues returns the possible values for the BlobType const type. -func PossibleBlobTypeValues() []BlobType { - return []BlobType{ - BlobTypeAppendBlob, - BlobTypeBlockBlob, - BlobTypePageBlob, - } -} - -type BlockListType string - -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. 
-func PossibleBlockListTypeValues() []BlockListType { - return []BlockListType{ - BlockListTypeAll, - BlockListTypeCommitted, - BlockListTypeUncommitted, - } -} - -type CopyStatusType string - -const ( - CopyStatusTypeAborted CopyStatusType = "aborted" - CopyStatusTypeFailed CopyStatusType = "failed" - CopyStatusTypePending CopyStatusType = "pending" - CopyStatusTypeSuccess CopyStatusType = "success" -) - -// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. -func PossibleCopyStatusTypeValues() []CopyStatusType { - return []CopyStatusType{ - CopyStatusTypeAborted, - CopyStatusTypeFailed, - CopyStatusTypePending, - CopyStatusTypeSuccess, - } -} - -type DeleteSnapshotsOptionType string - -const ( - DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include" - DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = "only" -) - -// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. -func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { - return []DeleteSnapshotsOptionType{ - DeleteSnapshotsOptionTypeInclude, - DeleteSnapshotsOptionTypeOnly, - } -} - -type DeleteType string - -const ( - DeleteTypeNone DeleteType = "None" - DeleteTypePermanent DeleteType = "Permanent" -) - -// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. -func PossibleDeleteTypeValues() []DeleteType { - return []DeleteType{ - DeleteTypeNone, - DeleteTypePermanent, - } -} - -type EncryptionAlgorithmType string - -const ( - EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" - EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" -) - -// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. -func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { - return []EncryptionAlgorithmType{ - EncryptionAlgorithmTypeAES256, - EncryptionAlgorithmTypeNone, - } -} - -type ExpiryOptions string - -const ( - ExpiryOptionsAbsolute ExpiryOptions = "Absolute" - ExpiryOptionsNeverExpire ExpiryOptions = "NeverExpire" - ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation" - ExpiryOptionsRelativeToNow ExpiryOptions = "RelativeToNow" -) - -// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. -func PossibleExpiryOptionsValues() []ExpiryOptions { - return []ExpiryOptions{ - ExpiryOptionsAbsolute, - ExpiryOptionsNeverExpire, - ExpiryOptionsRelativeToCreation, - ExpiryOptionsRelativeToNow, - } -} - -type FilterBlobsIncludeItem string - -const ( - FilterBlobsIncludeItemNone FilterBlobsIncludeItem = "none" - FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions" -) - -// PossibleFilterBlobsIncludeItemValues returns the possible values for the FilterBlobsIncludeItem const type. -func PossibleFilterBlobsIncludeItemValues() []FilterBlobsIncludeItem { - return []FilterBlobsIncludeItem{ - FilterBlobsIncludeItemNone, - FilterBlobsIncludeItemVersions, - } -} - -type ImmutabilityPolicyMode string - -const ( - ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" - ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" - ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" -) - -// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. 
-func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { - return []ImmutabilityPolicyMode{ - ImmutabilityPolicyModeLocked, - ImmutabilityPolicyModeMutable, - ImmutabilityPolicyModeUnlocked, - } -} - -type ImmutabilityPolicySetting string - -const ( - ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" - ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" -) - -// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. -func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { - return []ImmutabilityPolicySetting{ - ImmutabilityPolicySettingLocked, - ImmutabilityPolicySettingUnlocked, - } -} - -type LeaseDurationType string - -const ( - LeaseDurationTypeFixed LeaseDurationType = "fixed" - LeaseDurationTypeInfinite LeaseDurationType = "infinite" -) - -// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. -func PossibleLeaseDurationTypeValues() []LeaseDurationType { - return []LeaseDurationType{ - LeaseDurationTypeFixed, - LeaseDurationTypeInfinite, - } -} - -type LeaseStateType string - -const ( - LeaseStateTypeAvailable LeaseStateType = "available" - LeaseStateTypeBreaking LeaseStateType = "breaking" - LeaseStateTypeBroken LeaseStateType = "broken" - LeaseStateTypeExpired LeaseStateType = "expired" - LeaseStateTypeLeased LeaseStateType = "leased" -) - -// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. -func PossibleLeaseStateTypeValues() []LeaseStateType { - return []LeaseStateType{ - LeaseStateTypeAvailable, - LeaseStateTypeBreaking, - LeaseStateTypeBroken, - LeaseStateTypeExpired, - LeaseStateTypeLeased, - } -} - -type LeaseStatusType string - -const ( - LeaseStatusTypeLocked LeaseStatusType = "locked" - LeaseStatusTypeUnlocked LeaseStatusType = "unlocked" -) - -// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. -func PossibleLeaseStatusTypeValues() []LeaseStatusType { - return []LeaseStatusType{ - LeaseStatusTypeLocked, - LeaseStatusTypeUnlocked, - } -} - -type ListBlobsIncludeItem string - -const ( - ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" - ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" - ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" - ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" - ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" - ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" - ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" - ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" - ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" - ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" -) - -// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. 
-func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { - return []ListBlobsIncludeItem{ - ListBlobsIncludeItemCopy, - ListBlobsIncludeItemDeleted, - ListBlobsIncludeItemDeletedwithversions, - ListBlobsIncludeItemImmutabilitypolicy, - ListBlobsIncludeItemLegalhold, - ListBlobsIncludeItemMetadata, - ListBlobsIncludeItemSnapshots, - ListBlobsIncludeItemTags, - ListBlobsIncludeItemUncommittedblobs, - ListBlobsIncludeItemVersions, - } -} - -type ListContainersIncludeType string - -const ( - ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" - ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" - ListContainersIncludeTypeSystem ListContainersIncludeType = "system" -) - -// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. -func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { - return []ListContainersIncludeType{ - ListContainersIncludeTypeDeleted, - ListContainersIncludeTypeMetadata, - ListContainersIncludeTypeSystem, - } -} - -type PremiumPageBlobAccessTier string - -const ( - PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = "P10" - PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = "P15" - PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = "P20" - PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = "P30" - PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = "P4" - PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = "P40" - PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = "P50" - PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = "P6" - PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = "P60" - PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = "P70" - PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = "P80" -) - -// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. -func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { - return []PremiumPageBlobAccessTier{ - PremiumPageBlobAccessTierP10, - PremiumPageBlobAccessTierP15, - PremiumPageBlobAccessTierP20, - PremiumPageBlobAccessTierP30, - PremiumPageBlobAccessTierP4, - PremiumPageBlobAccessTierP40, - PremiumPageBlobAccessTierP50, - PremiumPageBlobAccessTierP6, - PremiumPageBlobAccessTierP60, - PremiumPageBlobAccessTierP70, - PremiumPageBlobAccessTierP80, - } -} - -type PublicAccessType string - -const ( - PublicAccessTypeBlob PublicAccessType = "blob" - PublicAccessTypeContainer PublicAccessType = "container" -) - -// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. -func PossiblePublicAccessTypeValues() []PublicAccessType { - return []PublicAccessType{ - PublicAccessTypeBlob, - PublicAccessTypeContainer, - } -} - -// QueryFormatType - The quick query format type. -type QueryFormatType string - -const ( - QueryFormatTypeArrow QueryFormatType = "arrow" - QueryFormatTypeDelimited QueryFormatType = "delimited" - QueryFormatTypeJSON QueryFormatType = "json" - QueryFormatTypeParquet QueryFormatType = "parquet" -) - -// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. 
-func PossibleQueryFormatTypeValues() []QueryFormatType { - return []QueryFormatType{ - QueryFormatTypeArrow, - QueryFormatTypeDelimited, - QueryFormatTypeJSON, - QueryFormatTypeParquet, - } -} - -// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. -// Valid values are High and Standard. -type RehydratePriority string - -const ( - RehydratePriorityHigh RehydratePriority = "High" - RehydratePriorityStandard RehydratePriority = "Standard" -) - -// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. -func PossibleRehydratePriorityValues() []RehydratePriority { - return []RehydratePriority{ - RehydratePriorityHigh, - RehydratePriorityStandard, - } -} - -type SKUName string - -const ( - SKUNamePremiumLRS SKUName = "Premium_LRS" - SKUNameStandardGRS SKUName = "Standard_GRS" - SKUNameStandardLRS SKUName = "Standard_LRS" - SKUNameStandardRAGRS SKUName = "Standard_RAGRS" - SKUNameStandardZRS SKUName = "Standard_ZRS" -) - -// PossibleSKUNameValues returns the possible values for the SKUName const type. -func PossibleSKUNameValues() []SKUName { - return []SKUName{ - SKUNamePremiumLRS, - SKUNameStandardGRS, - SKUNameStandardLRS, - SKUNameStandardRAGRS, - SKUNameStandardZRS, - } -} - -type SequenceNumberActionType string - -const ( - SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" - SequenceNumberActionTypeMax SequenceNumberActionType = "max" - SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" -) - -// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. -func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { - return []SequenceNumberActionType{ - SequenceNumberActionTypeIncrement, - SequenceNumberActionTypeMax, - SequenceNumberActionTypeUpdate, - } -} - -// StorageErrorCode - Error codes returned by the service -type StorageErrorCode string - -const ( - StorageErrorCodeAccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" - StorageErrorCodeAccountBeingCreated StorageErrorCode = "AccountBeingCreated" - StorageErrorCodeAccountIsDisabled StorageErrorCode = "AccountIsDisabled" - StorageErrorCodeAppendPositionConditionNotMet StorageErrorCode = "AppendPositionConditionNotMet" - StorageErrorCodeAuthenticationFailed StorageErrorCode = "AuthenticationFailed" - StorageErrorCodeAuthorizationFailure StorageErrorCode = "AuthorizationFailure" - StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" - StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" - StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" - StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" - StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" - StorageErrorCodeBlobAlreadyExists StorageErrorCode = "BlobAlreadyExists" - StorageErrorCodeBlobArchived StorageErrorCode = "BlobArchived" - StorageErrorCodeBlobBeingRehydrated StorageErrorCode = "BlobBeingRehydrated" - StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCode = "BlobImmutableDueToPolicy" - StorageErrorCodeBlobNotArchived StorageErrorCode = "BlobNotArchived" - StorageErrorCodeBlobNotFound StorageErrorCode = "BlobNotFound" - StorageErrorCodeBlobOverwritten StorageErrorCode = "BlobOverwritten" - 
StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCode = "BlobTierInadequateForContentLength" - StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCode = "BlobUsesCustomerSpecifiedEncryption" - StorageErrorCodeBlockCountExceedsLimit StorageErrorCode = "BlockCountExceedsLimit" - StorageErrorCodeBlockListTooLong StorageErrorCode = "BlockListTooLong" - StorageErrorCodeCannotChangeToLowerTier StorageErrorCode = "CannotChangeToLowerTier" - StorageErrorCodeCannotVerifyCopySource StorageErrorCode = "CannotVerifyCopySource" - StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" - StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" - StorageErrorCodeContainerAlreadyExists StorageErrorCode = "ContainerAlreadyExists" - StorageErrorCodeContainerBeingDeleted StorageErrorCode = "ContainerBeingDeleted" - StorageErrorCodeContainerDisabled StorageErrorCode = "ContainerDisabled" - StorageErrorCodeContainerNotFound StorageErrorCode = "ContainerNotFound" - StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCode = "ContentLengthLargerThanTierLimit" - StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCode = "CopyAcrossAccountsNotSupported" - StorageErrorCodeCopyIDMismatch StorageErrorCode = "CopyIdMismatch" - StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" - StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" - StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" - StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" - StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" - StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" - StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" - StorageErrorCodeInternalError StorageErrorCode = "InternalError" - StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" - StorageErrorCodeInvalidBlobOrBlock StorageErrorCode = "InvalidBlobOrBlock" - StorageErrorCodeInvalidBlobTier StorageErrorCode = "InvalidBlobTier" - StorageErrorCodeInvalidBlobType StorageErrorCode = "InvalidBlobType" - StorageErrorCodeInvalidBlockID StorageErrorCode = "InvalidBlockId" - StorageErrorCodeInvalidBlockList StorageErrorCode = "InvalidBlockList" - StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" - StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" - StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" - StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" - StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" - StorageErrorCodeInvalidOperation StorageErrorCode = "InvalidOperation" - StorageErrorCodeInvalidPageRange StorageErrorCode = "InvalidPageRange" - StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" - StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" - StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" - StorageErrorCodeInvalidSourceBlobType StorageErrorCode = "InvalidSourceBlobType" - StorageErrorCodeInvalidSourceBlobURL StorageErrorCode = "InvalidSourceBlobUrl" - StorageErrorCodeInvalidURI StorageErrorCode = "InvalidUri" - StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCode 
= "InvalidVersionForPageBlobOperation" - StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" - StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" - StorageErrorCodeLeaseAlreadyBroken StorageErrorCode = "LeaseAlreadyBroken" - StorageErrorCodeLeaseAlreadyPresent StorageErrorCode = "LeaseAlreadyPresent" - StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCode = "LeaseIdMismatchWithBlobOperation" - StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCode = "LeaseIdMismatchWithContainerOperation" - StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCode = "LeaseIdMismatchWithLeaseOperation" - StorageErrorCodeLeaseIDMissing StorageErrorCode = "LeaseIdMissing" - StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCode = "LeaseIsBreakingAndCannotBeAcquired" - StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCode = "LeaseIsBreakingAndCannotBeChanged" - StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCode = "LeaseIsBrokenAndCannotBeRenewed" - StorageErrorCodeLeaseLost StorageErrorCode = "LeaseLost" - StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCode = "LeaseNotPresentWithBlobOperation" - StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCode = "LeaseNotPresentWithContainerOperation" - StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCode = "LeaseNotPresentWithLeaseOperation" - StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" - StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCode = "MaxBlobSizeConditionNotMet" - StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" - StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" - StorageErrorCodeMissingRequiredHeader StorageErrorCode = "MissingRequiredHeader" - StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" - StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" - StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" - StorageErrorCodeNoAuthenticationInformation StorageErrorCode = "NoAuthenticationInformation" - StorageErrorCodeNoPendingCopyOperation StorageErrorCode = "NoPendingCopyOperation" - StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCode = "OperationNotAllowedOnIncrementalCopyBlob" - StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" - StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" - StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" - StorageErrorCodePendingCopyOperation StorageErrorCode = "PendingCopyOperation" - StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCode = "PreviousSnapshotCannotBeNewer" - StorageErrorCodePreviousSnapshotNotFound StorageErrorCode = "PreviousSnapshotNotFound" - StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCode = "PreviousSnapshotOperationNotSupported" - StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" - StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" - StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" - StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" - StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" - StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCode = 
"SequenceNumberConditionNotMet" - StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCode = "SequenceNumberIncrementTooLarge" - StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" - StorageErrorCodeSnapshotCountExceeded StorageErrorCode = "SnapshotCountExceeded" - StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCode = "SnapshotOperationRateExceeded" - StorageErrorCodeSnapshotsPresent StorageErrorCode = "SnapshotsPresent" - StorageErrorCodeSourceConditionNotMet StorageErrorCode = "SourceConditionNotMet" - StorageErrorCodeSystemInUse StorageErrorCode = "SystemInUse" - StorageErrorCodeTargetConditionNotMet StorageErrorCode = "TargetConditionNotMet" - StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCode = "UnauthorizedBlobOverwrite" - StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" - StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" - StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" - StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" -) - -// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. -func PossibleStorageErrorCodeValues() []StorageErrorCode { - return []StorageErrorCode{ - StorageErrorCodeAccountAlreadyExists, - StorageErrorCodeAccountBeingCreated, - StorageErrorCodeAccountIsDisabled, - StorageErrorCodeAppendPositionConditionNotMet, - StorageErrorCodeAuthenticationFailed, - StorageErrorCodeAuthorizationFailure, - StorageErrorCodeAuthorizationPermissionMismatch, - StorageErrorCodeAuthorizationProtocolMismatch, - StorageErrorCodeAuthorizationResourceTypeMismatch, - StorageErrorCodeAuthorizationServiceMismatch, - StorageErrorCodeAuthorizationSourceIPMismatch, - StorageErrorCodeBlobAlreadyExists, - StorageErrorCodeBlobArchived, - StorageErrorCodeBlobBeingRehydrated, - StorageErrorCodeBlobImmutableDueToPolicy, - StorageErrorCodeBlobNotArchived, - StorageErrorCodeBlobNotFound, - StorageErrorCodeBlobOverwritten, - StorageErrorCodeBlobTierInadequateForContentLength, - StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, - StorageErrorCodeBlockCountExceedsLimit, - StorageErrorCodeBlockListTooLong, - StorageErrorCodeCannotChangeToLowerTier, - StorageErrorCodeCannotVerifyCopySource, - StorageErrorCodeConditionHeadersNotSupported, - StorageErrorCodeConditionNotMet, - StorageErrorCodeContainerAlreadyExists, - StorageErrorCodeContainerBeingDeleted, - StorageErrorCodeContainerDisabled, - StorageErrorCodeContainerNotFound, - StorageErrorCodeContentLengthLargerThanTierLimit, - StorageErrorCodeCopyAcrossAccountsNotSupported, - StorageErrorCodeCopyIDMismatch, - StorageErrorCodeEmptyMetadataKey, - StorageErrorCodeFeatureVersionMismatch, - StorageErrorCodeIncrementalCopyBlobMismatch, - StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed, - StorageErrorCodeIncrementalCopySourceMustBeSnapshot, - StorageErrorCodeInfiniteLeaseDurationRequired, - StorageErrorCodeInsufficientAccountPermissions, - StorageErrorCodeInternalError, - StorageErrorCodeInvalidAuthenticationInfo, - StorageErrorCodeInvalidBlobOrBlock, - StorageErrorCodeInvalidBlobTier, - StorageErrorCodeInvalidBlobType, - StorageErrorCodeInvalidBlockID, - StorageErrorCodeInvalidBlockList, - StorageErrorCodeInvalidHTTPVerb, - StorageErrorCodeInvalidHeaderValue, - StorageErrorCodeInvalidInput, - StorageErrorCodeInvalidMD5, - StorageErrorCodeInvalidMetadata, - StorageErrorCodeInvalidOperation, - StorageErrorCodeInvalidPageRange, - 
StorageErrorCodeInvalidQueryParameterValue, - StorageErrorCodeInvalidRange, - StorageErrorCodeInvalidResourceName, - StorageErrorCodeInvalidSourceBlobType, - StorageErrorCodeInvalidSourceBlobURL, - StorageErrorCodeInvalidURI, - StorageErrorCodeInvalidVersionForPageBlobOperation, - StorageErrorCodeInvalidXMLDocument, - StorageErrorCodeInvalidXMLNodeValue, - StorageErrorCodeLeaseAlreadyBroken, - StorageErrorCodeLeaseAlreadyPresent, - StorageErrorCodeLeaseIDMismatchWithBlobOperation, - StorageErrorCodeLeaseIDMismatchWithContainerOperation, - StorageErrorCodeLeaseIDMismatchWithLeaseOperation, - StorageErrorCodeLeaseIDMissing, - StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, - StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, - StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, - StorageErrorCodeLeaseLost, - StorageErrorCodeLeaseNotPresentWithBlobOperation, - StorageErrorCodeLeaseNotPresentWithContainerOperation, - StorageErrorCodeLeaseNotPresentWithLeaseOperation, - StorageErrorCodeMD5Mismatch, - StorageErrorCodeMaxBlobSizeConditionNotMet, - StorageErrorCodeMetadataTooLarge, - StorageErrorCodeMissingContentLengthHeader, - StorageErrorCodeMissingRequiredHeader, - StorageErrorCodeMissingRequiredQueryParameter, - StorageErrorCodeMissingRequiredXMLNode, - StorageErrorCodeMultipleConditionHeadersNotSupported, - StorageErrorCodeNoAuthenticationInformation, - StorageErrorCodeNoPendingCopyOperation, - StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, - StorageErrorCodeOperationTimedOut, - StorageErrorCodeOutOfRangeInput, - StorageErrorCodeOutOfRangeQueryParameterValue, - StorageErrorCodePendingCopyOperation, - StorageErrorCodePreviousSnapshotCannotBeNewer, - StorageErrorCodePreviousSnapshotNotFound, - StorageErrorCodePreviousSnapshotOperationNotSupported, - StorageErrorCodeRequestBodyTooLarge, - StorageErrorCodeRequestURLFailedToParse, - StorageErrorCodeResourceAlreadyExists, - StorageErrorCodeResourceNotFound, - StorageErrorCodeResourceTypeMismatch, - StorageErrorCodeSequenceNumberConditionNotMet, - StorageErrorCodeSequenceNumberIncrementTooLarge, - StorageErrorCodeServerBusy, - StorageErrorCodeSnapshotCountExceeded, - StorageErrorCodeSnapshotOperationRateExceeded, - StorageErrorCodeSnapshotsPresent, - StorageErrorCodeSourceConditionNotMet, - StorageErrorCodeSystemInUse, - StorageErrorCodeTargetConditionNotMet, - StorageErrorCodeUnauthorizedBlobOverwrite, - StorageErrorCodeUnsupportedHTTPVerb, - StorageErrorCodeUnsupportedHeader, - StorageErrorCodeUnsupportedQueryParameter, - StorageErrorCodeUnsupportedXMLNode, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go deleted file mode 100644 index dbc2a293e..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go +++ /dev/null @@ -1,1591 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -package generated - -import ( - "context" - "encoding/xml" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "io" - "net/http" - "strconv" - "strings" - "time" -) - -// ContainerClient contains the methods for the Container group. -// Don't use this type directly, use a constructor function instead. -type ContainerClient struct { - internal *azcore.Client - endpoint string -} - -// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 -// to 60 seconds, or can be infinite -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite -// lease can be between 15 and 60 seconds. A lease duration cannot be changed using -// renew or change. -// - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { - var err error - req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return ContainerClientAcquireLeaseResponse{}, err - } - resp, err := client.acquireLeaseHandleResponse(httpResp) - return resp, err -} - -// acquireLeaseCreateRequest creates the AcquireLease request. 
-func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - reqQP.Set("restype", "container") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} - if options != nil && options.ProposedLeaseID != nil { - req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// acquireLeaseHandleResponse handles the AcquireLease response. -func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { - result := ContainerClientAcquireLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 -// to 60 seconds, or can be infinite -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { - var err error - req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return ContainerClientBreakLeaseResponse{}, err - } - resp, err := client.breakLeaseHandleResponse(httpResp) - return resp, err -} - -// breakLeaseCreateRequest creates the BreakLease request. -func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - reqQP.Set("restype", "container") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"break"} - if options != nil && options.BreakPeriod != nil { - req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// breakLeaseHandleResponse handles the BreakLease response. 
-func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { - result := ContainerClientBreakLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-time"); val != "" { - leaseTime32, err := strconv.ParseInt(val, 10, 32) - leaseTime := int32(leaseTime32) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - result.LeaseTime = &leaseTime - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 -// to 60 seconds, or can be infinite -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - leaseID - Specifies the current lease ID on the resource. -// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed -// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID -// string formats. -// - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { - var err error - req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientChangeLeaseResponse{}, err - } - resp, err := client.changeLeaseHandleResponse(httpResp) - return resp, err -} - -// changeLeaseCreateRequest creates the ChangeLease request. 
-func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - reqQP.Set("restype", "container") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"change"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// changeLeaseHandleResponse handles the ChangeLease response. -func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { - result := ContainerClientChangeLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// Create - creates a new container under the specified account. If the container with the same name already exists, the operation -// fails -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. -// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. 
-func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { - var err error - req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) - if err != nil { - return ContainerClientCreateResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientCreateResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return ContainerClientCreateResponse{}, err - } - resp, err := client.createHandleResponse(httpResp) - return resp, err -} - -// createCreateRequest creates the Create request. -func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if options != nil && options.Access != nil { - req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil { - req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope} - } - if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil { - req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// createHandleResponse handles the Create response. -func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { - result := ContainerClientCreateResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientCreateResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientCreateResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later -// deleted during garbage collection -// If the operation fails it returns an *azcore.ResponseError type. 
-// -// Generated from API version 2023-11-03 -// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { - var err error - req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return ContainerClientDeleteResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientDeleteResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return ContainerClientDeleteResponse{}, err - } - resp, err := client.deleteHandleResponse(httpResp) - return resp, err -} - -// deleteCreateRequest creates the Delete request. -func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// deleteHandleResponse handles the Delete response. -func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (ContainerClientDeleteResponse, error) { - result := ContainerClientDeleteResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientDeleteResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// FilterBlobs - The Filter Blobs operation enables callers to list blobs in a container whose tags match a given search expression. 
-// Filter blobs searches within the given container.
-// If the operation fails it returns an *azcore.ResponseError type.
-//
-// Generated from API version 2023-11-03
-//   - where - Filters the results to return only blobs whose tags match the specified expression.
-//   - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
-func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) {
-	var err error
-	req, err := client.filterBlobsCreateRequest(ctx, where, options)
-	if err != nil {
-		return ContainerClientFilterBlobsResponse{}, err
-	}
-	httpResp, err := client.internal.Pipeline().Do(req)
-	if err != nil {
-		return ContainerClientFilterBlobsResponse{}, err
-	}
-	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
-		err = runtime.NewResponseError(httpResp)
-		return ContainerClientFilterBlobsResponse{}, err
-	}
-	resp, err := client.filterBlobsHandleResponse(httpResp)
-	return resp, err
-}
-
-// filterBlobsCreateRequest creates the FilterBlobs request.
-func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (*policy.Request, error) {
-	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
-	if err != nil {
-		return nil, err
-	}
-	reqQP := req.Raw().URL.Query()
-	reqQP.Set("restype", "container")
-	reqQP.Set("comp", "blobs")
-	if options != nil && options.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
-	}
-	reqQP.Set("where", where)
-	if options != nil && options.Marker != nil {
-		reqQP.Set("marker", *options.Marker)
-	}
-	if options != nil && options.Maxresults != nil {
-		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
-	}
-	if options != nil && options.Include != nil {
-		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
-	}
-	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
-	if options != nil && options.RequestID != nil {
-		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
-	}
-	req.Raw().Header["Accept"] = []string{"application/xml"}
-	return req, nil
-}
-
-// filterBlobsHandleResponse handles the FilterBlobs response.
-func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (ContainerClientFilterBlobsResponse, error) {
-	result := ContainerClientFilterBlobsResponse{}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientFilterBlobsResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil {
-		return ContainerClientFilterBlobsResponse{}, err
-	}
-	return result, nil
-}
-
-// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may
-// be accessed publicly.
-// If the operation fails it returns an *azcore.ResponseError type.
-// -// Generated from API version 2023-11-03 -// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy -// method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { - var err error - req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientGetAccessPolicyResponse{}, err - } - resp, err := client.getAccessPolicyHandleResponse(httpResp) - return resp, err -} - -// getAccessPolicyCreateRequest creates the GetAccessPolicy request. -func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - reqQP.Set("comp", "acl") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getAccessPolicyHandleResponse handles the GetAccessPolicy response. 
-func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response) (ContainerClientGetAccessPolicyResponse, error) { - result := ContainerClientGetAccessPolicyResponse{} - if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { - result.BlobPublicAccess = (*PublicAccessType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result); err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - return result, nil -} - -// GetAccountInfo - Returns the sku name and account kind -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo -// method. -func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { - var err error - req, err := client.getAccountInfoCreateRequest(ctx, options) - if err != nil { - return ContainerClientGetAccountInfoResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientGetAccountInfoResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientGetAccountInfoResponse{}, err - } - resp, err := client.getAccountInfoHandleResponse(httpResp) - return resp, err -} - -// getAccountInfoCreateRequest creates the GetAccountInfo request. -func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") - reqQP.Set("comp", "properties") - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getAccountInfoHandleResponse handles the GetAccountInfo response. 
-func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { - result := ContainerClientGetAccountInfoResponse{} - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetAccountInfoResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned -// does not include the container's list of blobs -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { - var err error - req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientGetPropertiesResponse{}, err - } - resp, err := client.getPropertiesHandleResponse(httpResp) - return resp, err -} - -// getPropertiesCreateRequest creates the GetProperties request. -func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getPropertiesHandleResponse handles the GetProperties response. 
-func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { - result := ContainerClientGetPropertiesResponse{} - if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { - result.BlobPublicAccess = (*PublicAccessType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { - result.DefaultEncryptionScope = &val - } - if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { - denyEncryptionScopeOverride, err := strconv.ParseBool(val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { - hasImmutabilityPolicy, err := strconv.ParseBool(val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.HasImmutabilityPolicy = &hasImmutabilityPolicy - } - if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" { - hasLegalHold, err := strconv.ParseBool(val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.HasLegalHold = &hasLegalHold - } - if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { - isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container -// -// Generated from API version 2023-11-03 -// - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager -// method. -// -// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. 
-func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) - } - if options != nil && options.Marker != nil { - reqQP.Set("marker", *options.Marker) - } - if options != nil && options.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) - } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. -func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { - result := ContainerClientListBlobFlatSegmentResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientListBlobFlatSegmentResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { - return ContainerClientListBlobFlatSegmentResponse{}, err - } - return result, nil -} - -// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container -// -// Generated from API version 2023-11-03 -// - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that -// acts as a placeholder for all blobs whose names begin with the same substring up to the -// appearance of the delimiter character. The delimiter may be a single character or a string. -// - options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager -// method. 
-func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[ContainerClientListBlobHierarchySegmentResponse] { - return runtime.NewPager(runtime.PagingHandler[ContainerClientListBlobHierarchySegmentResponse]{ - More: func(page ContainerClientListBlobHierarchySegmentResponse) bool { - return page.NextMarker != nil && len(*page.NextMarker) > 0 - }, - Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { - nextLink := "" - if page != nil { - nextLink = *page.NextMarker - } - resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { - return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) - }, nil) - if err != nil { - return ContainerClientListBlobHierarchySegmentResponse{}, err - } - return client.ListBlobHierarchySegmentHandleResponse(resp) - }, - }) -} - -// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. -func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) - } - reqQP.Set("delimiter", delimiter) - if options != nil && options.Marker != nil { - reqQP.Set("marker", *options.Marker) - } - if options != nil && options.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) - } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. 
-func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { - result := ContainerClientListBlobHierarchySegmentResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientListBlobHierarchySegmentResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { - return ContainerClientListBlobHierarchySegmentResponse{}, err - } - return result, nil -} - -// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 -// to 60 seconds, or can be infinite -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - leaseID - Specifies the current lease ID on the resource. -// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { - var err error - req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientReleaseLeaseResponse{}, err - } - resp, err := client.releaseLeaseHandleResponse(httpResp) - return resp, err -} - -// releaseLeaseCreateRequest creates the ReleaseLease request. 
-func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - reqQP.Set("restype", "container") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"release"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// releaseLeaseHandleResponse handles the ReleaseLease response. -func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { - result := ContainerClientReleaseLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// Rename - Renames an existing container. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - sourceContainerName - Required. Specifies the name of the container to rename. -// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
-func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { - var err error - req, err := client.renameCreateRequest(ctx, sourceContainerName, options) - if err != nil { - return ContainerClientRenameResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientRenameResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientRenameResponse{}, err - } - resp, err := client.renameHandleResponse(httpResp) - return resp, err -} - -// renameCreateRequest creates the Rename request. -func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - reqQP.Set("comp", "rename") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["x-ms-source-container-name"] = []string{sourceContainerName} - if options != nil && options.SourceLeaseID != nil { - req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// renameHandleResponse handles the Rename response. -func (client *ContainerClient) renameHandleResponse(resp *http.Response) (ContainerClientRenameResponse, error) { - result := ContainerClientRenameResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRenameResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 -// to 60 seconds, or can be infinite -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - leaseID - Specifies the current lease ID on the resource. -// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { - var err error - req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientRenewLeaseResponse{}, err - } - resp, err := client.renewLeaseHandleResponse(httpResp) - return resp, err -} - -// renewLeaseCreateRequest creates the RenewLease request. -func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "lease") - reqQP.Set("restype", "container") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"renew"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// renewLeaseHandleResponse handles the RenewLease response. -func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { - result := ContainerClientRenewLeaseResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// Restore - Restores a previously-deleted container. -// If the operation fails it returns an *azcore.ResponseError type. 
-// -// Generated from API version 2023-11-03 -// - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. -func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { - var err error - req, err := client.restoreCreateRequest(ctx, options) - if err != nil { - return ContainerClientRestoreResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientRestoreResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return ContainerClientRestoreResponse{}, err - } - resp, err := client.restoreHandleResponse(httpResp) - return resp, err -} - -// restoreCreateRequest creates the Restore request. -func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options *ContainerClientRestoreOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - reqQP.Set("comp", "undelete") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.DeletedContainerName != nil { - req.Raw().Header["x-ms-deleted-container-name"] = []string{*options.DeletedContainerName} - } - if options != nil && options.DeletedContainerVersion != nil { - req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// restoreHandleResponse handles the Restore response. -func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (ContainerClientRestoreResponse, error) { - result := ContainerClientRestoreResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRestoreResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container -// may be accessed publicly. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - containerACL - the acls for the container -// - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy -// method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { - var err error - req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientSetAccessPolicyResponse{}, err - } - resp, err := client.setAccessPolicyHandleResponse(httpResp) - return resp, err -} - -// setAccessPolicyCreateRequest creates the SetAccessPolicy request. -func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - reqQP.Set("comp", "acl") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.Access != nil { - req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - type wrapper struct { - XMLName xml.Name `xml:"SignedIdentifiers"` - ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` - } - if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil { - return nil, err - } - return req, nil -} - -// setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
-func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { - result := ContainerClientSetAccessPolicyResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { - var err error - req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return ContainerClientSetMetadataResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientSetMetadataResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ContainerClientSetMetadataResponse{}, err - } - resp, err := client.setMetadataHandleResponse(httpResp) - return resp, err -} - -// setMetadataCreateRequest creates the SetMetadata request. 
-func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - reqQP.Set("comp", "metadata") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// setMetadataHandleResponse handles the SetMetadata response. -func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { - result := ContainerClientSetMetadataResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetMetadataResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetMetadataResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. -// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header -// value: multipart/mixed; boundary=batch_ -// - body - Initial data -// - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. 
-func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { - var err error - req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) - if err != nil { - return ContainerClientSubmitBatchResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientSubmitBatchResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return ContainerClientSubmitBatchResponse{}, err - } - resp, err := client.submitBatchHandleResponse(httpResp) - return resp, err -} - -// submitBatchCreateRequest creates the SubmitBatch request. -func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") - reqQP.Set("comp", "batch") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - runtime.SkipBodyDownload(req) - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := req.SetBody(body, multipartContentType); err != nil { - return nil, err - } - return req, nil -} - -// submitBatchHandleResponse handles the SubmitBatch response. -func (client *ContainerClient) submitBatchHandleResponse(resp *http.Response) (ContainerClientSubmitBatchResponse, error) { - result := ContainerClientSubmitBatchResponse{Body: resp.Body} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go deleted file mode 100644 index 7251de839..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ /dev/null @@ -1,544 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "time" -) - -// AccessPolicy - An Access policy -type AccessPolicy struct { - // the date-time the policy expires - Expiry *time.Time `xml:"Expiry"` - - // the permissions for the acl policy - Permission *string `xml:"Permission"` - - // the date-time the policy is active - Start *time.Time `xml:"Start"` -} - -// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. -type ArrowConfiguration struct { - // REQUIRED - Schema []*ArrowField `xml:"Schema>Field"` -} - -// ArrowField - Groups settings regarding specific field of an arrow schema -type ArrowField struct { - // REQUIRED - Type *string `xml:"Type"` - Name *string `xml:"Name"` - Precision *int32 `xml:"Precision"` - Scale *int32 `xml:"Scale"` -} - -type BlobFlatListSegment struct { - // REQUIRED - BlobItems []*BlobItem `xml:"Blob"` -} - -type BlobHierarchyListSegment struct { - // REQUIRED - BlobItems []*BlobItem `xml:"Blob"` - BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` -} - -// BlobItem - An Azure Storage blob -type BlobItem struct { - // REQUIRED - Deleted *bool `xml:"Deleted"` - - // REQUIRED - Name *string `xml:"Name"` - - // REQUIRED; Properties of a blob - Properties *BlobProperties `xml:"Properties"` - - // REQUIRED - Snapshot *string `xml:"Snapshot"` - - // Blob tags - BlobTags *BlobTags `xml:"Tags"` - HasVersionsOnly *bool `xml:"HasVersionsOnly"` - IsCurrentVersion *bool `xml:"IsCurrentVersion"` - - // Dictionary of - Metadata map[string]*string `xml:"Metadata"` - - // Dictionary of - OrMetadata map[string]*string `xml:"OrMetadata"` - VersionID *string `xml:"VersionId"` -} - -type BlobName struct { - // The name of the blob. - Content *string `xml:",chardata"` - - // Indicates if the blob name is encoded. - Encoded *bool `xml:"Encoded,attr"` -} - -type BlobPrefix struct { - // REQUIRED - Name *string `xml:"Name"` -} - -// BlobProperties - Properties of a blob -type BlobProperties struct { - // REQUIRED - ETag *azcore.ETag `xml:"Etag"` - - // REQUIRED - LastModified *time.Time `xml:"Last-Modified"` - AccessTier *AccessTier `xml:"AccessTier"` - AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` - AccessTierInferred *bool `xml:"AccessTierInferred"` - ArchiveStatus *ArchiveStatus `xml:"ArchiveStatus"` - BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` - BlobType *BlobType `xml:"BlobType"` - CacheControl *string `xml:"Cache-Control"` - ContentDisposition *string `xml:"Content-Disposition"` - ContentEncoding *string `xml:"Content-Encoding"` - ContentLanguage *string `xml:"Content-Language"` - - // Size in bytes - ContentLength *int64 `xml:"Content-Length"` - ContentMD5 []byte `xml:"Content-MD5"` - ContentType *string `xml:"Content-Type"` - CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` - CopyID *string `xml:"CopyId"` - CopyProgress *string `xml:"CopyProgress"` - CopySource *string `xml:"CopySource"` - CopyStatus *CopyStatusType `xml:"CopyStatus"` - CopyStatusDescription *string `xml:"CopyStatusDescription"` - CreationTime *time.Time `xml:"Creation-Time"` - CustomerProvidedKeySHA256 *string `xml:"CustomerProvidedKeySha256"` - DeletedTime *time.Time `xml:"DeletedTime"` - DestinationSnapshot *string `xml:"DestinationSnapshot"` - - // The name of the encryption scope under which the blob is encrypted. 
- EncryptionScope *string `xml:"EncryptionScope"` - ExpiresOn *time.Time `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` - ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` - IncrementalCopy *bool `xml:"IncrementalCopy"` - IsSealed *bool `xml:"Sealed"` - LastAccessedOn *time.Time `xml:"LastAccessTime"` - LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` - LeaseState *LeaseStateType `xml:"LeaseState"` - LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` - LegalHold *bool `xml:"LegalHold"` - - // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High - // and Standard. - RehydratePriority *RehydratePriority `xml:"RehydratePriority"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` - ServerEncrypted *bool `xml:"ServerEncrypted"` - TagCount *int32 `xml:"TagCount"` -} - -type BlobTag struct { - // REQUIRED - Key *string `xml:"Key"` - - // REQUIRED - Value *string `xml:"Value"` -} - -// BlobTags - Blob tags -type BlobTags struct { - // REQUIRED - BlobTagSet []*BlobTag `xml:"TagSet>Tag"` -} - -// Block - Represents a single block in a block blob. It describes the block's ID and size. -type Block struct { - // REQUIRED; The base64 encoded block ID. - Name *string `xml:"Name"` - - // REQUIRED; The block size in bytes. - Size *int64 `xml:"Size"` -} - -type BlockList struct { - CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` - UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` -} - -type BlockLookupList struct { - Committed []*string `xml:"Committed"` - Latest []*string `xml:"Latest"` - Uncommitted []*string `xml:"Uncommitted"` -} - -type ClearRange struct { - // REQUIRED - End *int64 `xml:"End"` - - // REQUIRED - Start *int64 `xml:"Start"` -} - -// ContainerItem - An Azure Storage container -type ContainerItem struct { - // REQUIRED - Name *string `xml:"Name"` - - // REQUIRED; Properties of a container - Properties *ContainerProperties `xml:"Properties"` - Deleted *bool `xml:"Deleted"` - - // Dictionary of - Metadata map[string]*string `xml:"Metadata"` - Version *string `xml:"Version"` -} - -// ContainerProperties - Properties of a container -type ContainerProperties struct { - // REQUIRED - ETag *azcore.ETag `xml:"Etag"` - - // REQUIRED - LastModified *time.Time `xml:"Last-Modified"` - DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` - DeletedTime *time.Time `xml:"DeletedTime"` - HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` - HasLegalHold *bool `xml:"HasLegalHold"` - - // Indicates if version level worm is enabled on this container. - IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` - LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` - LeaseState *LeaseStateType `xml:"LeaseState"` - LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` - PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` - PublicAccess *PublicAccessType `xml:"PublicAccess"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` -} - -// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another -// domain. 
Web browsers implement a security restriction known as same-origin policy that -// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin -// domain) to call APIs in another domain -type CORSRule struct { - // REQUIRED; the request headers that the origin domain may specify on the CORS request. - AllowedHeaders *string `xml:"AllowedHeaders"` - - // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) - AllowedMethods *string `xml:"AllowedMethods"` - - // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain - // is the domain from which the request originates. Note that the origin must be an exact - // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' - // to allow all origin domains to make requests via CORS. - AllowedOrigins *string `xml:"AllowedOrigins"` - - // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request - // issuer - ExposedHeaders *string `xml:"ExposedHeaders"` - - // REQUIRED; The maximum amount time that a browser should cache the preflight OPTIONS request. - MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` -} - -// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. -type DelimitedTextConfiguration struct { - // The string used to separate columns. - ColumnSeparator *string `xml:"ColumnSeparator"` - - // The string used as an escape character. - EscapeChar *string `xml:"EscapeChar"` - - // The string used to quote a specific field. - FieldQuote *string `xml:"FieldQuote"` - - // Represents whether the data has headers. - HeadersPresent *bool `xml:"HasHeaders"` - - // The string used to separate records. - RecordSeparator *string `xml:"RecordSeparator"` -} - -// FilterBlobItem - Blob info from a Filter Blobs API call -type FilterBlobItem struct { - // REQUIRED - ContainerName *string `xml:"ContainerName"` - - // REQUIRED - Name *string `xml:"Name"` - IsCurrentVersion *bool `xml:"IsCurrentVersion"` - - // Blob tags - Tags *BlobTags `xml:"Tags"` - VersionID *string `xml:"VersionId"` -} - -// FilterBlobSegment - The result of a Filter Blobs API call -type FilterBlobSegment struct { - // REQUIRED - Blobs []*FilterBlobItem `xml:"Blobs>Blob"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - - // REQUIRED - Where *string `xml:"Where"` - NextMarker *string `xml:"NextMarker"` -} - -// GeoReplication - Geo-Replication information for the Secondary Storage Service -type GeoReplication struct { - // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available - // for read operations at the secondary. Primary writes after this point in time may or may - // not be available for reads. - LastSyncTime *time.Time `xml:"LastSyncTime"` - - // REQUIRED; The status of the secondary location - Status *BlobGeoReplicationStatus `xml:"Status"` -} - -// JSONTextConfiguration - json text configuration -type JSONTextConfiguration struct { - // The string used to separate records. 
- RecordSeparator *string `xml:"RecordSeparator"` -} - -// KeyInfo - Key information -type KeyInfo struct { - // REQUIRED; The date-time the key expires in ISO 8601 UTC time - Expiry *string `xml:"Expiry"` - - // REQUIRED; The date-time the key is active in ISO 8601 UTC time - Start *string `xml:"Start"` -} - -// ListBlobsFlatSegmentResponse - An enumeration of blobs -type ListBlobsFlatSegmentResponse struct { - // REQUIRED - ContainerName *string `xml:"ContainerName,attr"` - - // REQUIRED - Segment *BlobFlatListSegment `xml:"Blobs"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// ListBlobsHierarchySegmentResponse - An enumeration of blobs -type ListBlobsHierarchySegmentResponse struct { - // REQUIRED - ContainerName *string `xml:"ContainerName,attr"` - - // REQUIRED - Segment *BlobHierarchyListSegment `xml:"Blobs"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Delimiter *string `xml:"Delimiter"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// ListContainersSegmentResponse - An enumeration of containers -type ListContainersSegmentResponse struct { - // REQUIRED - ContainerItems []*ContainerItem `xml:"Containers>Container"` - - // REQUIRED - ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` - Marker *string `xml:"Marker"` - MaxResults *int32 `xml:"MaxResults"` - NextMarker *string `xml:"NextMarker"` - Prefix *string `xml:"Prefix"` -} - -// Logging - Azure Analytics Logging settings. -type Logging struct { - // REQUIRED; Indicates whether all delete requests should be logged. - Delete *bool `xml:"Delete"` - - // REQUIRED; Indicates whether all read requests should be logged. - Read *bool `xml:"Read"` - - // REQUIRED; the retention policy which determines how long the associated data should persist - RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` - - // REQUIRED; The version of Storage Analytics to configure. - Version *string `xml:"Version"` - - // REQUIRED; Indicates whether all write requests should be logged. - Write *bool `xml:"Write"` -} - -// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs -type Metrics struct { - // REQUIRED; Indicates whether metrics are enabled for the Blob service. - Enabled *bool `xml:"Enabled"` - - // Indicates whether metrics should generate summary statistics for called API operations. - IncludeAPIs *bool `xml:"IncludeAPIs"` - - // the retention policy which determines how long the associated data should persist - RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` - - // The version of Storage Analytics to configure. - Version *string `xml:"Version"` -} - -// PageList - the list of pages -type PageList struct { - ClearRange []*ClearRange `xml:"ClearRange"` - NextMarker *string `xml:"NextMarker"` - PageRange []*PageRange `xml:"PageRange"` -} - -type PageRange struct { - // REQUIRED - End *int64 `xml:"End"` - - // REQUIRED - Start *int64 `xml:"Start"` -} - -type QueryFormat struct { - // REQUIRED; The quick query format type. - Type *QueryFormatType `xml:"Type"` - - // Groups the settings used for formatting the response if the response should be Arrow formatted. 
- ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` - - // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. - DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` - - // json text configuration - JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` - - // parquet configuration - ParquetTextConfiguration any `xml:"ParquetTextConfiguration"` -} - -// QueryRequest - Groups the set of query request settings. -type QueryRequest struct { - // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. - Expression *string `xml:"Expression"` - - // CONSTANT; Required. The type of the provided query expression. - // Field has constant value "SQL", any specified value is ignored. - QueryType *string `xml:"QueryType"` - InputSerialization *QuerySerialization `xml:"InputSerialization"` - OutputSerialization *QuerySerialization `xml:"OutputSerialization"` -} - -type QuerySerialization struct { - // REQUIRED - Format *QueryFormat `xml:"Format"` -} - -// RetentionPolicy - the retention policy which determines how long the associated data should persist -type RetentionPolicy struct { - // REQUIRED; Indicates whether a retention policy is enabled for the storage service - Enabled *bool `xml:"Enabled"` - - // Indicates whether permanent delete is allowed on this storage account. - AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` - - // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this - // value will be deleted - Days *int32 `xml:"Days"` -} - -// SignedIdentifier - signed identifier -type SignedIdentifier struct { - // REQUIRED; An Access policy - AccessPolicy *AccessPolicy `xml:"AccessPolicy"` - - // REQUIRED; a unique id - ID *string `xml:"Id"` -} - -// StaticWebsite - The properties that enable an account to host a static website -type StaticWebsite struct { - // REQUIRED; Indicates whether this account is hosting a static website - Enabled *bool `xml:"Enabled"` - - // Absolute path of the default index page - DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` - - // The absolute path of the custom 404 page - ErrorDocument404Path *string `xml:"ErrorDocument404Path"` - - // The default name of the index page under each directory - IndexDocument *string `xml:"IndexDocument"` -} - -type StorageError struct { - Message *string -} - -// StorageServiceProperties - Storage Service Properties. -type StorageServiceProperties struct { - // The set of CORS rules. - CORS []*CORSRule `xml:"Cors>CorsRule"` - - // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible - // values include version 2008-10-27 and all more recent versions - DefaultServiceVersion *string `xml:"DefaultServiceVersion"` - - // the retention policy which determines how long the associated data should persist - DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - HourMetrics *Metrics `xml:"HourMetrics"` - - // Azure Analytics Logging settings. 
- Logging *Logging `xml:"Logging"` - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - MinuteMetrics *Metrics `xml:"MinuteMetrics"` - - // The properties that enable an account to host a static website - StaticWebsite *StaticWebsite `xml:"StaticWebsite"` -} - -// StorageServiceStats - Stats for the storage service. -type StorageServiceStats struct { - // Geo-Replication information for the Secondary Storage Service - GeoReplication *GeoReplication `xml:"GeoReplication"` -} - -// UserDelegationKey - A user delegation key -type UserDelegationKey struct { - // REQUIRED; The date-time the key expires - SignedExpiry *time.Time `xml:"SignedExpiry"` - - // REQUIRED; The Azure Active Directory object ID in GUID format. - SignedOID *string `xml:"SignedOid"` - - // REQUIRED; Abbreviation of the Azure Storage service that accepts the key - SignedService *string `xml:"SignedService"` - - // REQUIRED; The date-time the key is active - SignedStart *time.Time `xml:"SignedStart"` - - // REQUIRED; The Azure Active Directory tenant ID in GUID format - SignedTID *string `xml:"SignedTid"` - - // REQUIRED; The service version that created the key - SignedVersion *string `xml:"SignedVersion"` - - // REQUIRED; The key as a base64 string - Value *string `xml:"Value"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go deleted file mode 100644 index 7e094db87..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go +++ /dev/null @@ -1,472 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "encoding/json" - "encoding/xml" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "reflect" - "time" -) - -// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. -func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias AccessPolicy - aux := &struct { - *alias - Expiry *dateTimeRFC3339 `xml:"Expiry"` - Start *dateTimeRFC3339 `xml:"Start"` - }{ - alias: (*alias)(&a), - Expiry: (*dateTimeRFC3339)(a.Expiry), - Start: (*dateTimeRFC3339)(a.Start), - } - return enc.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. -func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias AccessPolicy - aux := &struct { - *alias - Expiry *dateTimeRFC3339 `xml:"Expiry"` - Start *dateTimeRFC3339 `xml:"Start"` - }{ - alias: (*alias)(a), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - a.Expiry = (*time.Time)(aux.Expiry) - a.Start = (*time.Time)(aux.Start) - return nil -} - -// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. 
-func (a ArrowConfiguration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias ArrowConfiguration - aux := &struct { - *alias - Schema *[]*ArrowField `xml:"Schema>Field"` - }{ - alias: (*alias)(&a), - } - if a.Schema != nil { - aux.Schema = &a.Schema - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. -func (b BlobFlatListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias BlobFlatListSegment - aux := &struct { - *alias - BlobItems *[]*BlobItem `xml:"Blob"` - }{ - alias: (*alias)(&b), - } - if b.BlobItems != nil { - aux.BlobItems = &b.BlobItems - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. -func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias BlobHierarchyListSegment - aux := &struct { - *alias - BlobItems *[]*BlobItem `xml:"Blob"` - BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` - }{ - alias: (*alias)(&b), - } - if b.BlobItems != nil { - aux.BlobItems = &b.BlobItems - } - if b.BlobPrefixes != nil { - aux.BlobPrefixes = &b.BlobPrefixes - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type BlobProperties. -func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias BlobProperties - aux := &struct { - *alias - AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` - DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` - ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` - LastModified *dateTimeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(&b), - AccessTierChangeTime: (*dateTimeRFC1123)(b.AccessTierChangeTime), - CopyCompletionTime: (*dateTimeRFC1123)(b.CopyCompletionTime), - CreationTime: (*dateTimeRFC1123)(b.CreationTime), - DeletedTime: (*dateTimeRFC1123)(b.DeletedTime), - ExpiresOn: (*dateTimeRFC1123)(b.ExpiresOn), - ImmutabilityPolicyExpiresOn: (*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn), - LastAccessedOn: (*dateTimeRFC1123)(b.LastAccessedOn), - LastModified: (*dateTimeRFC1123)(b.LastModified), - } - if b.ContentMD5 != nil { - encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) - aux.ContentMD5 = &encodedContentMD5 - } - return enc.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobProperties. 
-func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias BlobProperties - aux := &struct { - *alias - AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` - DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` - ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` - LastModified *dateTimeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(b), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) - if aux.ContentMD5 != nil { - if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { - return err - } - } - b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) - b.CreationTime = (*time.Time)(aux.CreationTime) - b.DeletedTime = (*time.Time)(aux.DeletedTime) - b.ExpiresOn = (*time.Time)(aux.ExpiresOn) - b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) - b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) - b.LastModified = (*time.Time)(aux.LastModified) - return nil -} - -// MarshalXML implements the xml.Marshaller interface for type BlobTags. -func (b BlobTags) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "Tags" - type alias BlobTags - aux := &struct { - *alias - BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` - }{ - alias: (*alias)(&b), - } - if b.BlobTagSet != nil { - aux.BlobTagSet = &b.BlobTagSet - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type BlockList. -func (b BlockList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias BlockList - aux := &struct { - *alias - CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` - UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` - }{ - alias: (*alias)(&b), - } - if b.CommittedBlocks != nil { - aux.CommittedBlocks = &b.CommittedBlocks - } - if b.UncommittedBlocks != nil { - aux.UncommittedBlocks = &b.UncommittedBlocks - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. -func (b BlockLookupList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "BlockList" - type alias BlockLookupList - aux := &struct { - *alias - Committed *[]*string `xml:"Committed"` - Latest *[]*string `xml:"Latest"` - Uncommitted *[]*string `xml:"Uncommitted"` - }{ - alias: (*alias)(&b), - } - if b.Committed != nil { - aux.Committed = &b.Committed - } - if b.Latest != nil { - aux.Latest = &b.Latest - } - if b.Uncommitted != nil { - aux.Uncommitted = &b.Uncommitted - } - return enc.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. -func (c *ContainerItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias ContainerItem - aux := &struct { - *alias - Metadata additionalProperties `xml:"Metadata"` - }{ - alias: (*alias)(c), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - c.Metadata = (map[string]*string)(aux.Metadata) - return nil -} - -// MarshalXML implements the xml.Marshaller interface for type ContainerProperties. 
-func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias ContainerProperties - aux := &struct { - *alias - DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` - LastModified *dateTimeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(&c), - DeletedTime: (*dateTimeRFC1123)(c.DeletedTime), - LastModified: (*dateTimeRFC1123)(c.LastModified), - } - return enc.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. -func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias ContainerProperties - aux := &struct { - *alias - DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` - LastModified *dateTimeRFC1123 `xml:"Last-Modified"` - }{ - alias: (*alias)(c), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - c.DeletedTime = (*time.Time)(aux.DeletedTime) - c.LastModified = (*time.Time)(aux.LastModified) - return nil -} - -// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. -func (f FilterBlobSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias FilterBlobSegment - aux := &struct { - *alias - Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` - }{ - alias: (*alias)(&f), - } - if f.Blobs != nil { - aux.Blobs = &f.Blobs - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type GeoReplication. -func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias GeoReplication - aux := &struct { - *alias - LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` - }{ - alias: (*alias)(&g), - LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime), - } - return enc.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. -func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias GeoReplication - aux := &struct { - *alias - LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` - }{ - alias: (*alias)(g), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - g.LastSyncTime = (*time.Time)(aux.LastSyncTime) - return nil -} - -// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. -func (l ListContainersSegmentResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias ListContainersSegmentResponse - aux := &struct { - *alias - ContainerItems *[]*ContainerItem `xml:"Containers>Container"` - }{ - alias: (*alias)(&l), - } - if l.ContainerItems != nil { - aux.ContainerItems = &l.ContainerItems - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type PageList. -func (p PageList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias PageList - aux := &struct { - *alias - ClearRange *[]*ClearRange `xml:"ClearRange"` - PageRange *[]*PageRange `xml:"PageRange"` - }{ - alias: (*alias)(&p), - } - if p.ClearRange != nil { - aux.ClearRange = &p.ClearRange - } - if p.PageRange != nil { - aux.PageRange = &p.PageRange - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type QueryRequest. 
-func (q QueryRequest) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - start.Name.Local = "QueryRequest" - type alias QueryRequest - aux := &struct { - *alias - }{ - alias: (*alias)(&q), - } - return enc.EncodeElement(aux, start) -} - -// MarshalJSON implements the json.Marshaller interface for type StorageError. -func (s StorageError) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]any) - populate(objectMap, "Message", s.Message) - return json.Marshal(objectMap) -} - -// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. -func (s *StorageError) UnmarshalJSON(data []byte) error { - var rawMsg map[string]json.RawMessage - if err := json.Unmarshal(data, &rawMsg); err != nil { - return fmt.Errorf("unmarshalling type %T: %v", s, err) - } - for key, val := range rawMsg { - var err error - switch key { - case "Message": - err = unpopulate(val, "Message", &s.Message) - delete(rawMsg, key) - } - if err != nil { - return fmt.Errorf("unmarshalling type %T: %v", s, err) - } - } - return nil -} - -// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. -func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias StorageServiceProperties - aux := &struct { - *alias - CORS *[]*CORSRule `xml:"Cors>CorsRule"` - }{ - alias: (*alias)(&s), - } - if s.CORS != nil { - aux.CORS = &s.CORS - } - return enc.EncodeElement(aux, start) -} - -// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. -func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { - type alias UserDelegationKey - aux := &struct { - *alias - SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` - SignedStart *dateTimeRFC3339 `xml:"SignedStart"` - }{ - alias: (*alias)(&u), - SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry), - SignedStart: (*dateTimeRFC3339)(u.SignedStart), - } - return enc.EncodeElement(aux, start) -} - -// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. 
-func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias UserDelegationKey - aux := &struct { - *alias - SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` - SignedStart *dateTimeRFC3339 `xml:"SignedStart"` - }{ - alias: (*alias)(u), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - u.SignedExpiry = (*time.Time)(aux.SignedExpiry) - u.SignedStart = (*time.Time)(aux.SignedStart) - return nil -} - -func populate(m map[string]any, k string, v any) { - if v == nil { - return - } else if azcore.IsNullValue(v) { - m[k] = nil - } else if !reflect.ValueOf(v).IsNil() { - m[k] = v - } -} - -func populateAny(m map[string]any, k string, v any) { - if v == nil { - return - } else if azcore.IsNullValue(v) { - m[k] = nil - } else { - m[k] = v - } -} - -func unpopulate(data json.RawMessage, fn string, v any) error { - if data == nil { - return nil - } - if err := json.Unmarshal(data, v); err != nil { - return fmt.Errorf("struct field %s: %v", fn, err) - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go deleted file mode 100644 index 216f8b73a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go +++ /dev/null @@ -1,1469 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "time" -) - -// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL -// method. -type AppendBlobClientAppendBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - - // Bytes of source data in the specified range. - SourceRange *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. -type AppendBlobClientAppendBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. -type AppendBlobClientCreateOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - - // Specified if a legal hold should be set on the blob. - LegalHold *bool - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. -type AppendBlobClientSealOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. -type AppendPositionAccessConditions struct { - // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. - // Append Block will succeed only if the append position is equal to this number. If - // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - AppendPosition *int64 - - // Optional conditional header. The max length in bytes permitted for the append blob. 
If the Append Block operation would - // cause the blob to exceed that limit or if the blob size is already greater than - // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - - // Precondition Failed). - MaxSize *int64 -} - -// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. -type BlobClientAbortCopyFromURLOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. -type BlobClientAcquireLeaseOptions struct { - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. -type BlobClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. -type BlobClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. -type BlobClientCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - - // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. - CopySourceTags *BlobCopySourceTags - - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - - // Specified if a legal hold should be set on the blob. - LegalHold *bool - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. -type BlobClientCreateSnapshotOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy -// method. -type BlobClientDeleteImmutabilityPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. -type BlobClientDeleteOptions struct { - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob - // itself - DeleteSnapshots *DeleteSnapshotsOptionType - - // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. - DeleteType *DeleteType - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. -type BlobClientDownloadOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - - // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentCRC64 *bool - - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. -type BlobClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. -type BlobClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. -type BlobClientGetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. -type BlobClientQueryOptions struct { - // the query request - QueryRequest *QueryRequest - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. -type BlobClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. -type BlobClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. -type BlobClientSetExpiryOptions struct { - // The time to set the blob to expiry - ExpiresOn *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. -type BlobClientSetHTTPHeadersOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. -type BlobClientSetImmutabilityPolicyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. -type BlobClientSetLegalHoldOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. -type BlobClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. -type BlobClientSetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. -type BlobClientSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
- RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. -type BlobClientStartCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - - // Specified if a legal hold should be set on the blob. - LegalHold *bool - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. -type BlobClientUndeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -type BlobHTTPHeaders struct { - // Optional. 
Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. - BlobCacheControl *string - - // Optional. Sets the blob's Content-Disposition header. - BlobContentDisposition *string - - // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentEncoding *string - - // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentLanguage *string - - // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks - // were validated when each was uploaded. - BlobContentMD5 []byte - - // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. - BlobContentType *string -} - -// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. -type BlockBlobClientCommitBlockListOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - - // Specified if a legal hold should be set on the blob. - LegalHold *bool - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. -type BlockBlobClientGetBlockListOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. -type BlockBlobClientPutBlobFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - - // Optional, default is true. Indicates if properties from the source blob should be copied. - CopySourceBlobProperties *bool - - // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. - CopySourceTags *BlobCopySourceTags - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. -type BlockBlobClientStageBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - - // Bytes of source data in the specified range. - SourceRange *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. -type BlockBlobClientStageBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. -type BlockBlobClientUploadOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - - // Specified if a legal hold should be set on the blob. - LegalHold *bool - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. -type ContainerClientAcquireLeaseOptions struct { - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
- RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. -type ContainerClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. -type ContainerClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. -type ContainerClientCreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. -type ContainerClientDeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. -type ContainerClientFilterBlobsOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []FilterBlobsIncludeItem - - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. -type ContainerClientGetAccessPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. -type ContainerClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. 
-type ContainerClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager -// method. -type ContainerClientListBlobFlatSegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager -// method. -type ContainerClientListBlobHierarchySegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. 
Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. -type ContainerClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. -type ContainerClientRenameOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. - SourceLeaseID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. -type ContainerClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. -type ContainerClientRestoreOptions struct { - // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. - DeletedContainerName *string - - // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. - DeletedContainerVersion *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. -type ContainerClientSetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. -type ContainerClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. -type ContainerClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. -type ContainerCPKScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all - // future writes. - DefaultEncryptionScope *string - - // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than - // the scope set on the container. - PreventEncryptionScopeOverride *bool -} - -// CPKInfo contains a group of parameters for the BlobClient.Download method. -type CPKInfo struct { - // The algorithm used to produce the encryption key hash. 
Currently, the only accepted value is "AES256". Must be provided - // if the x-ms-encryption-key header is provided. - EncryptionAlgorithm *EncryptionAlgorithmType - - // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption - // is performed with the root account encryption key. For more information, see - // Encryption at Rest for Azure Storage Services. - EncryptionKey *string - - // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. - EncryptionKeySHA256 *string -} - -// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CPKScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided - // in the request. If not specified, encryption is performed with the default - // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - EncryptionScope *string -} - -// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -type LeaseAccessConditions struct { - // If specified, the operation only succeeds if the resource's lease is active and matches this ID. - LeaseID *string -} - -// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -type ModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - IfMatch *azcore.ETag - - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - IfModifiedSince *time.Time - - // Specify an ETag value to operate only on blobs without a matching value. - IfNoneMatch *azcore.ETag - - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - IfTags *string - - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - IfUnmodifiedSince *time.Time -} - -// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. -type PageBlobClientClearPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. -type PageBlobClientCopyIncrementalOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. 
-type PageBlobClientCreateOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - - // Specified if a legal hold should be set on the blob. - LegalHold *bool - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Optional. Indicates the tier to be set on the page blob. - Tier *PremiumPageBlobAccessTier - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager -// method. -type PageBlobClientGetPageRangesDiffOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - - // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot - // of the target blob. The response will only contain pages that were changed - // between the target blob and its previous snapshot. - PrevSnapshotURL *string - - // Optional in version 2015-07-08 and newer. 
The prevsnapshot parameter is a DateTime value that specifies that the response - // will contain only pages that were changed between target blob and previous - // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental - // snapshots are currently supported only for blobs created on or after January 1, 2016. - Prevsnapshot *string - - // Return only the bytes of the blob in the specified range. - Range *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. -type PageBlobClientGetPageRangesOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - - // Return only the bytes of the blob in the specified range. - Range *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. 
-type PageBlobClientResizeOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber -// method. -type PageBlobClientUpdateSequenceNumberOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. -type PageBlobClientUploadPagesFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. -type PageBlobClientUploadPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. 
-type SequenceNumberAccessConditions struct { - // Specify this header value to operate only on a blob if it has the specified sequence number. - IfSequenceNumberEqualTo *int64 - - // Specify this header value to operate only on a blob if it has a sequence number less than the specified. - IfSequenceNumberLessThan *int64 - - // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. - IfSequenceNumberLessThanOrEqualTo *int64 -} - -// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. -type ServiceClientFilterBlobsOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []FilterBlobsIncludeItem - - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. -type ServiceClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. -type ServiceClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. -type ServiceClientGetStatisticsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. -type ServiceClientGetUserDelegationKeyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager -// method. -type ServiceClientListContainersSegmentOptions struct { - // Include this parameter to specify that the container's metadata be returned as part of the response body. - Include []ListContainersIncludeType - - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. -type ServiceClientSetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
-type ServiceClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. -type SourceModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - SourceIfMatch *azcore.ETag - - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - SourceIfModifiedSince *time.Time - - // Specify an ETag value to operate only on blobs without a matching value. - SourceIfNoneMatch *azcore.ETag - - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - SourceIfTags *string - - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - SourceIfUnmodifiedSince *time.Time -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go deleted file mode 100644 index cb6a19f7a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ /dev/null @@ -1,1295 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "io" - "net/http" - "strconv" - "time" -) - -// PageBlobClient contains the methods for the PageBlob group. -// Don't use this type directly, use a constructor function instead. -type PageBlobClient struct { - internal *azcore.Client - endpoint string -} - -// ClearPages - The Clear Pages operation clears a set of pages from a page blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. -// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { - var err error - req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return PageBlobClientClearPagesResponse{}, err - } - resp, err := client.clearPagesHandleResponse(httpResp) - return resp, err -} - -// clearPagesCreateRequest creates the ClearPages request. -func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "page") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"clear"} - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { - req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = 
[]string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// clearPagesHandleResponse handles the ClearPages response. -func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { - result := PageBlobClientClearPagesResponse{} - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. -// The snapshot is copied such that only the differential changes between the previously copied -// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can -// be read or copied from as usual. This API is supported since REST version -// 2016-05-31. 
-// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// - options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental -// method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { - var err error - req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return PageBlobClientCopyIncrementalResponse{}, err - } - resp, err := client.copyIncrementalHandleResponse(httpResp) - return resp, err -} - -// copyIncrementalCreateRequest creates the CopyIncremental request. -func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "incrementalcopy") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// copyIncrementalHandleResponse handles the CopyIncremental response. 
-func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { - result := PageBlobClientCopyIncrementalResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// Create - The Create operation creates a new page blob. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. -// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// - options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. -// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { - var err error - req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return PageBlobClientCreateResponse{}, err - } - resp, err := client.createHandleResponse(httpResp) - return resp, err -} - -// createCreateRequest creates the Create request. 
-func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { - req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { - req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} 
- } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - if options != nil && options.BlobSequenceNumber != nil { - req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold != nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// createHandleResponse handles the Create response. -func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { - result := PageBlobClientCreateResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } - return result, nil -} - -// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot -// of a page blob -// -// Generated from API version 2023-11-03 -// - options - 
PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager -// method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] { - return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{ - More: func(page PageBlobClientGetPageRangesResponse) bool { - return page.NextMarker != nil && len(*page.NextMarker) > 0 - }, - Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { - nextLink := "" - if page != nil { - nextLink = *page.NextMarker - } - resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { - return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - }, nil) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - return client.GetPageRangesHandleResponse(resp) - }, - }) -} - -// GetPageRangesCreateRequest creates the GetPageRanges request. -func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "pagelist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - if options != nil && options.Marker != nil { - reqQP.Set("marker", *options.Marker) - } - if options != nil && options.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != 
nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// GetPageRangesHandleResponse handles the GetPageRanges response. -func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { - result := PageBlobClientGetPageRangesResponse{} - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - result.BlobContentLength = &blobContentLength - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - return result, nil -} - -// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that -// were changed between target blob and previous snapshot. -// -// Generated from API version 2023-11-03 -// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager -// method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] { - return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{ - More: func(page PageBlobClientGetPageRangesDiffResponse) bool { - return page.NextMarker != nil && len(*page.NextMarker) > 0 - }, - Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { - nextLink := "" - if page != nil { - nextLink = *page.NextMarker - } - resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { - return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - }, nil) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - return client.GetPageRangesDiffHandleResponse(resp) - }, - }) -} - -// GetPageRangesDiffCreateRequest creates the GetPageRangesDiff request. -func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "pagelist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - if options != nil && options.Prevsnapshot != nil { - reqQP.Set("prevsnapshot", *options.Prevsnapshot) - } - if options != nil && options.Marker != nil { - reqQP.Set("marker", *options.Marker) - } - if options != nil && options.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.PrevSnapshotURL != nil { - req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} - } - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = 
[]string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. -func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { - result := PageBlobClientGetPageRangesDiffResponse{} - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - result.BlobContentLength = &blobContentLength - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - return result, nil -} - -// Resize - Resize the Blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { - var err error - req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return PageBlobClientResizeResponse{}, err - } - resp, err := client.resizeHandleResponse(httpResp) - return resp, err -} - -// resizeCreateRequest creates the Resize request. -func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "properties") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// resizeHandleResponse handles the Resize response. -func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { - result := PageBlobClientResizeResponse{} - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// UpdateSequenceNumber - Update the sequence number of the blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to -// page blobs only. This property indicates how the service should modify the blob's sequence number -// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber -// method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { - var err error - req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - resp, err := client.updateSequenceNumberHandleResponse(httpResp) - return resp, err -} - -// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
-func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "properties") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} - if options != nil && options.BlobSequenceNumber != nil { - req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
-func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { - result := PageBlobClientUpdateSequenceNumberResponse{} - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// UploadPages - The Upload Pages operation writes a range of pages to a page blob -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. -// - body - Initial data -// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { - var err error - req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return PageBlobClientUploadPagesResponse{}, err - } - resp, err := client.uploadPagesHandleResponse(httpResp) - return resp, err -} - -// uploadPagesCreateRequest creates the UploadPages request. 
-func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "page") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"update"} - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} - } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { - req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := req.SetBody(body, "application/octet-stream"); err != nil { - return nil, err - } - return req, nil -} - -// uploadPagesHandleResponse handles the UploadPages response. -func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { - result := PageBlobClientUploadPagesResponse{} - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from -// a URL -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - sourceURL - Specify a URL to the copy source. -// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header -// and x-ms-range/Range destination range header. -// - contentLength - The length of the request. -// - rangeParam - The range of bytes to which the source range would be written. 
The range should be 512 aligned and range-end -// is required. -// - options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL -// method. -// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. -// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { - var err error - req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusCreated) { - err = runtime.NewResponseError(httpResp) - return PageBlobClientUploadPagesFromURLResponse{}, err - } - resp, err := client.uploadPagesFromURLHandleResponse(httpResp) - return resp, err -} - -// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
-func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "page") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"update"} - req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - req.Raw().Header["x-ms-source-range"] = []string{sourceRange} - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} - } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} - } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["x-ms-range"] = []string{rangeParam} - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { - req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} - } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
-func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { - result := PageBlobClientUploadPagesFromURLResponse{} - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go deleted file mode 100644 index 738d23c8f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ /dev/null @@ -1,2016 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "io" - "time" -) - -// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. -type AppendBlobClientAppendBlockFromURLResponse struct { - // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. - BlobAppendOffset *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. 
- ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. -type AppendBlobClientAppendBlockResponse struct { - // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. - BlobAppendOffset *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. -type AppendBlobClientCreateResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
- EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. -type AppendBlobClientSealResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // IsSealed contains the information returned from the x-ms-blob-sealed header response. - IsSealed *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. -type BlobClientAbortCopyFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. -type BlobClientAcquireLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. -type BlobClientBreakLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. 
- Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseTime contains the information returned from the x-ms-lease-time header response. - LeaseTime *int32 - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. -type BlobClientChangeLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. -type BlobClientCopyFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. -type BlobClientCreateSnapshotResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. 
- ETag *azcore.ETag - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Snapshot contains the information returned from the x-ms-snapshot header response. - Snapshot *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. -type BlobClientDeleteImmutabilityPolicyResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientDeleteResponse contains the response from method BlobClient.Delete. -type BlobClientDeleteResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientDownloadResponse contains the response from method BlobClient.Download. -type BlobClientDownloadResponse struct { - // AcceptRanges contains the information returned from the Accept-Ranges header response. - AcceptRanges *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. - BlobContentMD5 []byte - - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // BlobType contains the information returned from the x-ms-blob-type header response. - BlobType *BlobType - - // Body contains the streaming response. - Body io.ReadCloser - - // CacheControl contains the information returned from the Cache-Control header response. - CacheControl *string - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentDisposition contains the information returned from the Content-Disposition header response. - ContentDisposition *string - - // ContentEncoding contains the information returned from the Content-Encoding header response. - ContentEncoding *string - - // ContentLanguage contains the information returned from the Content-Language header response. 
- ContentLanguage *string - - // ContentLength contains the information returned from the Content-Length header response. - ContentLength *int64 - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // ContentRange contains the information returned from the Content-Range header response. - ContentRange *string - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. - CopyCompletionTime *time.Time - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyProgress contains the information returned from the x-ms-copy-progress header response. - CopyProgress *string - - // CopySource contains the information returned from the x-ms-copy-source header response. - CopySource *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. - CopyStatusDescription *string - - // CreationTime contains the information returned from the x-ms-creation-time header response. - CreationTime *time.Time - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // ErrorCode contains the information returned from the x-ms-error-code header response. - ErrorCode *string - - // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. - ImmutabilityPolicyExpiresOn *time.Time - - // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *ImmutabilityPolicyMode - - // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. - IsCurrentVersion *bool - - // IsSealed contains the information returned from the x-ms-blob-sealed header response. - IsSealed *bool - - // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. - IsServerEncrypted *bool - - // LastAccessed contains the information returned from the x-ms-last-access-time header response. - LastAccessed *time.Time - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseDuration contains the information returned from the x-ms-lease-duration header response. - LeaseDuration *LeaseDurationType - - // LeaseState contains the information returned from the x-ms-lease-state header response. - LeaseState *LeaseStateType - - // LeaseStatus contains the information returned from the x-ms-lease-status header response. - LeaseStatus *LeaseStatusType - - // LegalHold contains the information returned from the x-ms-legal-hold header response. - LegalHold *bool - - // Metadata contains the information returned from the x-ms-meta header response. 
- Metadata map[string]*string - - // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. - ObjectReplicationPolicyID *string - - // ObjectReplicationRules contains the information returned from the x-ms-or header response. - ObjectReplicationRules map[string]*string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // TagCount contains the information returned from the x-ms-tag-count header response. - TagCount *int64 - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. -type BlobClientGetAccountInfoResponse struct { - // AccountKind contains the information returned from the x-ms-account-kind header response. - AccountKind *AccountKind - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // SKUName contains the information returned from the x-ms-sku-name header response. - SKUName *SKUName - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. -type BlobClientGetPropertiesResponse struct { - // AcceptRanges contains the information returned from the Accept-Ranges header response. - AcceptRanges *string - - // AccessTier contains the information returned from the x-ms-access-tier header response. - AccessTier *string - - // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. - AccessTierChangeTime *time.Time - - // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. - AccessTierInferred *bool - - // ArchiveStatus contains the information returned from the x-ms-archive-status header response. - ArchiveStatus *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // BlobType contains the information returned from the x-ms-blob-type header response. - BlobType *BlobType - - // CacheControl contains the information returned from the Cache-Control header response. - CacheControl *string - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentDisposition contains the information returned from the Content-Disposition header response. - ContentDisposition *string - - // ContentEncoding contains the information returned from the Content-Encoding header response. - ContentEncoding *string - - // ContentLanguage contains the information returned from the Content-Language header response. - ContentLanguage *string - - // ContentLength contains the information returned from the Content-Length header response. 
- ContentLength *int64 - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. - CopyCompletionTime *time.Time - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyProgress contains the information returned from the x-ms-copy-progress header response. - CopyProgress *string - - // CopySource contains the information returned from the x-ms-copy-source header response. - CopySource *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. - CopyStatusDescription *string - - // CreationTime contains the information returned from the x-ms-creation-time header response. - CreationTime *time.Time - - // Date contains the information returned from the Date header response. - Date *time.Time - - // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. - DestinationSnapshot *string - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // ExpiresOn contains the information returned from the x-ms-expiry-time header response. - ExpiresOn *time.Time - - // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. - ImmutabilityPolicyExpiresOn *time.Time - - // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *ImmutabilityPolicyMode - - // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. - IsCurrentVersion *bool - - // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. - IsIncrementalCopy *bool - - // IsSealed contains the information returned from the x-ms-blob-sealed header response. - IsSealed *bool - - // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. - IsServerEncrypted *bool - - // LastAccessed contains the information returned from the x-ms-last-access-time header response. - LastAccessed *time.Time - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseDuration contains the information returned from the x-ms-lease-duration header response. - LeaseDuration *LeaseDurationType - - // LeaseState contains the information returned from the x-ms-lease-state header response. - LeaseState *LeaseStateType - - // LeaseStatus contains the information returned from the x-ms-lease-status header response. - LeaseStatus *LeaseStatusType - - // LegalHold contains the information returned from the x-ms-legal-hold header response. - LegalHold *bool - - // Metadata contains the information returned from the x-ms-meta header response. 
- Metadata map[string]*string - - // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. - ObjectReplicationPolicyID *string - - // ObjectReplicationRules contains the information returned from the x-ms-or header response. - ObjectReplicationRules map[string]*string - - // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. - RehydratePriority *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // TagCount contains the information returned from the x-ms-tag-count header response. - TagCount *int64 - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. -type BlobClientGetTagsResponse struct { - // Blob tags - BlobTags - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientQueryResponse contains the response from method BlobClient.Query. -type BlobClientQueryResponse struct { - // AcceptRanges contains the information returned from the Accept-Ranges header response. - AcceptRanges *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. - BlobContentMD5 []byte - - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // BlobType contains the information returned from the x-ms-blob-type header response. - BlobType *BlobType - - // Body contains the streaming response. - Body io.ReadCloser - - // CacheControl contains the information returned from the Cache-Control header response. - CacheControl *string - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentDisposition contains the information returned from the Content-Disposition header response. - ContentDisposition *string - - // ContentEncoding contains the information returned from the Content-Encoding header response. - ContentEncoding *string - - // ContentLanguage contains the information returned from the Content-Language header response. - ContentLanguage *string - - // ContentLength contains the information returned from the Content-Length header response. - ContentLength *int64 - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // ContentRange contains the information returned from the Content-Range header response. - ContentRange *string - - // ContentType contains the information returned from the Content-Type header response. 
- ContentType *string - - // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. - CopyCompletionTime *time.Time - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyProgress contains the information returned from the x-ms-copy-progress header response. - CopyProgress *string - - // CopySource contains the information returned from the x-ms-copy-source header response. - CopySource *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. - CopyStatusDescription *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseDuration contains the information returned from the x-ms-lease-duration header response. - LeaseDuration *LeaseDurationType - - // LeaseState contains the information returned from the x-ms-lease-state header response. - LeaseState *LeaseStateType - - // LeaseStatus contains the information returned from the x-ms-lease-status header response. - LeaseStatus *LeaseStatusType - - // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]*string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. -type BlobClientReleaseLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. -type BlobClientRenewLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. 
- ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. -type BlobClientSetExpiryResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. -type BlobClientSetHTTPHeadersResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. -type BlobClientSetImmutabilityPolicyResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. - ImmutabilityPolicyExpiry *time.Time - - // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *ImmutabilityPolicyMode - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. -type BlobClientSetLegalHoldResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. 
- Date *time.Time - - // LegalHold contains the information returned from the x-ms-legal-hold header response. - LegalHold *bool - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. -type BlobClientSetMetadataResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. -type BlobClientSetTagsResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. -type BlobClientSetTierResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. -type BlobClientStartCopyFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. 
- LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. -type BlobClientUndeleteResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. -type BlockBlobClientCommitBlockListResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. -type BlockBlobClientGetBlockListResponse struct { - BlockList - - // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. 
- RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. -type BlockBlobClientPutBlobFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. -type BlockBlobClientStageBlockFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. -type BlockBlobClientStageBlockResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. 
- Date *time.Time - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. -type BlockBlobClientUploadResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. -type ContainerClientAcquireLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. -type ContainerClientBreakLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. 
- ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseTime contains the information returned from the x-ms-lease-time header response. - LeaseTime *int32 - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. -type ContainerClientChangeLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientCreateResponse contains the response from method ContainerClient.Create. -type ContainerClientCreateResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. -type ContainerClientDeleteResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. -type ContainerClientFilterBlobsResponse struct { - // The result of a Filter Blobs API call - FilterBlobSegment - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. 
- Version *string -} - -// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. -type ContainerClientGetAccessPolicyResponse struct { - // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. - BlobPublicAccess *PublicAccessType - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // a collection of signed identifiers - SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. -type ContainerClientGetAccountInfoResponse struct { - // AccountKind contains the information returned from the x-ms-account-kind header response. - AccountKind *AccountKind - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // SKUName contains the information returned from the x-ms-sku-name header response. - SKUName *SKUName - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. -type ContainerClientGetPropertiesResponse struct { - // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. - BlobPublicAccess *PublicAccessType - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. - DefaultEncryptionScope *string - - // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. - DenyEncryptionScopeOverride *bool - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. - HasImmutabilityPolicy *bool - - // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. - HasLegalHold *bool - - // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled - // header response. - IsImmutableStorageWithVersioningEnabled *bool - - // LastModified contains the information returned from the Last-Modified header response. 
- LastModified *time.Time - - // LeaseDuration contains the information returned from the x-ms-lease-duration header response. - LeaseDuration *LeaseDurationType - - // LeaseState contains the information returned from the x-ms-lease-state header response. - LeaseState *LeaseStateType - - // LeaseStatus contains the information returned from the x-ms-lease-status header response. - LeaseStatus *LeaseStatusType - - // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]*string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. -type ContainerClientListBlobFlatSegmentResponse struct { - // An enumeration of blobs - ListBlobsFlatSegmentResponse - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. -type ContainerClientListBlobHierarchySegmentResponse struct { - // An enumeration of blobs - ListBlobsHierarchySegmentResponse - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. -type ContainerClientReleaseLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. -type ContainerClientRenameResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
- ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. -type ContainerClientRenewLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. -type ContainerClientRestoreResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. -type ContainerClientSetAccessPolicyResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. -type ContainerClientSetMetadataResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. 
- Version *string -} - -// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. -type ContainerClientSubmitBatchResponse struct { - // Body contains the streaming response. - Body io.ReadCloser - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. -type PageBlobClientClearPagesResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. -type PageBlobClientCopyIncrementalResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. -type PageBlobClientCreateResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
- EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. -type PageBlobClientGetPageRangesDiffResponse struct { - // the list of pages - PageList - - // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. -type PageBlobClientGetPageRangesResponse struct { - // the list of pages - PageList - - // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. -type PageBlobClientResizeResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. 
- LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. -type PageBlobClientUpdateSequenceNumberResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. -type PageBlobClientUploadPagesFromURLResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. -type PageBlobClientUploadPagesResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. 
- ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. -type ServiceClientFilterBlobsResponse struct { - // The result of a Filter Blobs API call - FilterBlobSegment - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. -type ServiceClientGetAccountInfoResponse struct { - // AccountKind contains the information returned from the x-ms-account-kind header response. - AccountKind *AccountKind - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. - IsHierarchicalNamespaceEnabled *bool - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // SKUName contains the information returned from the x-ms-sku-name header response. - SKUName *SKUName - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. -type ServiceClientGetPropertiesResponse struct { - // Storage Service Properties. - StorageServiceProperties - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. -type ServiceClientGetStatisticsResponse struct { - // Stats for the storage service. - StorageServiceStats - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. 
- RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. -type ServiceClientGetUserDelegationKeyResponse struct { - // A user delegation key - UserDelegationKey - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. -type ServiceClientListContainersSegmentResponse struct { - // An enumeration of containers - ListContainersSegmentResponse - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. -type ServiceClientSetPropertiesResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch. -type ServiceClientSubmitBatchResponse struct { - // Body contains the streaming response. - Body io.ReadCloser - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go deleted file mode 100644 index c792fbf09..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ /dev/null @@ -1,580 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -package generated - -import ( - "context" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "io" - "net/http" - "strconv" - "strings" - "time" -) - -// ServiceClient contains the methods for the Service group. -// Don't use this type directly, use a constructor function instead. -type ServiceClient struct { - internal *azcore.Client - endpoint string -} - -// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search -// expression. Filter blobs searches across all containers within a storage account but can -// be scoped within the expression to a single container. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - where - Filters the results to return only to return only blobs whose tags match the specified expression. -// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. -func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { - var err error - req, err := client.filterBlobsCreateRequest(ctx, where, options) - if err != nil { - return ServiceClientFilterBlobsResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceClientFilterBlobsResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ServiceClientFilterBlobsResponse{}, err - } - resp, err := client.filterBlobsHandleResponse(httpResp) - return resp, err -} - -// filterBlobsCreateRequest creates the FilterBlobs request. -func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "blobs") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - reqQP.Set("where", where) - if options != nil && options.Marker != nil { - reqQP.Set("marker", *options.Marker) - } - if options != nil && options.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) - } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) - } - req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// filterBlobsHandleResponse handles the FilterBlobs response. 
-func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceClientFilterBlobsResponse, error) { - result := ServiceClientFilterBlobsResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ServiceClientFilterBlobsResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { - return ServiceClientFilterBlobsResponse{}, err - } - return result, nil -} - -// GetAccountInfo - Returns the sku name and account kind -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. -func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { - var err error - req, err := client.getAccountInfoCreateRequest(ctx, options) - if err != nil { - return ServiceClientGetAccountInfoResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceClientGetAccountInfoResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ServiceClientGetAccountInfoResponse{}, err - } - resp, err := client.getAccountInfoHandleResponse(httpResp) - return resp, err -} - -// getAccountInfoCreateRequest creates the GetAccountInfo request. -func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") - reqQP.Set("comp", "properties") - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getAccountInfoHandleResponse handles the GetAccountInfo response. 
-func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { - result := ServiceClientGetAccountInfoResponse{} - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ServiceClientGetAccountInfoResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { - isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) - if err != nil { - return ServiceClientGetAccountInfoResponse{}, err - } - result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and -// CORS (Cross-Origin Resource Sharing) rules. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. -func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { - var err error - req, err := client.getPropertiesCreateRequest(ctx, options) - if err != nil { - return ServiceClientGetPropertiesResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceClientGetPropertiesResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ServiceClientGetPropertiesResponse{}, err - } - resp, err := client.getPropertiesHandleResponse(httpResp) - return resp, err -} - -// getPropertiesCreateRequest creates the GetProperties request. -func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") - reqQP.Set("comp", "properties") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getPropertiesHandleResponse handles the GetProperties response. 
-func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) { - result := ServiceClientGetPropertiesResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { - return ServiceClientGetPropertiesResponse{}, err - } - return result, nil -} - -// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary -// location endpoint when read-access geo-redundant replication is enabled for the storage account. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. -func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { - var err error - req, err := client.getStatisticsCreateRequest(ctx, options) - if err != nil { - return ServiceClientGetStatisticsResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceClientGetStatisticsResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ServiceClientGetStatisticsResponse{}, err - } - resp, err := client.getStatisticsHandleResponse(httpResp) - return resp, err -} - -// getStatisticsCreateRequest creates the GetStatistics request. -func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") - reqQP.Set("comp", "stats") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// getStatisticsHandleResponse handles the GetStatistics response. 
-func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) { - result := ServiceClientGetStatisticsResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ServiceClientGetStatisticsResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { - return ServiceClientGetStatisticsResponse{}, err - } - return result, nil -} - -// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using -// bearer token authentication. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - keyInfo - Key information -// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey -// method. -func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { - var err error - req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) - if err != nil { - return ServiceClientGetUserDelegationKeyResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceClientGetUserDelegationKeyResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusOK) { - err = runtime.NewResponseError(httpResp) - return ServiceClientGetUserDelegationKeyResponse{}, err - } - resp, err := client.getUserDelegationKeyHandleResponse(httpResp) - return resp, err -} - -// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. -func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") - reqQP.Set("comp", "userdelegationkey") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := runtime.MarshalAsXML(req, keyInfo); err != nil { - return nil, err - } - return req, nil -} - -// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. 
-func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceClientGetUserDelegationKeyResponse, error) { - result := ServiceClientGetUserDelegationKeyResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ServiceClientGetUserDelegationKeyResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { - return ServiceClientGetUserDelegationKeyResponse{}, err - } - return result, nil -} - -// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified -// account -// -// Generated from API version 2023-11-03 -// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager -// method. -// -// listContainersSegmentCreateRequest creates the ListContainersSegment request. -func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) - } - if options != nil && options.Marker != nil { - reqQP.Set("marker", *options.Marker) - } - if options != nil && options.Maxresults != nil { - reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) - } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// listContainersSegmentHandleResponse handles the ListContainersSegment response. -func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Response) (ServiceClientListContainersSegmentResponse, error) { - result := ServiceClientListContainersSegmentResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil { - return ServiceClientListContainersSegmentResponse{}, err - } - return result, nil -} - -// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics -// and CORS (Cross-Origin Resource Sharing) rules -// If the operation fails it returns an *azcore.ResponseError type. 
-// -// Generated from API version 2023-11-03 -// - storageServiceProperties - The StorageService properties. -// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. -func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { - var err error - req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) - if err != nil { - return ServiceClientSetPropertiesResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceClientSetPropertiesResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return ServiceClientSetPropertiesResponse{}, err - } - resp, err := client.setPropertiesHandleResponse(httpResp) - return resp, err -} - -// setPropertiesCreateRequest creates the SetProperties request. -func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") - reqQP.Set("comp", "properties") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { - return nil, err - } - return req, nil -} - -// setPropertiesHandleResponse handles the SetProperties response. -func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) { - result := ServiceClientSetPropertiesResponse{} - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} - -// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. -// If the operation fails it returns an *azcore.ResponseError type. -// -// Generated from API version 2023-11-03 -// - contentLength - The length of the request. -// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header -// value: multipart/mixed; boundary=batch_ -// - body - Initial data -// - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
-func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { - var err error - req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) - if err != nil { - return ServiceClientSubmitBatchResponse{}, err - } - httpResp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ServiceClientSubmitBatchResponse{}, err - } - if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { - err = runtime.NewResponseError(httpResp) - return ServiceClientSubmitBatchResponse{}, err - } - resp, err := client.submitBatchHandleResponse(httpResp) - return resp, err -} - -// submitBatchCreateRequest creates the SubmitBatch request. -func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (*policy.Request, error) { - req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) - if err != nil { - return nil, err - } - reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "batch") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - req.Raw().URL.RawQuery = reqQP.Encode() - runtime.SkipBodyDownload(req) - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} - if err := req.SetBody(body, multipartContentType); err != nil { - return nil, err - } - return req, nil -} - -// submitBatchHandleResponse handles the SubmitBatch response. -func (client *ServiceClient) submitBatchHandleResponse(resp *http.Response) (ServiceClientSubmitBatchResponse, error) { - result := ServiceClientSubmitBatchResponse{Body: resp.Body} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go deleted file mode 100644 index 586650329..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -package generated - -import ( - "strings" - "time" -) - -const ( - dateTimeRFC1123JSON = `"` + time.RFC1123 + `"` -) - -type dateTimeRFC1123 time.Time - -func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) { - b := []byte(time.Time(t).Format(dateTimeRFC1123JSON)) - return b, nil -} - -func (t dateTimeRFC1123) MarshalText() ([]byte, error) { - b := []byte(time.Time(t).Format(time.RFC1123)) - return b, nil -} - -func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { - p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data))) - *t = dateTimeRFC1123(p) - return err -} - -func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { - p, err := time.Parse(time.RFC1123, string(data)) - *t = dateTimeRFC1123(p) - return err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go deleted file mode 100644 index 82b370133..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -package generated - -import ( - "regexp" - "strings" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) - -const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` -) - -type dateTimeRFC3339 time.Time - -func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { - tt := time.Time(t) - return tt.MarshalJSON() -} - -func (t dateTimeRFC3339) MarshalText() ([]byte, error) { - tt := time.Time(t) - return tt.MarshalText() -} - -func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { - layout = dateTimeJSON - } - return t.Parse(layout, string(data)) -} - -func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { - layout = time.RFC3339Nano - } - return t.Parse(layout, string(data)) -} - -func (t *dateTimeRFC3339) Parse(layout, value string) error { - p, err := time.Parse(layout, strings.ToUpper(value)) - *t = dateTimeRFC3339(p) - return err -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go deleted file mode 100644 index 1bd0e4de0..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -package generated - -import ( - "encoding/xml" - "errors" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "io" - "strings" -) - -type additionalProperties map[string]*string - -// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. -func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - tokName := "" - tokValue := "" - for { - t, err := d.Token() - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return err - } - switch tt := t.(type) { - case xml.StartElement: - tokName = strings.ToLower(tt.Name.Local) - tokValue = "" - case xml.CharData: - if tokName == "" { - continue - } - tokValue = string(tt) - case xml.EndElement: - if tokName == "" { - continue - } - if *ap == nil { - *ap = additionalProperties{} - } - (*ap)[tokName] = to.Ptr(tokValue) - tokName = "" - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go deleted file mode 100644 index 9f95ad8ae..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package shared - -import ( - "context" - "errors" -) - -const ( - DefaultConcurrency = 5 -) - -// BatchTransferOptions identifies options used by doBatchTransfer. -type BatchTransferOptions struct { - TransferSize int64 - ChunkSize int64 - NumChunks uint64 - Concurrency uint16 - Operation func(ctx context.Context, offset int64, chunkSize int64) error - OperationName string -} - -// DoBatchTransfer helps to execute operations in a batch manner. -// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) -func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { - if o.ChunkSize == 0 { - return errors.New("ChunkSize cannot be 0") - } - - if o.Concurrency == 0 { - o.Concurrency = DefaultConcurrency // default concurrency - } - - // Prepare and do parallel operations. - operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently - operationResponseChannel := make(chan error, o.NumChunks) // Holds each response - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Create the goroutines that process each operation (in parallel). - for g := uint16(0); g < o.Concurrency; g++ { - //grIndex := g - go func() { - for f := range operationChannel { - err := f() - operationResponseChannel <- err - } - }() - } - - // Add each chunk's operation to the channel. - for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { - curChunkSize := o.ChunkSize - - if chunkNum == o.NumChunks-1 { // Last chunk - curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total - } - offset := int64(chunkNum) * o.ChunkSize - operationChannel <- func() error { - return o.Operation(ctx, offset, curChunkSize) - } - } - close(operationChannel) - - // Wait for the operations to complete. 
- var firstErr error = nil - for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { - responseError := <-operationResponseChannel - // record the first error (the original error which should cause the other chunks to fail with canceled context) - if responseError != nil && firstErr == nil { - cancel() // As soon as any operation fails, cancel all remaining operation calls - firstErr = responseError - } - } - return firstErr -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go deleted file mode 100644 index e3aa4a488..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go +++ /dev/null @@ -1,70 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package shared - -type BufferManager[T ~[]byte] interface { - // Acquire returns the channel that contains the pool of buffers. - Acquire() <-chan T - - // Release releases the buffer back to the pool for reuse/cleanup. - Release(T) - - // Grow grows the number of buffers, up to the predefined max. - // It returns the total number of buffers or an error. - // No error is returned if the number of buffers has reached max. - // This is called only from the reading goroutine. - Grow() (int, error) - - // Free cleans up all buffers. - Free() -} - -// mmbPool implements the bufferManager interface. -// it uses anonymous memory mapped files for buffers. -// don't use this type directly, use newMMBPool() instead. -type mmbPool struct { - buffers chan Mmb - count int - max int - size int64 -} - -func NewMMBPool(maxBuffers int, bufferSize int64) BufferManager[Mmb] { - return &mmbPool{ - buffers: make(chan Mmb, maxBuffers), - max: maxBuffers, - size: bufferSize, - } -} - -func (pool *mmbPool) Acquire() <-chan Mmb { - return pool.buffers -} - -func (pool *mmbPool) Grow() (int, error) { - if pool.count < pool.max { - buffer, err := NewMMB(pool.size) - if err != nil { - return 0, err - } - pool.buffers <- buffer - pool.count++ - } - return pool.count, nil -} - -func (pool *mmbPool) Release(buffer Mmb) { - pool.buffers <- buffer -} - -func (pool *mmbPool) Free() { - for i := 0; i < pool.count; i++ { - buffer := <-pool.buffers - buffer.Delete() - } - pool.count = 0 -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go deleted file mode 100644 index 8d4d35bde..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package shared - -import ( - "errors" -) - -type bytesWriter []byte - -func NewBytesWriter(b []byte) bytesWriter { - return b -} - -func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { - if off >= int64(len(c)) || off < 0 { - return 0, errors.New("offset value is out of range") - } - - n := copy(c[int(off):], b) - if n < len(b) { - return n, errors.New("not enough space for all bytes") - } - - return n, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go deleted file mode 100644 index 1c81b9db9..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go +++ /dev/null @@ -1,113 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package shared - -import ( - "errors" - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "net/http" - "strings" -) - -type storageAuthorizer struct { - scopes []string - tenantID string -} - -func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string) policy.Policy { - s := storageAuthorizer{scopes: []string{audience}} - return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{ - AuthorizationHandler: policy.AuthorizationHandler{ - OnRequest: s.onRequest, - OnChallenge: s.onChallenge, - }, - }) -} - -func (s *storageAuthorizer) onRequest(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { - return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) -} - -func (s *storageAuthorizer) onChallenge(req *policy.Request, resp *http.Response, authNZ func(policy.TokenRequestOptions) error) error { - // parse the challenge - err := s.parseChallenge(resp) - if err != nil { - return err - } - // TODO: Set tenantID when policy.TokenRequestOptions supports it. 
https://github.com/Azure/azure-sdk-for-go/issues/19841 - return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) -} - -type challengePolicyError struct { - err error -} - -func (c *challengePolicyError) Error() string { - return c.err.Error() -} - -func (*challengePolicyError) NonRetriable() { - // marker method -} - -func (c *challengePolicyError) Unwrap() error { - return c.err -} - -// parses Tenant ID from auth challenge -// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/authorize -func parseTenant(url string) string { - if url == "" { - return "" - } - parts := strings.Split(url, "/") - if len(parts) >= 3 { - tenant := parts[3] - tenant = strings.ReplaceAll(tenant, ",", "") - return tenant - } else { - return "" - } -} - -func (s *storageAuthorizer) parseChallenge(resp *http.Response) error { - authHeader := resp.Header.Get("WWW-Authenticate") - if authHeader == "" { - return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} - } - - // Strip down to auth and resource - // Format is "Bearer authorization_uri=\"\" resource_id=\"\"" - authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") - - parts := strings.Split(authHeader, " ") - - vals := map[string]string{} - for _, part := range parts { - subParts := strings.Split(part, "=") - if len(subParts) == 2 { - stripped := strings.ReplaceAll(subParts[1], "\"", "") - stripped = strings.TrimSuffix(stripped, ",") - vals[subParts[0]] = stripped - } - } - - s.tenantID = parseTenant(vals["authorization_uri"]) - - scope := vals["resource_id"] - if scope == "" { - return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} - } - - if !strings.HasSuffix(scope, "/.default") { - scope += "/.default" - } - s.scopes = []string{scope} - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go deleted file mode 100644 index cdcadf311..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix) -// +build go1.18 -// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package shared - -import ( - "fmt" - "os" - "syscall" -) - -// mmb is a memory mapped buffer -type Mmb []byte - -// newMMB creates a new memory mapped buffer with the specified size -func NewMMB(size int64) (Mmb, error) { - prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE - addr, err := syscall.Mmap(-1, 0, int(size), prot, flags) - if err != nil { - return nil, os.NewSyscallError("Mmap", err) - } - return Mmb(addr), nil -} - -// delete cleans up the memory mapped buffer -func (m *Mmb) Delete() { - err := syscall.Munmap(*m) - *m = nil - if err != nil { - // if we get here, there is likely memory corruption. 
- // please open an issue https://github.com/Azure/azure-sdk-for-go/issues - panic(fmt.Sprintf("Munmap error: %v", err)) - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go deleted file mode 100644 index ef9fdc2a1..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package shared - -import ( - "fmt" - "os" - "reflect" - "syscall" - "unsafe" -) - -// Mmb is a memory mapped buffer -type Mmb []byte - -// NewMMB creates a new memory mapped buffer with the specified size -func NewMMB(size int64) (Mmb, error) { - const InvalidHandleValue = ^uintptr(0) // -1 - - prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) - hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil) - if err != nil { - return nil, os.NewSyscallError("CreateFileMapping", err) - } - defer func() { - _ = syscall.CloseHandle(hMMF) - }() - - addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size)) - if err != nil { - return nil, os.NewSyscallError("MapViewOfFile", err) - } - - m := Mmb{} - h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) - h.Data = addr - h.Len = int(size) - h.Cap = h.Len - return m, nil -} - -// Delete cleans up the memory mapped buffer -func (m *Mmb) Delete() { - addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) - *m = Mmb{} - err := syscall.UnmapViewOfFile(addr) - if err != nil { - // if we get here, there is likely memory corruption. - // please open an issue https://github.com/Azure/azure-sdk-for-go/issues - panic(fmt.Sprintf("UnmapViewOfFile error: %v", err)) - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go deleted file mode 100644 index c8528a2e3..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package shared - -import ( - "errors" - "io" -) - -type SectionWriter struct { - Count int64 - Offset int64 - Position int64 - WriterAt io.WriterAt -} - -func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { - return &SectionWriter{ - Count: count, - Offset: off, - WriterAt: c, - } -} - -func (c *SectionWriter) Write(p []byte) (int, error) { - remaining := c.Count - c.Position - - if remaining <= 0 { - return 0, errors.New("end of section reached") - } - - slice := p - - if int64(len(slice)) > remaining { - slice = slice[:remaining] - } - - n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) - c.Position += int64(n) - if err != nil { - return n, err - } - - if len(p) > n { - return n, errors.New("not enough space for all bytes") - } - - return n, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go deleted file mode 100644 index c7922076f..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ /dev/null @@ -1,271 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package shared - -import ( - "errors" - "fmt" - "hash/crc64" - "io" - "net" - "net/url" - "strconv" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -const ( - TokenScope = "https://storage.azure.com/.default" -) - -const ( - HeaderAuthorization = "Authorization" - HeaderXmsDate = "x-ms-date" - HeaderContentLength = "Content-Length" - HeaderContentEncoding = "Content-Encoding" - HeaderContentLanguage = "Content-Language" - HeaderContentType = "Content-Type" - HeaderContentMD5 = "Content-MD5" - HeaderIfModifiedSince = "If-Modified-Since" - HeaderIfMatch = "If-Match" - HeaderIfNoneMatch = "If-None-Match" - HeaderIfUnmodifiedSince = "If-Unmodified-Since" - HeaderRange = "Range" - HeaderXmsVersion = "x-ms-version" - HeaderXmsRequestID = "x-ms-request-id" -) - -const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 - -var CRC64Table = crc64.MakeTable(crc64Polynomial) - -// CopyOptions returns a zero-value T if opts is nil. -// If opts is not nil, a copy is made and its address returned. -func CopyOptions[T any](opts *T) *T { - if opts == nil { - return new(T) - } - cp := *opts - return &cp -} - -var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + - "should contain key value pairs separated by semicolons. 
For example 'DefaultEndpointsProtocol=https;AccountName=;" + - "AccountKey=;EndpointSuffix=core.windows.net'") - -type ParsedConnectionString struct { - ServiceURL string - AccountName string - AccountKey string -} - -func ParseConnectionString(connectionString string) (ParsedConnectionString, error) { - const ( - defaultScheme = "https" - defaultSuffix = "core.windows.net" - ) - - connStrMap := make(map[string]string) - connectionString = strings.TrimRight(connectionString, ";") - - splitString := strings.Split(connectionString, ";") - if len(splitString) == 0 { - return ParsedConnectionString{}, errConnectionString - } - for _, stringPart := range splitString { - parts := strings.SplitN(stringPart, "=", 2) - if len(parts) != 2 { - return ParsedConnectionString{}, errConnectionString - } - connStrMap[parts[0]] = parts[1] - } - - protocol, ok := connStrMap["DefaultEndpointsProtocol"] - if !ok { - protocol = defaultScheme - } - - suffix, ok := connStrMap["EndpointSuffix"] - if !ok { - suffix = defaultSuffix - } - - blobEndpoint, has_blobEndpoint := connStrMap["BlobEndpoint"] - accountName, has_accountName := connStrMap["AccountName"] - - var serviceURL string - if has_blobEndpoint { - serviceURL = blobEndpoint - } else if has_accountName { - serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) - } else { - return ParsedConnectionString{}, errors.New("connection string needs either AccountName or BlobEndpoint") - } - - if !strings.HasSuffix(serviceURL, "/") { - // add a trailing slash to be consistent with the portal - serviceURL += "/" - } - - accountKey, has_accountKey := connStrMap["AccountKey"] - sharedAccessSignature, has_sharedAccessSignature := connStrMap["SharedAccessSignature"] - - if has_accountName && has_accountKey { - return ParsedConnectionString{ - ServiceURL: serviceURL, - AccountName: accountName, - AccountKey: accountKey, - }, nil - } else if has_sharedAccessSignature { - return ParsedConnectionString{ - ServiceURL: fmt.Sprintf("%v?%v", serviceURL, sharedAccessSignature), - }, nil - } else { - return ParsedConnectionString{}, errors.New("connection string needs either AccountKey or SharedAccessSignature") - } - -} - -// SerializeBlobTags converts tags to generated.BlobTags -func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { - blobTagSet := make([]*generated.BlobTag, 0) - for key, val := range tagsMap { - newKey, newVal := key, val - blobTagSet = append(blobTagSet, &generated.BlobTag{Key: &newKey, Value: &newVal}) - } - return &generated.BlobTags{BlobTagSet: blobTagSet} -} - -func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string { - if len(tagsMap) == 0 { - return nil - } - tags := make([]string, 0) - for key, val := range tagsMap { - tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val)) - } - blobTagsString := strings.Join(tags, "&") - return &blobTagsString -} - -func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { - if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long - return 0, nil - } - - err := validateSeekableStreamAt0(body) - if err != nil { - return 0, err - } - - count, err := body.Seek(0, io.SeekEnd) - if err != nil { - return 0, errors.New("body stream must be seekable") - } - - _, err = body.Seek(0, io.SeekStart) - if err != nil { - return 0, err - } - return count, nil -} - -// return an error if body is not a valid seekable stream at 0 -func validateSeekableStreamAt0(body io.ReadSeeker) error { - if body == nil { // nil body's are 
"logically" seekable to 0 - return nil - } - if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { - // Help detect programmer error - if err != nil { - return errors.New("body stream must be seekable") - } - return errors.New("body stream must be set to position 0") - } - return nil -} - -func RangeToString(offset, count int64) string { - return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10) -} - -type nopCloser struct { - io.ReadSeeker -} - -func (n nopCloser) Close() error { - return nil -} - -// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. -func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { - return nopCloser{rs} -} - -func GenerateLeaseID(leaseID *string) (*string, error) { - if leaseID == nil { - generatedUuid, err := uuid.New() - if err != nil { - return nil, err - } - leaseID = to.Ptr(generatedUuid.String()) - } - return leaseID, nil -} - -func GetClientOptions[T any](o *T) *T { - if o == nil { - return new(T) - } - return o -} - -// IsIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as: -// http(s)://IP(:port)/storageaccount/container/... -// As url's Host property, host could be both host or host:port -func IsIPEndpointStyle(host string) bool { - if host == "" { - return false - } - if h, _, err := net.SplitHostPort(host); err == nil { - host = h - } - // For IPv6, there could be case where SplitHostPort fails for cannot finding port. - // In this case, eliminate the '[' and ']' in the URL. - // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 - if host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - return net.ParseIP(host) != nil -} - -// ReadAtLeast reads from r into buf until it has read at least min bytes. -// It returns the number of bytes copied and an error. -// The EOF error is returned if no bytes were read or -// EOF happened after reading fewer than min bytes. -// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer. -// On return, n >= min if and only if err == nil. -// If r returns an error having read at least min bytes, the error is dropped. -// This method is same as io.ReadAtLeast except that it does not -// return io.ErrUnexpectedEOF when fewer than min bytes are read. -func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) { - if len(buf) < min { - return 0, io.ErrShortBuffer - } - for n < min && err == nil { - var nn int - nn, err = r.Read(buf[n:]) - n += nn - } - if n >= min { - err = nil - } - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go deleted file mode 100644 index 4d26992d3..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" -) - -const ( - // EventUpload is used for logging events related to upload operation. - EventUpload = exported.EventUpload - - // EventSubmitBatch is used for logging events related to submit blob batch operation. 
- EventSubmitBatch = exported.EventSubmitBatch -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md deleted file mode 100644 index 1b1a4b45d..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md +++ /dev/null @@ -1,76 +0,0 @@ -# Guide to migrate from `azure-storage-blob-go` to `azblob` - -This guide is intended to assist in the migration from the `azure-storage-blob-go` module, or previous betas of `azblob`, to the latest releases of the `azblob` module. - -## Simplified API surface area - -The redesign of the `azblob` module separates clients into various sub-packages. -In previous versions, the public surface area was "flat", so all clients and supporting types were in the `azblob` package. -This made it difficult to navigate the public surface area. - -## Clients - -In `azure-storage-blob-go` a client constructor always requires a `url.URL` and `Pipeline` parameters. - -In `azblob` a client constructor always requires a `string` URL, any specified credential type, and a `*ClientOptions` for optional values. You pass `nil` to accept default options. - -```go -// new code -client, err := azblob.NewClient("", cred, nil) -``` - -## Authentication - -In `azure-storage-blob-go` you created a `Pipeline` with the required credential type. This pipeline was then passed to the client constructor. - -In `azblob`, you pass the required credential directly to the client constructor. - -```go -// new code. cred is an AAD token credential created from the azidentity module -client, err := azblob.NewClient("", cred, nil) -``` - -The `azure-storage-blob-go` module provided limited support for OAuth token authentication via `NewTokenCredential`. -This been replaced by using Azure Identity credentials from [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#section-readme). - -Authentication with a shared key via `NewSharedKeyCredential` remains unchanged. - -In `azure-storage-blob-go` you created a `Pipeline` with `NewAnonymousCredential` to support anonymous or SAS authentication. - -In `azblob` you use the construtor `NewClientWithNoCredential()` instead. - -```go -// new code -client, err := azblob.NewClientWithNoCredential("", nil) -``` - -## Listing blobs/containers - -In `azure-storage-blob-go` you explicitly created a `Marker` type that was used to page over results ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-package)). - -In `azblob`, operations that return paginated values return a `*runtime.Pager[T]`. - -```go -// new code -pager := client.NewListBlobsFlatPager("my-container", nil) -for pager.More() { - page, err := pager.NextPage(context.TODO()) - // process results -} -``` - -## Configuring the HTTP pipeline - -In `azure-storage-blob-go` you explicitly created a HTTP pipeline with configuration before creating a client. -This pipeline instance was then passed as an argument to the client constructor ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-NewPipeline)). - -In `azblob` a HTTP pipeline is created during client construction. The pipeline is configured through the `azcore.ClientOptions` type. 
- -```go -// new code -client, err := azblob.NewClient(account, cred, &azblob.ClientOptions{ - ClientOptions: azcore.ClientOptions{ - // configure HTTP pipeline options here - }, -}) -``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go deleted file mode 100644 index 2896788e1..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" -) - -// CreateContainerOptions contains the optional parameters for the ContainerClient.Create method. -type CreateContainerOptions = service.CreateContainerOptions - -// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. -type DeleteContainerOptions = service.DeleteContainerOptions - -// DeleteBlobOptions contains the optional parameters for the Client.Delete method. -type DeleteBlobOptions = blob.DeleteOptions - -// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method. -type DownloadStreamOptions = blob.DownloadStreamOptions - -// ListBlobsFlatOptions contains the optional parameters for the container.Client.ListBlobFlatSegment method. -type ListBlobsFlatOptions = container.ListBlobsFlatOptions - -// ListBlobsInclude indicates what additional information the service should return with each blob. -type ListBlobsInclude = container.ListBlobsInclude - -// ListContainersOptions contains the optional parameters for the container.Client.ListContainers operation -type ListContainersOptions = service.ListContainersOptions - -// UploadBufferOptions provides set of configurations for UploadBuffer operation -type UploadBufferOptions = blockblob.UploadBufferOptions - -// UploadFileOptions provides set of configurations for UploadFile operation -type UploadFileOptions = blockblob.UploadFileOptions - -// UploadStreamOptions provides set of configurations for UploadStream operation -type UploadStreamOptions = blockblob.UploadStreamOptions - -// DownloadBufferOptions identifies options used by the DownloadBuffer and DownloadFile functions. -type DownloadBufferOptions = blob.DownloadBufferOptions - -// DownloadFileOptions identifies options used by the DownloadBuffer and DownloadFile functions. -type DownloadFileOptions = blob.DownloadFileOptions - -// CPKInfo contains a group of parameters for client provided encryption key. -type CPKInfo = blob.CPKInfo - -// CPKScopeInfo contains a group of parameters for the ContainerClient.Create method. -type CPKScopeInfo = container.CPKScopeInfo - -// AccessConditions identifies blob-specific access conditions which you optionally set. -type AccessConditions = exported.BlobAccessConditions - -// ListContainersInclude indicates what additional information the service should return with each container. 
-type ListContainersInclude = service.ListContainersInclude - -// ObjectReplicationPolicy are deserialized attributes -type ObjectReplicationPolicy = blob.ObjectReplicationPolicy - -// RetryReaderOptions contains properties which can help to decide when to do retry. -type RetryReaderOptions = blob.RetryReaderOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go deleted file mode 100644 index ca196f2c8..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ /dev/null @@ -1,454 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package pageblob - -import ( - "context" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" - "io" - "net/http" - "net/url" - "os" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -// ClientOptions contains the optional parameters when creating a Client. -type ClientOptions base.ClientOptions - -// Client represents a client to an Azure Storage page blob; -type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] - -// NewClient creates an instance of Client with the specified values. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt -// - cred - an Azure AD credential, typically obtained via the azidentity module -// - options - client options; pass nil to accept the default values -func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - audience := base.GetAudience((*base.ClientOptions)(options)) - authPolicy := shared.NewStorageChallengePolicy(cred, audience) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil -} - -// NewClientWithNoCredential creates an instance of Client with the specified values. -// This is used to anonymously access a blob or with a shared access signature (SAS) token. -// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? -// - options - client options; pass nil to accept the default values -func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { - conOptions := shared.GetClientOptions(options) - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil -} - -// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
-// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt -// - cred - a SharedKeyCredential created with the matching blob's storage account and access key -// - options - client options; pass nil to accept the default values -func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { - authPolicy := exported.NewSharedKeyCredPolicy(cred) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewPageBlobClient(blobURL, azClient, cred)), nil -} - -// NewClientFromConnectionString creates an instance of Client with the specified values. -// - connectionString - a connection string for the desired storage account -// - containerName - the name of the container within the storage account -// - blobName - the name of the blob within the container -// - options - client options; pass nil to accept the default values -func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) { - parsed, err := shared.ParseConnectionString(connectionString) - if err != nil { - return nil, err - } - parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName) - - if parsed.AccountKey != "" && parsed.AccountName != "" { - credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) - if err != nil { - return nil, err - } - return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) - } - - return NewClientWithNoCredential(parsed.ServiceURL, options) -} - -func (pb *Client) generated() *generated.PageBlobClient { - _, pageBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) - return pageBlob -} - -// URL returns the URL endpoint used by the Client object. -func (pb *Client) URL() string { - return pb.generated().Endpoint() -} - -// BlobClient returns the embedded blob client for this AppendBlob client. -func (pb *Client) BlobClient() *blob.Client { - innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) - return (*blob.Client)(innerBlob) -} - -func (pb *Client) sharedKey() *blob.SharedKeyCredential { - return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb)) -} - -// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (pb *Client) WithSnapshot(snapshot string) (*Client, error) { - p, err := blob.ParseURL(pb.URL()) - if err != nil { - return nil, err - } - p.Snapshot = snapshot - - return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil -} - -// WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the version returning a URL to the base blob. 
-func (pb *Client) WithVersionID(versionID string) (*Client, error) { - p, err := blob.ParseURL(pb.URL()) - if err != nil { - return nil, err - } - p.VersionID = versionID - - return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil -} - -// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (pb *Client) Create(ctx context.Context, size int64, o *CreateOptions) (CreateResponse, error) { - createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() - - resp, err := pb.generated().Create(ctx, 0, size, createOptions, HTTPHeaders, - leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - return resp, err -} - -// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. -func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, contentRange blob.HTTPRange, options *UploadPagesOptions) (UploadPagesResponse, error) { - count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) - - if err != nil { - return UploadPagesResponse{}, err - } - - uploadPagesOptions := &generated.PageBlobClientUploadPagesOptions{ - Range: exported.FormatHTTPRange(contentRange), - } - - leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() - - if options != nil && options.TransactionalValidation != nil { - body, err = options.TransactionalValidation.Apply(body, uploadPagesOptions) - if err != nil { - return UploadPagesResponse{}, nil - } - } - - resp, err := pb.generated().UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions, - cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - - return resp, err -} - -// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob. -// The sourceOffset specifies the start offset of source data to copy from. -// The destOffset specifies the start offset of data in page blob will be written to. -// The count must be a multiple of 512 bytes. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url. -func (pb *Client) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64, - o *UploadPagesFromURLOptions) (UploadPagesFromURLResponse, error) { - - uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, - modifiedAccessConditions, sourceModifiedAccessConditions := o.format() - - resp, err := pb.generated().UploadPagesFromURL(ctx, source, shared.RangeToString(sourceOffset, count), 0, - shared.RangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, - sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) - - return resp, err -} - -// ClearPages frees the specified pages from the page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. 
-func (pb *Client) ClearPages(ctx context.Context, rnge blob.HTTPRange, options *ClearPagesOptions) (ClearPagesResponse, error) { - clearOptions := &generated.PageBlobClientClearPagesOptions{ - Range: exported.FormatHTTPRange(rnge), - } - - leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() - - resp, err := pb.generated().ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo, - cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) - - return resp, err -} - -// NewGetPageRangesPager returns the list of valid page ranges for a page blob or snapshot of a page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. -func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[GetPageRangesResponse] { - opts, leaseAccessConditions, modifiedAccessConditions := o.format() - - return runtime.NewPager(runtime.PagingHandler[GetPageRangesResponse]{ - More: func(page GetPageRangesResponse) bool { - return page.NextMarker != nil && len(*page.NextMarker) > 0 - }, - Fetcher: func(ctx context.Context, page *GetPageRangesResponse) (GetPageRangesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) - } else { - opts.Marker = page.NextMarker - req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) - } - if err != nil { - return GetPageRangesResponse{}, err - } - resp, err := pb.generated().InternalClient().Pipeline().Do(req) - if err != nil { - return GetPageRangesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return GetPageRangesResponse{}, runtime.NewResponseError(resp) - } - return pb.generated().GetPageRangesHandleResponse(resp) - }, - }) -} - -// NewGetPageRangesDiffPager gets the collection of page ranges that differ between a specified snapshot and this page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. -func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtime.Pager[GetPageRangesDiffResponse] { - opts, leaseAccessConditions, modifiedAccessConditions := o.format() - - return runtime.NewPager(runtime.PagingHandler[GetPageRangesDiffResponse]{ - More: func(page GetPageRangesDiffResponse) bool { - return page.NextMarker != nil && len(*page.NextMarker) > 0 - }, - Fetcher: func(ctx context.Context, page *GetPageRangesDiffResponse) (GetPageRangesDiffResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) - } else { - opts.Marker = page.NextMarker - req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) - } - if err != nil { - return GetPageRangesDiffResponse{}, err - } - resp, err := pb.generated().InternalClient().Pipeline().Do(req) - if err != nil { - return GetPageRangesDiffResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return GetPageRangesDiffResponse{}, runtime.NewResponseError(resp) - } - return pb.generated().GetPageRangesDiffHandleResponse(resp) - }, - }) -} - -// Resize resizes the page blob to the specified size (which must be a multiple of 512). 
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (pb *Client) Resize(ctx context.Context, size int64, options *ResizeOptions) (ResizeResponse, error) { - resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() - - resp, err := pb.generated().Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) - - return resp, err -} - -// UpdateSequenceNumber sets the page blob's sequence number. -func (pb *Client) UpdateSequenceNumber(ctx context.Context, options *UpdateSequenceNumberOptions) (UpdateSequenceNumberResponse, error) { - actionType, updateOptions, lac, mac := options.format() - resp, err := pb.generated().UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac) - - return resp, err -} - -// StartCopyIncremental begins an operation to start an incremental copy from one-page blob's snapshot to this page blob. -// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. -// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and -// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots. -func (pb *Client) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *CopyIncrementalOptions) (CopyIncrementalResponse, error) { - copySourceURL, err := url.Parse(copySource) - if err != nil { - return CopyIncrementalResponse{}, err - } - - queryParams := copySourceURL.Query() - queryParams.Set("snapshot", prevSnapshot) - copySourceURL.RawQuery = queryParams.Encode() - - pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format() - resp, err := pb.generated().CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions) - - return resp, err -} - -// Redeclared APIs - -// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. -// Note that deleting a blob also deletes all its snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (pb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { - return pb.BlobClient().Delete(ctx, o) -} - -// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. -func (pb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { - return pb.BlobClient().Undelete(ctx, o) -} - -// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (pb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { - return pb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) -} - -// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. 
-// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (pb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { - return pb.BlobClient().DeleteImmutabilityPolicy(ctx, options) -} - -// SetLegalHold operation enables users to set legal hold on a blob. -// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview -func (pb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { - return pb.BlobClient().SetLegalHold(ctx, legalHold, options) -} - -// SetTier operation sets the tier on a blob. The operation is allowed on a page -// blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and -// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation -// does not update the blob's ETag. -// For detailed information about block blob level tier-ing see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. -func (pb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { - return pb.BlobClient().SetTier(ctx, tier, o) -} - -// GetProperties returns the blob's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. -func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { - return pb.BlobClient().GetProperties(ctx, o) -} - -// GetAccountInfo provides account level information -// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. -func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { - return pb.BlobClient().GetAccountInfo(ctx, o) -} - -// SetHTTPHeaders changes a blob's HTTP headers. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) -} - -// SetMetadata changes a blob's metadata. -// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { - return pb.BlobClient().SetMetadata(ctx, metadata, o) -} - -// CreateSnapshot creates a read-only snapshot of a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. -func (pb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { - return pb.BlobClient().CreateSnapshot(ctx, o) -} - -// StartCopyFromURL copies the data at the source URL to a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. 
-func (pb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { - return pb.BlobClient().StartCopyFromURL(ctx, copySource, o) -} - -// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. -func (pb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { - return pb.BlobClient().AbortCopyFromURL(ctx, copyID, o) -} - -// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. -// Each call to this operation replaces all existing tags attached to the blob. -// To remove all tags from the blob, call this operation with no tags set. -// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags -func (pb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { - return pb.BlobClient().SetTags(ctx, tags, o) -} - -// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. -// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags -func (pb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { - return pb.BlobClient().GetTags(ctx, o) -} - -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. -func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { - return pb.BlobClient().CopyFromURL(ctx, copySource, o) -} - -// GetSASURL is a convenience method for generating a SAS token for the currently pointed at Page blob. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (pb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { - return pb.BlobClient().GetSASURL(permissions, expiry, o) -} - -// Concurrent Download Functions ----------------------------------------------------------------------------------------- - -// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. -func (pb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { - return pb.BlobClient().DownloadStream(ctx, o) -} - -// DownloadBuffer downloads an Azure blob to a buffer with parallel. -func (pb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) { - return pb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o) -} - -// DownloadFile downloads an Azure blob to a local file. -// The file would be truncated if the size doesn't match. 
-func (pb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) { - return pb.BlobClient().DownloadFile(ctx, file, o) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go deleted file mode 100644 index 096a7910a..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package pageblob - -import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - -const ( - // PageBytes indicates the number of bytes in a page (512). - PageBytes = 512 -) - -// CopyStatusType defines values for CopyStatusType -type CopyStatusType = generated.CopyStatusType - -const ( - CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending - CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess - CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted - CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed -) - -// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. -func PossibleCopyStatusTypeValues() []CopyStatusType { - return generated.PossibleCopyStatusTypeValues() -} - -// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier. -type PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTier - -const ( - PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP10 - PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP15 - PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP20 - PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP30 - PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP4 - PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP40 - PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP50 - PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP6 - PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP60 - PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP70 - PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP80 -) - -// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. -func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { - return generated.PossiblePremiumPageBlobAccessTierValues() -} - -// SequenceNumberActionType defines values for SequenceNumberActionType. 
-type SequenceNumberActionType = generated.SequenceNumberActionType - -const ( - SequenceNumberActionTypeMax SequenceNumberActionType = generated.SequenceNumberActionTypeMax - SequenceNumberActionTypeUpdate SequenceNumberActionType = generated.SequenceNumberActionTypeUpdate - SequenceNumberActionTypeIncrement SequenceNumberActionType = generated.SequenceNumberActionTypeIncrement -) - -// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. -func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { - return generated.PossibleSequenceNumberActionTypeValues() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go deleted file mode 100644 index 39aef20ff..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go +++ /dev/null @@ -1,330 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package pageblob - -import ( - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// Type Declarations --------------------------------------------------------------------- - -// PageList - the list of pages. -type PageList = generated.PageList - -// ClearRange defines a range of pages. -type ClearRange = generated.ClearRange - -// PageRange defines a range of pages. -type PageRange = generated.PageRange - -// SequenceNumberAccessConditions contains a group of parameters for the Client.UploadPages method. -type SequenceNumberAccessConditions = generated.SequenceNumberAccessConditions - -// Request Model Declaration ------------------------------------------------------------------------------------------- - -// CreateOptions contains the optional parameters for the Client.Create method. -type CreateOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - SequenceNumber *int64 - - // Optional. Used to set blob tags in various blob operations. - Tags map[string]string - - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs - // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source - // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. - // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]*string - - // Optional. Indicates the tier to be set on the page blob. - Tier *PremiumPageBlobAccessTier - - HTTPHeaders *blob.HTTPHeaders - - CPKInfo *blob.CPKInfo - - CPKScopeInfo *blob.CPKScopeInfo - - AccessConditions *blob.AccessConditions - // Specifies the date time when the blobs immutability policy is set to expire. 
- ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *blob.ImmutabilityPolicyMode - // Specified if a legal hold should be set on the blob. - LegalHold *bool -} - -func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *generated.BlobHTTPHeaders, - *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := &generated.PageBlobClientCreateOptions{ - BlobSequenceNumber: o.SequenceNumber, - BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), - Metadata: o.Metadata, - Tier: o.Tier, - } - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UploadPagesOptions contains the optional parameters for the Client.UploadPages method. -type UploadPagesOptions struct { - // TransactionalValidation specifies the transfer validation type to use. - // The default is nil (no transfer validation). - TransactionalValidation blob.TransferValidationType - - CPKInfo *blob.CPKInfo - CPKScopeInfo *blob.CPKScopeInfo - SequenceNumberAccessConditions *SequenceNumberAccessConditions - AccessConditions *blob.AccessConditions -} - -func (o *UploadPagesOptions) format() (*generated.LeaseAccessConditions, - *generated.CPKInfo, *generated.CPKScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UploadPagesFromURLOptions contains the optional parameters for the Client.UploadPagesFromURL method. -type UploadPagesFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - - // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. 
- SourceContentValidation blob.SourceContentValidationType - - CPKInfo *blob.CPKInfo - - CPKScopeInfo *blob.CPKScopeInfo - - SequenceNumberAccessConditions *SequenceNumberAccessConditions - - SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions - - AccessConditions *blob.AccessConditions -} - -func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, - *generated.LeaseAccessConditions, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil, nil, nil - } - - options := &generated.PageBlobClientUploadPagesFromURLOptions{ - CopySourceAuthorization: o.CopySourceAuthorization, - } - - if o.SourceContentValidation != nil { - o.SourceContentValidation.Apply(options) - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.CPKInfo, o.CPKScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ClearPagesOptions contains the optional parameters for the Client.ClearPages operation -type ClearPagesOptions struct { - CPKInfo *blob.CPKInfo - CPKScopeInfo *blob.CPKScopeInfo - SequenceNumberAccessConditions *SequenceNumberAccessConditions - AccessConditions *blob.AccessConditions -} - -func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CPKInfo, - *generated.CPKScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetPageRangesOptions contains the optional parameters for the Client.NewGetPageRangesPager method. -type GetPageRangesOptions struct { - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by MaxResults, or than the default of 5000. - MaxResults *int32 - // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot - // of the target blob. The response will only contain pages that were changed - // between the target blob and its previous snapshot. - PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The PrevSnapshot parameter is a DateTime value that specifies that the response - // will contain only pages that were changed between target blob and previous - // snapshot. Changed pages include both updated and cleared pages. 
The target blob may be a snapshot, as long as the snapshot - // specified by PrevSnapshot is the older of the two. Note that incremental - // snapshots are currently supported only for blobs created on or after January 1, 2016. - PrevSnapshot *string - // Range specifies a range of bytes. The default value is all bytes. - Range blob.HTTPRange - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - AccessConditions *blob.AccessConditions -} - -func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return &generated.PageBlobClientGetPageRangesOptions{}, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &generated.PageBlobClientGetPageRangesOptions{ - Marker: o.Marker, - Maxresults: o.MaxResults, - Range: exported.FormatHTTPRange(o.Range), - Snapshot: o.Snapshot, - }, leaseAccessConditions, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetPageRangesDiffOptions contains the optional parameters for the Client.NewGetPageRangesDiffPager method. -type GetPageRangesDiffOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by MaxResults, or than the default of 5000. - MaxResults *int32 - // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot - // of the target blob. The response will only contain pages that were changed - // between the target blob and its previous snapshot. - PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The PrevSnapshot parameter is a DateTime value that specifies that the response - // will contain only pages that were changed between target blob and previous - // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by PrevSnapshot is the older of the two. Note that incremental - // snapshots are currently supported only for blobs created on or after January 1, 2016. - PrevSnapshot *string - // Range specifies a range of bytes. The default value is all bytes. 
- Range blob.HTTPRange - - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - - AccessConditions *blob.AccessConditions -} - -func (o *GetPageRangesDiffOptions) format() (*generated.PageBlobClientGetPageRangesDiffOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &generated.PageBlobClientGetPageRangesDiffOptions{ - Marker: o.Marker, - Maxresults: o.MaxResults, - PrevSnapshotURL: o.PrevSnapshotURL, - Prevsnapshot: o.PrevSnapshot, - Range: exported.FormatHTTPRange(o.Range), - Snapshot: o.Snapshot, - }, leaseAccessConditions, modifiedAccessConditions - -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ResizeOptions contains the optional parameters for the Client.Resize method. -type ResizeOptions struct { - CPKInfo *blob.CPKInfo - CPKScopeInfo *blob.CPKScopeInfo - AccessConditions *blob.AccessConditions -} - -func (o *ResizeOptions) format() (*generated.PageBlobClientResizeOptions, *generated.LeaseAccessConditions, - *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil, nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return nil, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// UpdateSequenceNumberOptions contains the optional parameters for the Client.UpdateSequenceNumber method. -type UpdateSequenceNumberOptions struct { - ActionType *SequenceNumberActionType - - SequenceNumber *int64 - - AccessConditions *blob.AccessConditions -} - -func (o *UpdateSequenceNumberOptions) format() (*generated.SequenceNumberActionType, *generated.PageBlobClientUpdateSequenceNumberOptions, - *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil, nil - } - - options := &generated.PageBlobClientUpdateSequenceNumberOptions{ - BlobSequenceNumber: o.SequenceNumber, - } - - if *o.ActionType == SequenceNumberActionTypeIncrement { - options.BlobSequenceNumber = nil - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- - -// CopyIncrementalOptions contains the optional parameters for the Client.StartCopyIncremental method. 
-type CopyIncrementalOptions struct { - ModifiedAccessConditions *blob.ModifiedAccessConditions -} - -func (o *CopyIncrementalOptions) format() (*generated.PageBlobClientCopyIncrementalOptions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil - } - - return nil, o.ModifiedAccessConditions -} - -// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go deleted file mode 100644 index 876efbab1..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package pageblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// CreateResponse contains the response from method Client.Create. -type CreateResponse = generated.PageBlobClientCreateResponse - -// UploadPagesResponse contains the response from method Client.UploadPages. -type UploadPagesResponse = generated.PageBlobClientUploadPagesResponse - -// UploadPagesFromURLResponse contains the response from method Client.UploadPagesFromURL. -type UploadPagesFromURLResponse = generated.PageBlobClientUploadPagesFromURLResponse - -// ClearPagesResponse contains the response from method Client.ClearPages. -type ClearPagesResponse = generated.PageBlobClientClearPagesResponse - -// GetPageRangesResponse contains the response from method Client.NewGetPageRangesPager. -type GetPageRangesResponse = generated.PageBlobClientGetPageRangesResponse - -// GetPageRangesDiffResponse contains the response from method Client.NewGetPageRangesDiffPager. -type GetPageRangesDiffResponse = generated.PageBlobClientGetPageRangesDiffResponse - -// ResizeResponse contains the response from method Client.Resize. -type ResizeResponse = generated.PageBlobClientResizeResponse - -// UpdateSequenceNumberResponse contains the response from method Client.UpdateSequenceNumber. -type UpdateSequenceNumberResponse = generated.PageBlobClientUpdateSequenceNumberResponse - -// CopyIncrementalResponse contains the response from method Client.StartCopyIncremental. -type CopyIncrementalResponse = generated.PageBlobClientCopyIncrementalResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go deleted file mode 100644 index 86b05d098..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package azblob - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" -) - -// CreateContainerResponse contains the response from method container.Client.Create. 
-type CreateContainerResponse = service.CreateContainerResponse - -// DeleteContainerResponse contains the response from method container.Client.Delete -type DeleteContainerResponse = service.DeleteContainerResponse - -// DeleteBlobResponse contains the response from method blob.Client.Delete. -type DeleteBlobResponse = blob.DeleteResponse - -// UploadResponse contains the response from method blockblob.Client.CommitBlockList. -type UploadResponse = blockblob.CommitBlockListResponse - -// DownloadStreamResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry. -type DownloadStreamResponse = blob.DownloadStreamResponse - -// ListBlobsFlatResponse contains the response from method container.Client.ListBlobFlatSegment. -type ListBlobsFlatResponse = container.ListBlobsFlatResponse - -// ListContainersResponse contains the response from method service.Client.ListContainersSegment. -type ListContainersResponse = service.ListContainersResponse - -// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile. -type UploadBufferResponse = blockblob.UploadBufferResponse - -// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. -type UploadFileResponse = blockblob.UploadFileResponse - -// UploadStreamResponse contains the response from method Client.CommitBlockList. -type UploadStreamResponse = blockblob.CommitBlockListResponse - -// ListContainersSegmentResponse - An enumeration of containers -type ListContainersSegmentResponse = generated.ListContainersSegmentResponse - -// ListBlobsFlatSegmentResponse - An enumeration of blobs -type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go deleted file mode 100644 index 4069bb132..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go +++ /dev/null @@ -1,229 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package sas - -import ( - "bytes" - "errors" - "fmt" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" -) - -// SharedKeyCredential contains an account's name and its primary or secondary key. -type SharedKeyCredential = exported.SharedKeyCredential - -// UserDelegationCredential contains an account's name and its user delegation key. -type UserDelegationCredential = exported.UserDelegationCredential - -// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. 
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas -type AccountSignatureValues struct { - Version string `param:"sv"` // If not specified, this format to SASVersion - Protocol Protocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() - IPRange IPRange `param:"sip"` - ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() - EncryptionScope string `param:"ses"` -} - -// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce -// the proper SAS query parameters. -func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS - if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" { - return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") - } - if v.Version == "" { - v.Version = Version - } - perms, err := parseAccountPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - - resources, err := parseAccountResourceTypes(v.ResourceTypes) - if err != nil { - return QueryParameters{}, err - } - v.ResourceTypes = resources.String() - - startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) - - stringToSign := strings.Join([]string{ - sharedKeyCredential.AccountName(), - v.Permissions, - "b", // blob service - v.ResourceTypes, - startTime, - expiryTime, - v.IPRange.String(), - string(v.Protocol), - v.Version, - v.EncryptionScope, - ""}, // That is right, the account SAS requires a terminating extra newline - "\n") - - signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) - if err != nil { - return QueryParameters{}, err - } - p := QueryParameters{ - // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, - encryptionScope: v.EncryptionScope, - - // Account-specific SAS parameters - services: "b", // will always be "b" - resourceTypes: v.ResourceTypes, - - // Calculated SAS signature - signature: signature, - } - - return p, nil -} - -// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field. -type AccountPermissions struct { - Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, FilterByTags, Tag, SetImmutabilityPolicy bool -} - -// String produces the SAS permissions string for an Azure Storage account. -// Call this method to set AccountSignatureValues' Permissions field. 
-func (p *AccountPermissions) String() string { - var buffer bytes.Buffer - if p.Read { - buffer.WriteRune('r') - } - if p.Write { - buffer.WriteRune('w') - } - if p.Delete { - buffer.WriteRune('d') - } - if p.DeletePreviousVersion { - buffer.WriteRune('x') - } - if p.PermanentDelete { - buffer.WriteRune('y') - } - if p.List { - buffer.WriteRune('l') - } - if p.Add { - buffer.WriteRune('a') - } - if p.Create { - buffer.WriteRune('c') - } - if p.Update { - buffer.WriteRune('u') - } - if p.Process { - buffer.WriteRune('p') - } - if p.FilterByTags { - buffer.WriteRune('f') - } - if p.Tag { - buffer.WriteRune('t') - } - if p.SetImmutabilityPolicy { - buffer.WriteRune('i') - } - return buffer.String() -} - -// Parse initializes the AccountPermissions' fields from a string. -func parseAccountPermissions(s string) (AccountPermissions, error) { - p := AccountPermissions{} // Clear out the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'x': - p.DeletePreviousVersion = true - case 'y': - p.PermanentDelete = true - case 'l': - p.List = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'u': - p.Update = true - case 'p': - p.Process = true - case 't': - p.Tag = true - case 'f': - p.FilterByTags = true - case 'i': - p.SetImmutabilityPolicy = true - default: - return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) - } - } - return p, nil -} - -// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. -type AccountResourceTypes struct { - Service, Container, Object bool -} - -// String produces the SAS resource types string for an Azure Storage account. -// Call this method to set AccountSignatureValues' ResourceTypes field. -func (rt *AccountResourceTypes) String() string { - var buffer bytes.Buffer - if rt.Service { - buffer.WriteRune('s') - } - if rt.Container { - buffer.WriteRune('c') - } - if rt.Object { - buffer.WriteRune('o') - } - return buffer.String() -} - -// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. -func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { - rt := AccountResourceTypes{} - for _, r := range s { - switch r { - case 's': - rt.Service = true - case 'c': - rt.Container = true - case 'o': - rt.Object = true - default: - return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) - } - } - return rt, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go deleted file mode 100644 index 4c23208e2..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go +++ /dev/null @@ -1,451 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package sas - -import ( - "errors" - "net" - "net/url" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" -) - -// TimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. 
-const ( - TimeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601 -) - -var ( - // Version is the default version encoded in the SAS token. - Version = "2021-12-02" -) - -// TimeFormats ISO 8601 format. -// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. -var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01-02T15:04Z", "2006-01-02"} - -// Protocol indicates the http/https. -type Protocol string - -const ( - // ProtocolHTTPS can be specified for a SAS protocol. - ProtocolHTTPS Protocol = "https" - - // ProtocolHTTPSandHTTP can be specified for a SAS protocol. - ProtocolHTTPSandHTTP Protocol = "https,http" -) - -// FormatTimesForSigning converts a time.Time to a snapshotTimeFormat string suitable for a -// Field's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). -func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) { - ss := "" - if !startTime.IsZero() { - ss = formatTimeWithDefaultFormat(&startTime) - } - se := "" - if !expiryTime.IsZero() { - se = formatTimeWithDefaultFormat(&expiryTime) - } - sh := "" - if !snapshotTime.IsZero() { - sh = snapshotTime.Format(exported.SnapshotTimeFormat) - } - return ss, se, sh -} - -// formatTimeWithDefaultFormat format time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ". -func formatTimeWithDefaultFormat(t *time.Time) string { - return formatTime(t, TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used -} - -// formatTime format time with given format, use ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default. -func formatTime(t *time.Time, format string) string { - if format != "" { - return t.Format(format) - } - return t.Format(TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used -} - -// ParseTime try to parse a SAS time string. -func parseTime(val string) (t time.Time, timeFormat string, err error) { - for _, sasTimeFormat := range timeFormats { - t, err = time.Parse(sasTimeFormat, val) - if err == nil { - timeFormat = sasTimeFormat - break - } - } - - if err != nil { - err = errors.New("fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details") - } - - return -} - -// IPRange represents a SAS IP range's start IP and (optionally) end IP. -type IPRange struct { - Start net.IP // Not specified if length = 0 - End net.IP // Not specified if length = 0 -} - -// String returns a string representation of an IPRange. -func (ipr *IPRange) String() string { - if len(ipr.Start) == 0 { - return "" - } - start := ipr.Start.String() - if len(ipr.End) == 0 { - return start - } - return start + "-" + ipr.End.String() -} - -// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas - -// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters. -// You parse a map of query parameters into its fields by calling NewQueryParameters(). You add the components -// to a query parameter map by calling AddToValues(). -// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. -// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). -type QueryParameters struct { - // All members are immutable or values so copies of this struct are goroutine-safe. 
- version string `param:"sv"` - services string `param:"ss"` - resourceTypes string `param:"srt"` - protocol Protocol `param:"spr"` - startTime time.Time `param:"st"` - expiryTime time.Time `param:"se"` - snapshotTime time.Time `param:"snapshot"` - ipRange IPRange `param:"sip"` - identifier string `param:"si"` - resource string `param:"sr"` - permissions string `param:"sp"` - signature string `param:"sig"` - cacheControl string `param:"rscc"` - contentDisposition string `param:"rscd"` - contentEncoding string `param:"rsce"` - contentLanguage string `param:"rscl"` - contentType string `param:"rsct"` - signedOID string `param:"skoid"` - signedTID string `param:"sktid"` - signedStart time.Time `param:"skt"` - signedService string `param:"sks"` - signedExpiry time.Time `param:"ske"` - signedVersion string `param:"skv"` - signedDirectoryDepth string `param:"sdd"` - authorizedObjectID string `param:"saoid"` - unauthorizedObjectID string `param:"suoid"` - correlationID string `param:"scid"` - encryptionScope string `param:"ses"` - // private member used for startTime and expiryTime formatting. - stTimeFormat string - seTimeFormat string -} - -// AuthorizedObjectID returns authorizedObjectID. -func (p *QueryParameters) AuthorizedObjectID() string { - return p.authorizedObjectID -} - -// UnauthorizedObjectID returns unauthorizedObjectID. -func (p *QueryParameters) UnauthorizedObjectID() string { - return p.unauthorizedObjectID -} - -// SignedCorrelationID returns signedCorrelationID. -func (p *QueryParameters) SignedCorrelationID() string { - return p.correlationID -} - -// EncryptionScope returns encryptionScope -func (p *QueryParameters) EncryptionScope() string { - return p.encryptionScope -} - -// SignedOID returns signedOID. -func (p *QueryParameters) SignedOID() string { - return p.signedOID -} - -// SignedTID returns signedTID. -func (p *QueryParameters) SignedTID() string { - return p.signedTID -} - -// SignedStart returns signedStart. -func (p *QueryParameters) SignedStart() time.Time { - return p.signedStart -} - -// SignedExpiry returns signedExpiry. -func (p *QueryParameters) SignedExpiry() time.Time { - return p.signedExpiry -} - -// SignedService returns signedService. -func (p *QueryParameters) SignedService() string { - return p.signedService -} - -// SignedVersion returns signedVersion. -func (p *QueryParameters) SignedVersion() string { - return p.signedVersion -} - -// SnapshotTime returns snapshotTime. -func (p *QueryParameters) SnapshotTime() time.Time { - return p.snapshotTime -} - -// Version returns version. -func (p *QueryParameters) Version() string { - return p.version -} - -// Services returns services. -func (p *QueryParameters) Services() string { - return p.services -} - -// ResourceTypes returns resourceTypes. -func (p *QueryParameters) ResourceTypes() string { - return p.resourceTypes -} - -// Protocol returns protocol. -func (p *QueryParameters) Protocol() Protocol { - return p.protocol -} - -// StartTime returns startTime. -func (p *QueryParameters) StartTime() time.Time { - return p.startTime -} - -// ExpiryTime returns expiryTime. -func (p *QueryParameters) ExpiryTime() time.Time { - return p.expiryTime -} - -// IPRange returns ipRange. -func (p *QueryParameters) IPRange() IPRange { - return p.ipRange -} - -// Identifier returns identifier. -func (p *QueryParameters) Identifier() string { - return p.identifier -} - -// Resource returns resource. -func (p *QueryParameters) Resource() string { - return p.resource -} - -// Permissions returns permissions. 
-func (p *QueryParameters) Permissions() string { - return p.permissions -} - -// Signature returns signature. -func (p *QueryParameters) Signature() string { - return p.signature -} - -// CacheControl returns cacheControl. -func (p *QueryParameters) CacheControl() string { - return p.cacheControl -} - -// ContentDisposition returns contentDisposition. -func (p *QueryParameters) ContentDisposition() string { - return p.contentDisposition -} - -// ContentEncoding returns contentEncoding. -func (p *QueryParameters) ContentEncoding() string { - return p.contentEncoding -} - -// ContentLanguage returns contentLanguage. -func (p *QueryParameters) ContentLanguage() string { - return p.contentLanguage -} - -// ContentType returns contentType. -func (p *QueryParameters) ContentType() string { - return p.contentType -} - -// SignedDirectoryDepth returns signedDirectoryDepth. -func (p *QueryParameters) SignedDirectoryDepth() string { - return p.signedDirectoryDepth -} - -// Encode encodes the SAS query parameters into URL encoded form sorted by key. -func (p *QueryParameters) Encode() string { - v := url.Values{} - - if p.version != "" { - v.Add("sv", p.version) - } - if p.services != "" { - v.Add("ss", p.services) - } - if p.resourceTypes != "" { - v.Add("srt", p.resourceTypes) - } - if p.protocol != "" { - v.Add("spr", string(p.protocol)) - } - if !p.startTime.IsZero() { - v.Add("st", formatTime(&(p.startTime), p.stTimeFormat)) - } - if !p.expiryTime.IsZero() { - v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat)) - } - if len(p.ipRange.Start) > 0 { - v.Add("sip", p.ipRange.String()) - } - if p.identifier != "" { - v.Add("si", p.identifier) - } - if p.resource != "" { - v.Add("sr", p.resource) - } - if p.permissions != "" { - v.Add("sp", p.permissions) - } - if p.signedOID != "" { - v.Add("skoid", p.signedOID) - v.Add("sktid", p.signedTID) - v.Add("skt", p.signedStart.Format(TimeFormat)) - v.Add("ske", p.signedExpiry.Format(TimeFormat)) - v.Add("sks", p.signedService) - v.Add("skv", p.signedVersion) - } - if p.signature != "" { - v.Add("sig", p.signature) - } - if p.cacheControl != "" { - v.Add("rscc", p.cacheControl) - } - if p.contentDisposition != "" { - v.Add("rscd", p.contentDisposition) - } - if p.contentEncoding != "" { - v.Add("rsce", p.contentEncoding) - } - if p.contentLanguage != "" { - v.Add("rscl", p.contentLanguage) - } - if p.contentType != "" { - v.Add("rsct", p.contentType) - } - if p.signedDirectoryDepth != "" { - v.Add("sdd", p.signedDirectoryDepth) - } - if p.authorizedObjectID != "" { - v.Add("saoid", p.authorizedObjectID) - } - if p.unauthorizedObjectID != "" { - v.Add("suoid", p.unauthorizedObjectID) - } - if p.correlationID != "" { - v.Add("scid", p.correlationID) - } - if p.encryptionScope != "" { - v.Add("ses", p.encryptionScope) - } - - return v.Encode() -} - -// NewQueryParameters creates and initializes a QueryParameters object based on the -// query parameter map's passed-in values. If deleteSASParametersFromValues is true, -// all SAS-related query parameters are removed from the passed-in map. If -// deleteSASParametersFromValues is false, the map passed-in map is unaltered. 
-func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { - p := QueryParameters{} - for k, v := range values { - val := v[0] - isSASKey := true - switch strings.ToLower(k) { - case "sv": - p.version = val - case "ss": - p.services = val - case "srt": - p.resourceTypes = val - case "spr": - p.protocol = Protocol(val) - case "snapshot": - p.snapshotTime, _ = time.Parse(exported.SnapshotTimeFormat, val) - case "st": - p.startTime, p.stTimeFormat, _ = parseTime(val) - case "se": - p.expiryTime, p.seTimeFormat, _ = parseTime(val) - case "sip": - dashIndex := strings.Index(val, "-") - if dashIndex == -1 { - p.ipRange.Start = net.ParseIP(val) - } else { - p.ipRange.Start = net.ParseIP(val[:dashIndex]) - p.ipRange.End = net.ParseIP(val[dashIndex+1:]) - } - case "si": - p.identifier = val - case "sr": - p.resource = val - case "sp": - p.permissions = val - case "sig": - p.signature = val - case "rscc": - p.cacheControl = val - case "rscd": - p.contentDisposition = val - case "rsce": - p.contentEncoding = val - case "rscl": - p.contentLanguage = val - case "rsct": - p.contentType = val - case "skoid": - p.signedOID = val - case "sktid": - p.signedTID = val - case "skt": - p.signedStart, _ = time.Parse(TimeFormat, val) - case "ske": - p.signedExpiry, _ = time.Parse(TimeFormat, val) - case "sks": - p.signedService = val - case "skv": - p.signedVersion = val - case "sdd": - p.signedDirectoryDepth = val - case "saoid": - p.authorizedObjectID = val - case "suoid": - p.unauthorizedObjectID = val - case "scid": - p.correlationID = val - case "ses": - p.encryptionScope = val - default: - isSASKey = false // We didn't recognize the query parameter - } - if isSASKey && deleteSASParametersFromValues { - delete(values, k) - } - } - return p -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go deleted file mode 100644 index 45f730847..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ /dev/null @@ -1,472 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package sas - -import ( - "bytes" - "errors" - "fmt" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" -) - -// BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
-// For more information on creating service sas, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas -// For more information on creating user delegation sas, see https://docs.microsoft.com/rest/api/storageservices/create-user-delegation-sas -type BlobSignatureValues struct { - Version string `param:"sv"` // If not specified, this defaults to Version - Protocol Protocol `param:"spr"` // See the Protocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - SnapshotTime time.Time - Permissions string `param:"sp"` // Create by initializing ContainerPermissions or BlobPermissions and then call String() - IPRange IPRange `param:"sip"` - Identifier string `param:"si"` - ContainerName string - BlobName string // Use "" to create a Container SAS - Directory string // Not nil for a directory SAS (ie sr=d) - CacheControl string // rscc - ContentDisposition string // rscd - ContentEncoding string // rsce - ContentLanguage string // rscl - ContentType string // rsct - BlobVersion string // sr=bv - AuthorizedObjectID string // saoid - UnauthorizedObjectID string // suoid - CorrelationID string // scid - EncryptionScope string `param:"ses"` -} - -func getDirectoryDepth(path string) string { - if path == "" { - return "" - } - return fmt.Sprint(strings.Count(path, "/") + 1) -} - -// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. -func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { - if v.Identifier == "" && (v.ExpiryTime.IsZero() || v.Permissions == "") { - return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") - } - - // Parse the resource - resource := "c" - if !v.SnapshotTime.IsZero() { - resource = "bs" - } else if v.BlobVersion != "" { - resource = "bv" - } else if v.Directory != "" { - resource = "d" - v.BlobName = "" - } else if v.BlobName == "" { - // do nothing - } else { - resource = "b" - } - - // make sure the permission characters are in the correct order - if resource == "c" { - perms, err := parseContainerPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - } else { - perms, err := parseBlobPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - } - - if v.Version == "" { - v.Version = Version - } - startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) - - signedIdentifier := v.Identifier - - // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - stringToSign := strings.Join([]string{ - v.Permissions, - startTime, - expiryTime, - getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName, v.Directory), - signedIdentifier, - v.IPRange.String(), - string(v.Protocol), - v.Version, - resource, - snapshotTime, // signed timestamp - v.EncryptionScope, - v.CacheControl, // rscc - v.ContentDisposition, // rscd - v.ContentEncoding, // rsce - v.ContentLanguage, // rscl - v.ContentType}, // rsct - "\n") - - signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) - if err != nil { - return QueryParameters{}, err - } - - p := QueryParameters{ - // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - 
expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, - encryptionScope: v.EncryptionScope, - - // Container/Blob-specific SAS parameters - resource: resource, - identifier: v.Identifier, - cacheControl: v.CacheControl, - contentDisposition: v.ContentDisposition, - contentEncoding: v.ContentEncoding, - contentLanguage: v.ContentLanguage, - contentType: v.ContentType, - snapshotTime: v.SnapshotTime, - signedDirectoryDepth: getDirectoryDepth(v.Directory), - authorizedObjectID: v.AuthorizedObjectID, - unauthorizedObjectID: v.UnauthorizedObjectID, - correlationID: v.CorrelationID, - // Calculated SAS signature - signature: signature, - } - - return p, nil -} - -// SignWithUserDelegation uses an account's UserDelegationCredential to sign this signature values to produce the proper SAS query parameters. -func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { - if userDelegationCredential == nil { - return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") - } - - if v.ExpiryTime.IsZero() || v.Permissions == "" { - return QueryParameters{}, errors.New("user delegation SAS is missing at least one of these: ExpiryTime or Permissions") - } - - // Parse the resource - resource := "c" - if !v.SnapshotTime.IsZero() { - resource = "bs" - } else if v.BlobVersion != "" { - resource = "bv" - } else if v.Directory != "" { - resource = "d" - v.BlobName = "" - } else if v.BlobName == "" { - // do nothing - } else { - resource = "b" - } - // make sure the permission characters are in the correct order - if resource == "c" { - perms, err := parseContainerPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - } else { - perms, err := parseBlobPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - } - - if v.Version == "" { - v.Version = Version - } - startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) - - udk := exported.GetUDKParams(userDelegationCredential) - - udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{}) - - stringToSign := strings.Join([]string{ - v.Permissions, - startTime, - expiryTime, - getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory), - *udk.SignedOID, - *udk.SignedTID, - udkStart, - udkExpiry, - *udk.SignedService, - *udk.SignedVersion, - v.AuthorizedObjectID, - v.UnauthorizedObjectID, - v.CorrelationID, - v.IPRange.String(), - string(v.Protocol), - v.Version, - resource, - snapshotTime, // signed timestamp - v.EncryptionScope, - v.CacheControl, // rscc - v.ContentDisposition, // rscd - v.ContentEncoding, // rsce - v.ContentLanguage, // rscl - v.ContentType}, // rsct - "\n") - - signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) - if err != nil { - return QueryParameters{}, err - } - - p := QueryParameters{ - // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, - encryptionScope: v.EncryptionScope, - - // Container/Blob-specific SAS parameters - resource: resource, - identifier: v.Identifier, - cacheControl: v.CacheControl, - contentDisposition: v.ContentDisposition, - contentEncoding: v.ContentEncoding, - contentLanguage: 
v.ContentLanguage, - contentType: v.ContentType, - snapshotTime: v.SnapshotTime, - signedDirectoryDepth: getDirectoryDepth(v.Directory), - authorizedObjectID: v.AuthorizedObjectID, - unauthorizedObjectID: v.UnauthorizedObjectID, - correlationID: v.CorrelationID, - // Calculated SAS signature - signature: signature, - } - - //User delegation SAS specific parameters - p.signedOID = *udk.SignedOID - p.signedTID = *udk.SignedTID - p.signedStart = *udk.SignedStart - p.signedExpiry = *udk.SignedExpiry - p.signedService = *udk.SignedService - p.signedVersion = *udk.SignedVersion - - return p, nil -} - -// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. -func getCanonicalName(account string, containerName string, blobName string, directoryName string) string { - // Container: "/blob/account/containername" - // Blob: "/blob/account/containername/blobname" - elements := []string{"/blob/", account, "/", containerName} - if blobName != "" { - elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) - } else if directoryName != "" { - elements = append(elements, "/", directoryName) - } - return strings.Join(elements, "") -} - -// ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. -// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. -// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob -type ContainerPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, FilterByTags, Move, SetImmutabilityPolicy bool - Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts -} - -// String produces the SAS permissions string for an Azure Storage container. -// Call this method to set BlobSignatureValues' Permissions field. -func (p *ContainerPermissions) String() string { - var b bytes.Buffer - if p.Read { - b.WriteRune('r') - } - if p.Add { - b.WriteRune('a') - } - if p.Create { - b.WriteRune('c') - } - if p.Write { - b.WriteRune('w') - } - if p.Delete { - b.WriteRune('d') - } - if p.DeletePreviousVersion { - b.WriteRune('x') - } - if p.List { - b.WriteRune('l') - } - if p.Tag { - b.WriteRune('t') - } - if p.FilterByTags { - b.WriteRune('f') - } - if p.Move { - b.WriteRune('m') - } - if p.Execute { - b.WriteRune('e') - } - if p.ModifyOwnership { - b.WriteRune('o') - } - if p.ModifyPermissions { - b.WriteRune('p') - } - if p.SetImmutabilityPolicy { - b.WriteRune('i') - } - return b.String() -} - -// Parse initializes ContainerPermissions' fields from a string. 
-func parseContainerPermissions(s string) (ContainerPermissions, error) { - p := ContainerPermissions{} // Clear the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'x': - p.DeletePreviousVersion = true - case 'l': - p.List = true - case 't': - p.Tag = true - case 'f': - p.FilterByTags = true - case 'm': - p.Move = true - case 'e': - p.Execute = true - case 'o': - p.ModifyOwnership = true - case 'p': - p.ModifyPermissions = true - case 'i': - p.SetImmutabilityPolicy = true - default: - return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r) - } - } - return p, nil -} - -// BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. -// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. -type BlobPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Tag, Move, Execute, Ownership, Permissions, SetImmutabilityPolicy bool -} - -// String produces the SAS permissions string for an Azure Storage blob. -// Call this method to set BlobSignatureValues' Permissions field. -func (p *BlobPermissions) String() string { - var b bytes.Buffer - if p.Read { - b.WriteRune('r') - } - if p.Add { - b.WriteRune('a') - } - if p.Create { - b.WriteRune('c') - } - if p.Write { - b.WriteRune('w') - } - if p.Delete { - b.WriteRune('d') - } - if p.DeletePreviousVersion { - b.WriteRune('x') - } - if p.PermanentDelete { - b.WriteRune('y') - } - if p.List { - b.WriteRune('l') - } - if p.Tag { - b.WriteRune('t') - } - if p.Move { - b.WriteRune('m') - } - if p.Execute { - b.WriteRune('e') - } - if p.Ownership { - b.WriteRune('o') - } - if p.Permissions { - b.WriteRune('p') - } - if p.SetImmutabilityPolicy { - b.WriteRune('i') - } - return b.String() -} - -// Parse initializes BlobPermissions' fields from a string. -func parseBlobPermissions(s string) (BlobPermissions, error) { - p := BlobPermissions{} // Clear the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'x': - p.DeletePreviousVersion = true - case 'y': - p.PermanentDelete = true - case 'l': - p.List = true - case 't': - p.Tag = true - case 'm': - p.Move = true - case 'e': - p.Execute = true - case 'o': - p.Ownership = true - case 'p': - p.Permissions = true - case 'i': - p.SetImmutabilityPolicy = true - default: - return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r) - } - } - return p, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go deleted file mode 100644 index 57fe053f0..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go +++ /dev/null @@ -1,166 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package sas - -import ( - "net/url" - "strings" - - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" -) - -const ( - snapshot = "snapshot" - versionId = "versionid" -) - -// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. -// Ex: "https://10.132.141.33/accountname/containername" -type IPEndpointStyleInfo struct { - AccountName string // "" if not using IP endpoint style -} - -// URLParts object represents the components that make up an Azure Storage Container/Blob URL. -// NOTE: Changing any SAS-related field requires computing a new SAS signature. -type URLParts struct { - Scheme string // Ex: "https://" - Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80" - IPEndpointStyleInfo IPEndpointStyleInfo - ContainerName string // "" if no container - BlobName string // "" if no blob - Snapshot string // "" if not a snapshot - SAS QueryParameters - UnparsedParams string - VersionID string // "" if not versioning enabled -} - -// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. -// Any other query parameters remain in the UnparsedParams field. -func ParseURL(u string) (URLParts, error) { - uri, err := url.Parse(u) - if err != nil { - return URLParts{}, err - } - - up := URLParts{ - Scheme: uri.Scheme, - Host: uri.Host, - } - - // Find the container & blob names (if any) - if uri.Path != "" { - path := uri.Path - if path[0] == '/' { - path = path[1:] // If path starts with a slash, remove it - } - if shared.IsIPEndpointStyle(up.Host) { - if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob - up.IPEndpointStyleInfo.AccountName = path - path = "" // No ContainerName present in the URL so path should be empty - } else { - up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes - path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) - } - } - - containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) - if containerEndIndex == -1 { // Slash not found; path has container name & no blob name - up.ContainerName = path - } else { - up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes - up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash - } - } - - // Convert the query parameters to a case-sensitive map & trim whitespace - paramsMap := uri.Query() - - up.Snapshot = "" // Assume no snapshot - if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { - up.Snapshot = snapshotStr[0] - // If we recognized the query parameter, remove it from the map - delete(paramsMap, snapshot) - } - - up.VersionID = "" // Assume no versionID - if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok { - up.VersionID = versionIDs[0] - // If we recognized the query parameter, remove it from the map - delete(paramsMap, versionId) // delete "versionid" from paramsMap - delete(paramsMap, "versionId") // delete "versionId" from paramsMap - } - - up.SAS = NewQueryParameters(paramsMap, true) - up.UnparsedParams = paramsMap.Encode() - return up, nil -} - -// String returns a URL object whose fields are initialized from the URLParts fields. 
The URL's RawQuery -// field contains the SAS, snapshot, and unparsed query parameters. -func (up URLParts) String() string { - path := "" - if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { - path += "/" + up.IPEndpointStyleInfo.AccountName - } - // Concatenate container & blob names (if they exist) - if up.ContainerName != "" { - path += "/" + up.ContainerName - if up.BlobName != "" { - path += "/" + up.BlobName - } - } - - rawQuery := up.UnparsedParams - - //If no snapshot is initially provided, fill it in from the SAS query properties to help the user - if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { - up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) - } - - // Concatenate blob version id query parameter (if it exists) - if up.VersionID != "" { - if len(rawQuery) > 0 { - rawQuery += "&" - } - rawQuery += versionId + "=" + up.VersionID - } - - // Concatenate blob snapshot query parameter (if it exists) - if up.Snapshot != "" { - if len(rawQuery) > 0 { - rawQuery += "&" - } - rawQuery += snapshot + "=" + up.Snapshot - } - sas := up.SAS.Encode() - if sas != "" { - if len(rawQuery) > 0 { - rawQuery += "&" - } - rawQuery += sas - } - u := url.URL{ - Scheme: up.Scheme, - Host: up.Host, - Path: path, - RawQuery: rawQuery, - } - return u.String() -} - -type caseInsensitiveValues url.Values // map[string][]string - -func (values caseInsensitiveValues) Get(key string) ([]string, bool) { - key = strings.ToLower(key) - for k, v := range values { - if strings.ToLower(k) == key { - return v, true - } - } - return []string{}, false -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go deleted file mode 100644 index 924fd1081..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go +++ /dev/null @@ -1,94 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package service - -import ( - "context" - "fmt" - "net/url" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" -) - -// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. -// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. -type BatchBuilder struct { - endpoint string - authPolicy policy.Policy - subRequests []*policy.Request - operationType *exported.BlobBatchOperationType -} - -func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { - if bb.operationType == nil { - bb.operationType = &operationType - return nil - } - if *bb.operationType != operationType { - return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) - } - return nil -} - -// Delete operation is used to add delete sub-request to the batch builder. 
-func (bb *BatchBuilder) Delete(containerName string, blobName string, options *BatchDeleteOptions) error { - err := bb.checkOperationType(exported.BatchDeleteOperationType) - if err != nil { - return err - } - - blobName = url.PathEscape(blobName) - blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) - - blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) - if err != nil { - return err - } - - deleteOptions, leaseInfo, accessConditions := options.format() - req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) - if err != nil { - return err - } - - // remove x-ms-version header - exported.UpdateSubRequestHeaders(req) - - bb.subRequests = append(bb.subRequests, req) - return nil -} - -// SetTier operation is used to add set tier sub-request to the batch builder. -func (bb *BatchBuilder) SetTier(containerName string, blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { - err := bb.checkOperationType(exported.BatchSetTierOperationType) - if err != nil { - return err - } - - blobName = url.PathEscape(blobName) - blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) - - blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) - if err != nil { - return err - } - - setTierOptions, leaseInfo, accessConditions := options.format() - req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) - if err != nil { - return err - } - - // remove x-ms-version header - exported.UpdateSubRequestHeaders(req) - - bb.subRequests = append(bb.subRequests, req) - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go deleted file mode 100644 index ccf4159c2..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go +++ /dev/null @@ -1,377 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package service - -import ( - "bytes" - "context" - "errors" - "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" - "net/http" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" -) - -// ClientOptions contains the optional parameters when creating a Client. -type ClientOptions base.ClientOptions - -// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. -type Client base.Client[generated.ServiceClient] - -// NewClient creates an instance of Client with the specified values. -// - serviceURL - the URL of the storage account e.g. 
https://.blob.core.windows.net/ -// - cred - an Azure AD credential, typically obtained via the azidentity module -// - options - client options; pass nil to accept the default values -func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - audience := base.GetAudience((*base.ClientOptions)(options)) - authPolicy := shared.NewStorageChallengePolicy(cred, audience) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientWithNoCredential creates an instance of Client with the specified values. -// This is used to anonymously access a storage account or with a shared access signature (SAS) token. -// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? -// - options - client options; pass nil to accept the default values -func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { - conOptions := shared.GetClientOptions(options) - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - return (*Client)(base.NewServiceClient(serviceURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. -// - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/ -// - cred - a SharedKeyCredential created with the matching storage account and access key -// - options - client options; pass nil to accept the default values -func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { - authPolicy := exported.NewSharedKeyCredPolicy(cred) - conOptions := shared.GetClientOptions(options) - plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - - azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) - if err != nil { - return nil, err - } - - return (*Client)(base.NewServiceClient(serviceURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil -} - -// NewClientFromConnectionString creates an instance of Client with the specified values. -// - connectionString - a connection string for the desired storage account -// - options - client options; pass nil to accept the default values -func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { - parsed, err := shared.ParseConnectionString(connectionString) - if err != nil { - return nil, err - } - - if parsed.AccountKey != "" && parsed.AccountName != "" { - credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) - if err != nil { - return nil, err - } - return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) - } - - return NewClientWithNoCredential(parsed.ServiceURL, options) -} - -// GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object. -// OAuth is required for this call, as well as any role that can delegate access to the storage account. 
-func (s *Client) GetUserDelegationCredential(ctx context.Context, info KeyInfo, o *GetUserDelegationCredentialOptions) (*UserDelegationCredential, error) { - url, err := blob.ParseURL(s.URL()) - if err != nil { - return nil, err - } - - getUserDelegationKeyOptions := o.format() - udk, err := s.generated().GetUserDelegationKey(ctx, info, getUserDelegationKeyOptions) - if err != nil { - return nil, err - } - - return exported.NewUserDelegationCredential(strings.Split(url.Host, ".")[0], udk.UserDelegationKey), nil -} - -func (s *Client) generated() *generated.ServiceClient { - return base.InnerClient((*base.Client[generated.ServiceClient])(s)) -} - -func (s *Client) sharedKey() *SharedKeyCredential { - return base.SharedKey((*base.Client[generated.ServiceClient])(s)) -} - -func (s *Client) credential() any { - return base.Credential((*base.Client[generated.ServiceClient])(s)) -} - -// helper method to return the generated.BlobClient which is used for creating the sub-requests -func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { - return base.InnerClient((*base.Client[generated.BlobClient])(b)) -} - -func (s *Client) getClientOptions() *base.ClientOptions { - return base.GetClientOptions((*base.Client[generated.ServiceClient])(s)) -} - -// URL returns the URL endpoint used by the Client object. -func (s *Client) URL() string { - return s.generated().Endpoint() -} - -// NewContainerClient creates a new container.Client object by concatenating containerName to the end of -// this Client's URL. The new container.Client uses the same request policy pipeline as the Client. -func (s *Client) NewContainerClient(containerName string) *container.Client { - containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName) - return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(exported.ModuleName), s.credential(), s.getClientOptions())) -} - -// CreateContainer is a lifecycle method to creates a new container under the specified account. -// If the container with the same name already exists, a ResourceExistsError will be raised. -// This method returns a client with which to interact with the newly created container. -func (s *Client) CreateContainer(ctx context.Context, containerName string, options *CreateContainerOptions) (CreateContainerResponse, error) { - containerClient := s.NewContainerClient(containerName) - containerCreateResp, err := containerClient.Create(ctx, options) - return containerCreateResp, err -} - -// DeleteContainer is a lifecycle method that marks the specified container for deletion. -// The container and any blobs contained within it are later deleted during garbage collection. -// If the container is not found, a ResourceNotFoundError will be raised. 
-func (s *Client) DeleteContainer(ctx context.Context, containerName string, options *DeleteContainerOptions) (DeleteContainerResponse, error) { - containerClient := s.NewContainerClient(containerName) - containerDeleteResp, err := containerClient.Delete(ctx, options) - return containerDeleteResp, err -} - -// RestoreContainer restores soft-deleted container -// Operation will only be successful if used within the specified number of days set in the delete retention policy -func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName string, deletedContainerVersion string, options *RestoreContainerOptions) (RestoreContainerResponse, error) { - containerClient := s.NewContainerClient(deletedContainerName) - containerRestoreResp, err := containerClient.Restore(ctx, deletedContainerVersion, options) - return containerRestoreResp, err -} - -// GetAccountInfo provides account level information -// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. -func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { - getAccountInfoOptions := o.format() - resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions) - return resp, err -} - -// NewListContainersPager operation returns a pager of the containers under the specified account. -// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2. -func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] { - listOptions := generated.ServiceClientListContainersSegmentOptions{} - if o != nil { - if o.Include.Deleted { - listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeDeleted) - } - if o.Include.Metadata { - listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata) - } - if o.Include.System { - listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeSystem) - } - listOptions.Marker = o.Marker - listOptions.Maxresults = o.MaxResults - listOptions.Prefix = o.Prefix - } - return runtime.NewPager(runtime.PagingHandler[ListContainersResponse]{ - More: func(page ListContainersResponse) bool { - return page.NextMarker != nil && len(*page.NextMarker) > 0 - }, - Fetcher: func(ctx context.Context, page *ListContainersResponse) (ListContainersResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) - } else { - listOptions.Marker = page.NextMarker - req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) - } - if err != nil { - return ListContainersResponse{}, err - } - resp, err := s.generated().InternalClient().Pipeline().Do(req) - if err != nil { - return ListContainersResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ListContainersResponse{}, runtime.NewResponseError(resp) - } - return s.generated().ListContainersSegmentHandleResponse(resp) - }, - }) -} - -// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics -// and CORS (Cross-Origin Resource Sharing) rules. 
-func (s *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { - getPropertiesOptions := o.format() - resp, err := s.generated().GetProperties(ctx, getPropertiesOptions) - return resp, err -} - -// SetProperties Sets the properties of a storage account's Blob service, including Azure Storage Analytics. -// If an element (e.g. analytics_logging) is left as None, the existing settings on the service for that functionality are preserved. -func (s *Client) SetProperties(ctx context.Context, o *SetPropertiesOptions) (SetPropertiesResponse, error) { - properties, setPropertiesOptions := o.format() - resp, err := s.generated().SetProperties(ctx, properties, setPropertiesOptions) - return resp, err -} - -// GetStatistics Retrieves statistics related to replication for the Blob service. -// It is only available when read-access geo-redundant replication is enabled for the storage account. -// With geo-redundant replication, Azure Storage maintains your data durable -// in two locations. In both locations, Azure Storage constantly maintains -// multiple healthy replicas of your data. The location where you read, -// create, update, or delete data is the primary storage account location. -// The primary location exists in the region you choose at the time you -// create an account via the Azure Management Azure classic portal, for -// example, North Central US. The location to which your data is replicated -// is the secondary location. The secondary location is automatically -// determined based on the location of the primary; it is in a second data -// center that resides in the same region as the primary location. Read-only -// access is available from the secondary location, if read-access geo-redundant -// replication is enabled for your storage account. -func (s *Client) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (GetStatisticsResponse, error) { - getStatisticsOptions := o.format() - resp, err := s.generated().GetStatistics(ctx, getStatisticsOptions) - - return resp, err -} - -// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. -// It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { - if s.sharedKey() == nil { - return "", bloberror.MissingSharedKeyCredential - } - st := o.format() - qps, err := sas.AccountSignatureValues{ - Version: sas.Version, - Protocol: sas.ProtocolHTTPS, - Permissions: permissions.String(), - ResourceTypes: resources.String(), - StartTime: st, - ExpiryTime: expiry.UTC(), - }.SignWithSharedKey(s.sharedKey()) - if err != nil { - return "", err - } - - endpoint := s.URL() - if !strings.HasSuffix(endpoint, "/") { - // add a trailing slash to be consistent with the portal - endpoint += "/" - } - endpoint += "?" + qps.Encode() - - return endpoint, nil -} - -// FilterBlobs operation finds all blobs in the storage account whose tags match a given search expression. -// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container. -// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags -// eg. "dog='germanshepherd' and penguin='emperorpenguin'" -// To specify a container, eg. 
"@container=’containerName’ and Name = ‘C’" -func (s *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { - serviceFilterBlobsOptions := o.format() - resp, err := s.generated().FilterBlobs(ctx, where, serviceFilterBlobsOptions) - return resp, err -} - -// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. -// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. -// All sub-requests in the batch must be of the same type, either delete or set tier. -// NOTE: Service level Blob Batch operation is supported only when the Client was created using SharedKeyCredential and Account SAS. -func (s *Client) NewBatchBuilder() (*BatchBuilder, error) { - var authPolicy policy.Policy - - switch cred := s.credential().(type) { - case *azcore.TokenCredential: - authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(s.getClientOptions())) - case *SharedKeyCredential: - authPolicy = exported.NewSharedKeyCredPolicy(cred) - case nil: - // for authentication using SAS - authPolicy = nil - default: - return nil, fmt.Errorf("unrecognised authentication type %T", cred) - } - - return &BatchBuilder{ - endpoint: s.URL(), - authPolicy: authPolicy, - }, nil -} - -// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. -// It builds the request body using the BatchBuilder object passed. -// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. -func (s *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { - if bb == nil || len(bb.subRequests) == 0 { - return SubmitBatchResponse{}, errors.New("batch builder is empty") - } - - // create the request body - batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ - AuthPolicy: bb.authPolicy, - SubRequests: bb.subRequests, - }) - if err != nil { - return SubmitBatchResponse{}, err - } - - reader := bytes.NewReader(batchReq) - rsc := streaming.NopCloser(reader) - multipartContentType := "multipart/mixed; boundary=" + batchID - - resp, err := s.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) - if err != nil { - return SubmitBatchResponse{}, err - } - - batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) - if err != nil { - return SubmitBatchResponse{}, err - } - - return SubmitBatchResponse{ - Responses: batchResponses, - ContentType: resp.ContentType, - RequestID: resp.RequestID, - Version: resp.Version, - }, nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go deleted file mode 100644 index 20665fc2b..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go +++ /dev/null @@ -1,92 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package service - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -const ( - // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. - ContainerNameRoot = "$root" - - // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. - ContainerNameLogs = "$logs" -) - -// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS -type SKUName = generated.SKUName - -const ( - SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS - SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS - SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS - SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS - SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS -) - -// PossibleSKUNameValues returns the possible values for the SKUName const type. -func PossibleSKUNameValues() []SKUName { - return generated.PossibleSKUNameValues() -} - -// ListContainersIncludeType defines values for ListContainersIncludeType -type ListContainersIncludeType = generated.ListContainersIncludeType - -const ( - ListContainersIncludeTypeMetadata ListContainersIncludeType = generated.ListContainersIncludeTypeMetadata - ListContainersIncludeTypeDeleted ListContainersIncludeType = generated.ListContainersIncludeTypeDeleted - ListContainersIncludeTypeSystem ListContainersIncludeType = generated.ListContainersIncludeTypeSystem -) - -// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. -func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { - return generated.PossibleListContainersIncludeTypeValues() -} - -// AccountKind defines values for AccountKind -type AccountKind = generated.AccountKind - -const ( - AccountKindStorage AccountKind = generated.AccountKindStorage - AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage - AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 - AccountKindFileStorage AccountKind = generated.AccountKindFileStorage - AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage -) - -// PossibleAccountKindValues returns the possible values for the AccountKind const type. -func PossibleAccountKindValues() []AccountKind { - return generated.PossibleAccountKindValues() -} - -// BlobGeoReplicationStatus - The status of the secondary location -type BlobGeoReplicationStatus = generated.BlobGeoReplicationStatus - -const ( - BlobGeoReplicationStatusLive BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusLive - BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusBootstrap - BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusUnavailable -) - -// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. -func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { - return generated.PossibleBlobGeoReplicationStatusValues() -} - -// PublicAccessType defines values for AccessType - private (default) or blob or container -type PublicAccessType = generated.PublicAccessType - -const ( - PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob - PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer -) - -// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. 
-func PossiblePublicAccessTypeValues() []PublicAccessType { - return generated.PossiblePublicAccessTypeValues() -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go deleted file mode 100644 index b70724d79..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go +++ /dev/null @@ -1,361 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package service - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" - "time" -) - -// SharedKeyCredential contains an account's name and its primary or secondary key. -type SharedKeyCredential = exported.SharedKeyCredential - -// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the -// storage account's name and either its primary or secondary key. -func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { - return exported.NewSharedKeyCredential(accountName, accountKey) -} - -// UserDelegationCredential contains an account's name and its user delegation key. -type UserDelegationCredential = exported.UserDelegationCredential - -// UserDelegationKey contains UserDelegationKey. -type UserDelegationKey = generated.UserDelegationKey - -// KeyInfo contains KeyInfo struct. -type KeyInfo = generated.KeyInfo - -// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method. -type GetUserDelegationCredentialOptions struct { - // placeholder for future options -} - -func (o *GetUserDelegationCredentialOptions) format() *generated.ServiceClientGetUserDelegationKeyOptions { - return nil -} - -// AccessConditions identifies container-specific access conditions which you optionally set. -type AccessConditions = exported.ContainerAccessConditions - -// BlobTag - a key/value pair on a blob -type BlobTag = generated.BlobTag - -// ContainerItem - An Azure Storage container returned from method Client.ListContainersSegment. -type ContainerItem = generated.ContainerItem - -// ContainerProperties - Properties of a container -type ContainerProperties = generated.ContainerProperties - -// CPKInfo contains a group of parameters for the BlobClient.Download method. -type CPKInfo = generated.CPKInfo - -// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CPKScopeInfo = generated.CPKScopeInfo - -// CreateContainerOptions contains the optional parameters for the container.Client.Create method. -type CreateContainerOptions = container.CreateOptions - -// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. -type DeleteContainerOptions = container.DeleteOptions - -// RestoreContainerOptions contains the optional parameters for the container.Client.Restore method. -type RestoreContainerOptions = container.RestoreOptions - -// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another -// domain. 
Web browsers implement a security restriction known as same-origin policy that -// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin -// domain) to call APIs in another domain. -type CORSRule = generated.CORSRule - -// FilterBlobSegment - The result of a Filter Blobs API call. -type FilterBlobSegment = generated.FilterBlobSegment - -// BlobTags - Blob tags -type BlobTags = generated.BlobTags - -// FilterBlobItem - Blob info returned from method Client.FilterBlobs. -type FilterBlobItem = generated.FilterBlobItem - -// GeoReplication - Geo-Replication information for the Secondary Storage Service. -type GeoReplication = generated.GeoReplication - -// RetentionPolicy - the retention policy which determines how long the associated data should persist. -type RetentionPolicy = generated.RetentionPolicy - -// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs. -type Metrics = generated.Metrics - -// Logging - Azure Analytics Logging settings. -type Logging = generated.Logging - -// StaticWebsite - The properties that enable an account to host a static website. -type StaticWebsite = generated.StaticWebsite - -// StorageServiceProperties - Storage Service Properties. -type StorageServiceProperties = generated.StorageServiceProperties - -// StorageServiceStats - Stats for the storage service. -type StorageServiceStats = generated.StorageServiceStats - -// --------------------------------------------------------------------------------------------------------------------- - -// GetAccountInfoOptions provides set of options for Client.GetAccountInfo -type GetAccountInfoOptions struct { - // placeholder for future options -} - -func (o *GetAccountInfoOptions) format() *generated.ServiceClientGetAccountInfoOptions { - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. -type GetPropertiesOptions struct { - // placeholder for future options -} - -func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions { - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// ListContainersOptions provides set of configurations for ListContainers operation. -type ListContainersOptions struct { - Include ListContainersInclude - - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing operation did not return all containers - // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in - // a subsequent call to request the next page of list items. The marker value is opaque to the client. - Marker *string - - // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, - // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible - // that the service will return fewer results than specified by max results, or than the default of 5000. 
- MaxResults *int32 - - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string -} - -// ListContainersInclude indicates what additional information the service should return with each container. -type ListContainersInclude struct { - // Tells the service whether to return metadata for each container. - Metadata bool - - // Tells the service whether to return soft-deleted containers. - Deleted bool - - // Tells the service whether to return system containers. - System bool -} - -// --------------------------------------------------------------------------------------------------------------------- - -// SetPropertiesOptions provides set of options for Client.SetProperties -type SetPropertiesOptions struct { - // The set of CORS rules. - CORS []*CORSRule - - // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible - // values include version 2008-10-27 and all more recent versions. - DefaultServiceVersion *string - - // the retention policy which determines how long the associated data should persist. - DeleteRetentionPolicy *RetentionPolicy - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - // If version is not set - we default to "1.0" - HourMetrics *Metrics - - // Azure Analytics Logging settings. - // If version is not set - we default to "1.0" - Logging *Logging - - // a summary of request statistics grouped by API in hour or minute aggregates for blobs - // If version is not set - we default to "1.0" - MinuteMetrics *Metrics - - // The properties that enable an account to host a static website. - StaticWebsite *StaticWebsite -} - -func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) { - if o == nil { - return generated.StorageServiceProperties{}, nil - } - - defaultVersion := to.Ptr[string]("1.0") - defaultAge := to.Ptr[int32](0) - emptyStr := to.Ptr[string]("") - - if o.CORS != nil { - for i := 0; i < len(o.CORS); i++ { - if o.CORS[i].AllowedHeaders == nil { - o.CORS[i].AllowedHeaders = emptyStr - } - if o.CORS[i].ExposedHeaders == nil { - o.CORS[i].ExposedHeaders = emptyStr - } - if o.CORS[i].MaxAgeInSeconds == nil { - o.CORS[i].MaxAgeInSeconds = defaultAge - } - } - } - - if o.HourMetrics != nil { - if o.HourMetrics.Version == nil { - o.HourMetrics.Version = defaultVersion - } - } - - if o.Logging != nil { - if o.Logging.Version == nil { - o.Logging.Version = defaultVersion - } - } - - if o.MinuteMetrics != nil { - if o.MinuteMetrics.Version == nil { - o.MinuteMetrics.Version = defaultVersion - } - - } - - return generated.StorageServiceProperties{ - CORS: o.CORS, - DefaultServiceVersion: o.DefaultServiceVersion, - DeleteRetentionPolicy: o.DeleteRetentionPolicy, - HourMetrics: o.HourMetrics, - Logging: o.Logging, - MinuteMetrics: o.MinuteMetrics, - StaticWebsite: o.StaticWebsite, - }, nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. 
-type GetSASURLOptions struct { - StartTime *time.Time -} - -func (o *GetSASURLOptions) format() time.Time { - if o == nil { - return time.Time{} - } - - var st time.Time - if o.StartTime != nil { - st = o.StartTime.UTC() - } else { - st = time.Time{} - } - return st -} - -// --------------------------------------------------------------------------------------------------------------------- - -// GetStatisticsOptions provides set of options for Client.GetStatistics -type GetStatisticsOptions struct { - // placeholder for future options -} - -func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOptions { - return nil -} - -// --------------------------------------------------------------------------------------------------------------------- - -// FilterBlobsOptions provides set of options for Client.FindBlobsByTags. -type FilterBlobsOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - MaxResults *int32 -} - -func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions { - if o == nil { - return nil - } - return &generated.ServiceClientFilterBlobsOptions{ - Marker: o.Marker, - Maxresults: o.MaxResults, - } -} - -// --------------------------------------------------------------------------------------------------------------------- - -// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. -type BatchDeleteOptions struct { - blob.DeleteOptions - VersionID *string - Snapshot *string -} - -func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := generated.BlobClientDeleteOptions{ - DeleteSnapshots: o.DeleteSnapshots, - DeleteType: o.BlobDeleteType, // None by default - Snapshot: o.Snapshot, - VersionID: o.VersionID, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, leaseAccessConditions, modifiedAccessConditions -} - -// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. 
-type BatchSetTierOptions struct { - blob.SetTierOptions - VersionID *string - Snapshot *string -} - -func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { - if o == nil { - return nil, nil, nil - } - - basics := generated.BlobClientSetTierOptions{ - RehydratePriority: o.RehydratePriority, - Snapshot: o.Snapshot, - VersionID: o.VersionID, - } - - leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, leaseAccessConditions, modifiedAccessConditions -} - -// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. -type SubmitBatchOptions struct { - // placeholder for future options -} - -func (o *SubmitBatchOptions) format() *generated.ServiceClientSubmitBatchOptions { - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go deleted file mode 100644 index 2dbf97165..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go +++ /dev/null @@ -1,63 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package service - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -) - -// CreateContainerResponse contains the response from method container.Client.Create. -type CreateContainerResponse = generated.ContainerClientCreateResponse - -// DeleteContainerResponse contains the response from method container.Client.Delete -type DeleteContainerResponse = generated.ContainerClientDeleteResponse - -// RestoreContainerResponse contains the response from method container.Client.Restore -type RestoreContainerResponse = generated.ContainerClientRestoreResponse - -// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. -type GetAccountInfoResponse = generated.ServiceClientGetAccountInfoResponse - -// ListContainersResponse contains the response from method Client.ListContainersSegment. -type ListContainersResponse = generated.ServiceClientListContainersSegmentResponse - -// ListContainersSegmentResponse - An enumeration of containers -type ListContainersSegmentResponse = generated.ListContainersSegmentResponse - -// GetPropertiesResponse contains the response from method Client.GetProperties. -type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse - -// SetPropertiesResponse contains the response from method Client.SetProperties. -type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse - -// GetStatisticsResponse contains the response from method Client.GetStatistics. -type GetStatisticsResponse = generated.ServiceClientGetStatisticsResponse - -// FilterBlobsResponse contains the response from method Client.FilterBlobs. -type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse - -// GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. -type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse - -// SubmitBatchResponse contains the response from method Client.SubmitBatch. 
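The service-level batch flow removed across the last few hunks (Client.NewBatchBuilder, BatchBuilder.Delete, Client.SubmitBatch, plus the option and response types here) likewise stays available from the upstream module; only its vendored copy goes away. A rough usage sketch, not taken from this repository: the environment variable names, container name, and blob name are placeholders, and, as the deleted comments note, service-level batch requires a shared key credential or account SAS:

    package main

    import (
    	"context"
    	"fmt"
    	"log"
    	"os"

    	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
    )

    func main() {
    	account := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME") // placeholder env vars
    	key := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

    	cred, err := service.NewSharedKeyCredential(account, key)
    	if err != nil {
    		log.Fatal(err)
    	}
    	client, err := service.NewClientWithSharedKeyCredential(
    		fmt.Sprintf("https://%s.blob.core.windows.net/", account), cred, nil)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// All sub-requests in a single batch must be of one type: delete or set tier.
    	bb, err := client.NewBatchBuilder()
    	if err != nil {
    		log.Fatal(err)
    	}
    	if err := bb.Delete("backups", "stale-chunk.00000", nil); err != nil {
    		log.Fatal(err)
    	}

    	resp, err := client.SubmitBatch(context.Background(), bb, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("batch returned %d sub-responses", len(resp.Responses))
    }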
-type SubmitBatchResponse struct { - // Responses contains the responses of the sub-requests in the batch - Responses []*BatchResponseItem - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BatchResponseItem contains the response for the individual sub-requests. -type BatchResponseItem = exported.BatchResponseItem diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json deleted file mode 100644 index c6259f7ab..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json +++ /dev/null @@ -1,579 +0,0 @@ -{ - "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", - "parameters": { - "baseName": { - "type": "String" - }, - "tenantId": { - "type": "string", - "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", - "metadata": { - "description": "The tenant ID to which the application and resources belong." - } - }, - "testApplicationOid": { - "type": "string", - "metadata": { - "description": "The principal to assign the role to. This is application object id." - } - } - }, - "variables": { - "mgmtApiVersion": "2022-09-01", - "authorizationApiVersion": "2018-09-01-preview", - "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", - "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", - "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", - "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", - "immutableAccountName": "[concat(parameters('baseName'), 'imm')]", - "primaryEncryptionScopeName": "encryptionScope", - "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", - "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", - "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", - "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", - "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", - "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", - "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", - "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", - "location": "[resourceGroup().location]", - "resourceGroupName": "[resourceGroup().name]", - "subscriptionId": "[subscription().subscriptionId]", - "encryption": { - "services": { - "file": { - "enabled": true - }, - "blob": { - "enabled": true - } - }, - "keySource": "Microsoft.Storage" - }, - "networkAcls": { - "bypass": "AzureServices", - "virtualNetworkRules": [], - "ipRules": [], - "defaultAction": "Allow" - } - }, - "resources": [ - { - "type": "Microsoft.Authorization/roleAssignments", - "apiVersion": "[variables('authorizationApiVersion')]", - "name": 
"[guid(concat('dataContributorRoleId', resourceGroup().id))]", - "properties": { - "roleDefinitionId": "[variables('blobDataContributorRoleId')]", - "principalId": "[parameters('testApplicationOid')]" - } - }, - { - "type": "Microsoft.Authorization/roleAssignments", - "apiVersion": "[variables('authorizationApiVersion')]", - "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", - "properties": { - "roleDefinitionId": "[variables('contributorRoleId')]", - "principalId": "[parameters('testApplicationOid')]" - } - }, - { - "type": "Microsoft.Authorization/roleAssignments", - "apiVersion": "[variables('authorizationApiVersion')]", - "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", - "properties": { - "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", - "principalId": "[parameters('testApplicationOid')]" - } - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('primaryAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Standard_RAGRS", - "tier": "Standard" - }, - "kind": "StorageV2", - "properties": { - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot" - } - }, - { - "type": "Microsoft.Storage/storageAccounts/blobServices", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[concat(variables('primaryAccountName'), '/default')]", - "properties": { - "isVersioningEnabled": true, - "lastAccessTimeTrackingPolicy": { - "enable": true, - "name": "AccessTimeTracking", - "trackingGranularityInDays": 1, - "blobType": [ - "blockBlob" - ] - } - }, - "dependsOn": [ - "[variables('primaryAccountName')]" - ] - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('immutableAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Standard_RAGRS", - "tier": "Standard" - }, - "kind": "StorageV2", - "properties": { - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot", - "immutableStorageWithVersioning": { - "enabled": true - } - } - }, - { - "type": "Microsoft.Storage/storageAccounts/blobServices", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[concat(variables('immutableAccountName'), '/default')]", - "properties": { - "isVersioningEnabled": true, - "lastAccessTimeTrackingPolicy": { - "enable": true, - "name": "AccessTimeTracking", - "trackingGranularityInDays": 1, - "blobType": [ - "blockBlob" - ] - } - }, - "dependsOn": [ - "[variables('immutableAccountName')]" - ] - }, - { - "type": "Microsoft.Storage/storageAccounts/encryptionScopes", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('primaryEncryptionScope')]", - "properties": { - "source": "Microsoft.Storage", - "state": "Enabled" - }, - "dependsOn": [ - "[variables('primaryAccountName')]" - ] - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('secondaryAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Standard_RAGRS", - "tier": "Standard" - }, - "kind": "StorageV2", - "properties": { - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot" - } - }, - { - "type": 
"Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('premiumAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Premium_LRS", - "tier": "Premium" - }, - "kind": "StorageV2", - "properties": { - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot" - } - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('dataLakeAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Standard_RAGRS", - "tier": "Standard" - }, - "kind": "StorageV2", - "properties": { - "isHnsEnabled": true, - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot" - } - }, - { - "type": "Microsoft.Storage/storageAccounts/blobServices", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[concat(variables('dataLakeAccountName'), '/default')]", - "properties": { - "containerDeleteRetentionPolicy": { - "enabled": true, - "days": 1 - } - }, - "dependsOn": [ - "[variables('dataLakeAccountName')]" - ] - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('softDeleteAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Standard_RAGRS", - "tier": "Standard" - }, - "kind": "StorageV2", - "properties": { - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot" - } - }, - { - "type": "Microsoft.Storage/storageAccounts/blobServices", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[concat(variables('softDeleteAccountName'), '/default')]", - "properties": { - "isVersioningEnabled": true, - "deleteRetentionPolicy": { - "allowPermanentDelete": true, - "enabled": true, - "days": 1 - }, - "containerDeleteRetentionPolicy": { - "enabled": true, - "days": 1 - } - }, - "dependsOn": [ - "[variables('softDeleteAccountName')]" - ] - }, - { - "type": "Microsoft.Storage/storageAccounts/fileServices", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[concat(variables('softDeleteAccountName'), '/default')]", - "properties": { - "shareDeleteRetentionPolicy": { - "enabled": true, - "days": 1 - } - }, - "dependsOn": [ - "[variables('softDeleteAccountName')]" - ] - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('premiumFileAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Premium_LRS", - "tier": "Premium" - }, - "kind": "FileStorage", - "properties": { - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot" - } - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": "[variables('webjobsPrimaryAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Standard_RAGRS", - "tier": "Standard" - }, - "kind": "StorageV2", - "properties": { - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot" - } - }, - { - "type": "Microsoft.Storage/storageAccounts", - "apiVersion": "[variables('mgmtApiVersion')]", - "name": 
"[variables('webjobsSecondaryAccountName')]", - "location": "[variables('location')]", - "sku": { - "name": "Standard_RAGRS", - "tier": "Standard" - }, - "kind": "StorageV2", - "properties": { - "networkAcls": "[variables('networkAcls')]", - "supportsHttpsTrafficOnly": true, - "encryption": "[variables('encryption')]", - "accessTier": "Hot" - } - } - ], - "functions": [ - { - "namespace": "url", - "members": { - "serviceEndpointSuffix": { - "parameters": [ - { - "name": "endpoint", - "type": "string" - } - ], - "output": { - "type": "string", - "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" - } - } - } - }, - { - "namespace": "connectionString", - "members": { - "create": { - "parameters": [ - { - "name": "accountName", - "type": "string" - }, - { - "name": "accountKey", - "type": "string" - }, - { - "name": "blobEndpoint", - "type": "string" - }, - { - "name": "queueEndpoint", - "type": "string" - }, - { - "name": "fileEndpoint", - "type": "string" - }, - { - "name": "tableEndpoint", - "type": "string" - } - ], - "output": { - "type": "string", - "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" - } - } - } - } - ], - "outputs": { - "AZURE_STORAGE_ACCOUNT_NAME": { - "type": "string", - "value": "[variables('primaryAccountName')]" - }, - "AZURE_STORAGE_ACCOUNT_KEY": { - "type": "string", - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" - }, - "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" - }, - "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" - }, - "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" - }, - "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" - }, - "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { - "type": "string", - "value": "[variables('secondaryAccountName')]" - }, - "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { - "type": "string", - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" - }, - "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" - }, - 
"SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" - }, - "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" - }, - "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" - }, - "BLOB_STORAGE_ACCOUNT_NAME": { - "type": "string", - "value": "[variables('secondaryAccountName')]" - }, - "BLOB_STORAGE_ACCOUNT_KEY": { - "type": "string", - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" - }, - "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { - "type": "string", - "value": "[variables('premiumAccountName')]" - }, - "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { - "type": "string", - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" - }, - "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" - }, - "DATALAKE_AZURE_STORAGE_ACCOUNT_NAME": { - "type": "string", - "value": "[variables('dataLakeAccountName')]" - }, - "DATALAKE_AZURE_STORAGE_ACCOUNT_KEY": { - "type": "string", - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" - }, - "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" - }, - "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" - }, - "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" - }, - "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" - }, - "IMMUTABLE_AZURE_STORAGE_ACCOUNT_NAME": { - "type": "string", - "value": "[variables('immutableAccountName')]" - }, - "IMMUTABLE_AZURE_STORAGE_ACCOUNT_KEY": { - "type": "string", - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).keys[0].value]" - }, - "IMMUTABLE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { - "type": "string", - "value": 
"[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" - }, - "IMMUTABLE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" - }, - "IMMUTABLE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" - }, - "IMMUTABLE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" - }, - "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { - "type": "string", - "value": "[variables('softDeleteAccountName')]" - }, - "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { - "type": "string", - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" - }, - "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" - }, - "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" - }, - "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" - }, - "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" - }, - "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { - "type": "string", - "value": "[variables('premiumFileAccountName')]" - }, - "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { - "type": "string", - "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" - }, - "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { - "type": "string", - "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" - }, - "AZUREWEBJOBSSTORAGE": { - "type": "string", - "value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', 
variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" - }, - "AZUREWEBJOBSSECONDARYSTORAGE": { - "type": "string", - "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" - }, - "RESOURCE_GROUP_NAME": { - "type": "string", - "value": "[variables('resourceGroupName')]" - }, - "SUBSCRIPTION_ID": { - "type": "string", - "value": "[variables('subscriptionId')]" - }, - "LOCATION": { - "type": "string", - "value": "[variables('location')]" - }, - "AZURE_STORAGE_ENCRYPTION_SCOPE": { - "type": "string", - "value": "[variables('primaryEncryptionScopeName')]" - } - } - } - \ No newline at end of file diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1..000000000 --- a/vendor/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index 261c041e7..000000000 --- a/vendor/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. 
- -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33b..000000000 --- a/vendor/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 
- ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' ' -) - -func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index bcbe00d0c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 7ed5e01c3..000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 index 
1c719db9e..000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 6390abd23..000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - DECCOLM(bool) error - - 
// Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e946..000000000 --- a/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 593b10ab6..000000000 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - oscState.parser.logf("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 03cec7ada..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,151 +0,0 @@ -package ansiterm - -import ( - "errors" - "log" - "os" -) - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state - - logf func(string, ...interface{}) -} - -type Option func(*AnsiParser) - -func WithLogf(f func(string, ...interface{})) Option { - return func(ap *AnsiParser) { - ap.logf = f - } -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { - ap := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - for _, o := range opts { - o(ap) - } - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("ansiParser.log") - logger := log.New(logFile, "", log.LstdFlags) - if ap.logf != nil { - l := ap.logf - ap.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) 
- } - } else { - ap.logf = logger.Printf - } - } - - if ap.logf == nil { - ap.logf = func(string, ...interface{}) {} - } - - ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} - ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} - ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} - ap.escape = escapeState{baseState{name: "Escape", parser: ap}} - ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} - ap.error = errorState{baseState{name: "Error", parser: ap}} - ap.ground = groundState{baseState{name: "Ground", parser: ap}} - ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} - - ap.stateMap = []state{ - ap.csiEntry, - ap.csiParam, - ap.dcsEntry, - ap.escape, - ap.escapeIntermediate, - ap.error, - ap.ground, - ap.oscString, - } - - ap.currState = getState(initialState, ap.stateMap) - - ap.logf("CreateParser: parser %p", ap) - return ap -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - ap.logf("WARNING: newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index de0a1f9cd..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,99 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - return i -} - -func getInts(params []string, minCount int, dflt int) []int { - 
ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 0bb5e51e9..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,119 +0,0 @@ -package ansiterm - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - ap.logf("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - ap.logf("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) - ap.logf("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - ap.logf("Parsed params: %v with length: %d", params, len(params)) - - ap.logf("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - case "T": - return 
ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd1..000000000 --- a/vendor/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493..000000000 --- a/vendor/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index 5599082ae..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,196 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" - windows "golang.org/x/sys/windows" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - - // syscall uses negative numbers - // windows package uses very big uint32 - // Keep these switches split so we don't have to convert ints too much. - switch uint32(nFile) { - case windows.STD_INPUT_HANDLE: - file = os.Stdin - case windows.STD_OUTPUT_HANDLE: - file = os.Stdout - case windows.STD_ERROR_HANDLE: - file = os.Stderr - default: - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 6055e33b9..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,327 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. 
-// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. -// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - ENABLE_AUTO_POSITION = 0x0100 - ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 - DISABLE_NEWLINE_AUTO_RETURN = 0x0008 - ENABLE_LVB_GRID_WORLDWIDE = 0x0010 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index 3ee06ea72..000000000 --- 
a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - h.logf("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = xCurrent, yCurrent - 
coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 2d27fa1d0..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h *windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d7..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 2d40fb75a..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,743 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "log" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" -) - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD - logf func(string, ...interface{}) -} - -type Option func(*windowsAnsiEventHandler) - -func WithLogf(f func(string, ...interface{})) Option { - return func(w *windowsAnsiEventHandler) { - w.logf = f - } -} - -func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - h := &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } - for _, o := range opts { - o(h) - } - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("winEventHandler.log") - logger := log.New(logFile, "", log.LstdFlags) - if h.logf != nil { - l := h.logf - h.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) - } - } else { - h.logf = logger.Printf - } - } - - if h.logf == nil { - h.logf = func(string, ...interface{}) {} - } - - return h -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - h.logf("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. 
- h.logf("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. -func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - h.logf("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - h.logf("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - h.logf("set window failed: %v", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - h.logf("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - h.logf("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - h.logf("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - h.logf("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - h.logf("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - h.logf("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes deleted file mode 100644 index 94f480de9..000000000 --- a/vendor/github.com/Microsoft/go-winio/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index 815e20660..000000000 --- a/vendor/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -.vscode/ - -*.exe - -# testing -testdata - -# go workspaces -go.work -go.work.sum diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml deleted file mode 100644 index faedfe937..000000000 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ /dev/null @@ -1,147 +0,0 @@ -linters: - enable: - # style - - containedctx # struct contains a context - - dupl # duplicate code - - errname # erorrs are named correctly - - nolintlint # "//nolint" directives are properly explained - - revive # golint replacement - - unconvert # unnecessary conversions - - wastedassign - - # bugs, performance, unused, etc ... 
- - contextcheck # function uses a non-inherited context - - errorlint # errors not wrapped for 1.13 - - exhaustive # check exhaustiveness of enum switch statements - - gofmt # files are gofmt'ed - - gosec # security - - nilerr # returns nil even with non-nil error - - thelper # test helpers without t.Helper() - - unparam # unused function params - -issues: - exclude-dirs: - - pkg/etw/sample - - exclude-rules: - # err is very often shadowed in nested scopes - - linters: - - govet - text: '^shadow: declaration of "err" shadows declaration' - - # ignore long lines for skip autogen directives - - linters: - - revive - text: "^line-length-limit: " - source: "^//(go:generate|sys) " - - #TODO: remove after upgrading to go1.18 - # ignore comment spacing for nolint and sys directives - - linters: - - revive - text: "^comment-spacings: no space between comment delimiter and comment text" - source: "//(cspell:|nolint:|sys |todo)" - - # not on go 1.18 yet, so no any - - linters: - - revive - text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'" - - # allow unjustified ignores of error checks in defer statements - - linters: - - nolintlint - text: "^directive `//nolint:errcheck` should provide explanation" - source: '^\s*defer ' - - # allow unjustified ignores of error lints for io.EOF - - linters: - - nolintlint - text: "^directive `//nolint:errorlint` should provide explanation" - source: '[=|!]= io.EOF' - - -linters-settings: - exhaustive: - default-signifies-exhaustive: true - govet: - enable-all: true - disable: - # struct order is often for Win32 compat - # also, ignore pointer bytes/GC issues for now until performance becomes an issue - - fieldalignment - nolintlint: - require-explanation: true - require-specific: true - revive: - # revive is more configurable than static check, so likely the preferred alternative to static-check - # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997) - enable-all-rules: - true - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md - rules: - # rules with required arguments - - name: argument-limit - disabled: true - - name: banned-characters - disabled: true - - name: cognitive-complexity - disabled: true - - name: cyclomatic - disabled: true - - name: file-header - disabled: true - - name: function-length - disabled: true - - name: function-result-limit - disabled: true - - name: max-public-structs - disabled: true - # geneally annoying rules - - name: add-constant # complains about any and all strings and integers - disabled: true - - name: confusing-naming # we frequently use "Foo()" and "foo()" together - disabled: true - - name: flag-parameter # excessive, and a common idiom we use - disabled: true - - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead - disabled: true - # general config - - name: line-length-limit - arguments: - - 140 - - name: var-naming - arguments: - - [] - - - CID - - CRI - - CTRD - - DACL - - DLL - - DOS - - ETW - - FSCTL - - GCS - - GMSA - - HCS - - HV - - IO - - LCOW - - LDAP - - LPAC - - LTSC - - MMIO - - NT - - OCI - - PMEM - - PWSH - - RX - - SACl - - SID - - SMB - - TX - - VHD - - VHDX - - VMID - - VPCI - - WCOW - - WIM diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS deleted file mode 100644 index ae1b4942b..000000000 --- a/vendor/github.com/Microsoft/go-winio/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ - * @microsoft/containerplat diff --git 
a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE deleted file mode 100644 index b8b569d77..000000000 --- a/vendor/github.com/Microsoft/go-winio/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 7474b4f0b..000000000 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -## Contributing - -This project welcomes contributions and suggestions. -Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that -you have the right to, and actually do, grant us the rights to use your contribution. -For details, visit [Microsoft CLA](https://cla.microsoft.com). - -When you submit a pull request, a CLA-bot will automatically determine whether you need to -provide a CLA and decorate the PR appropriately (e.g., label, comment). -Simply follow the instructions provided by the bot. -You will only need to do this once across all repos using our CLA. - -Additionally, the pull request pipeline requires the following steps to be performed before -mergining. - -### Code Sign-Off - -We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] -to certify they either authored the work themselves or otherwise have permission to use it in this project. - -A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s]. - -Please see [the developer certificate](https://developercertificate.org) for more info, -as well as to make sure that you can attest to the rules listed. 
-Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. - -### Linting - -Code must pass a linting stage, which uses [`golangci-lint`][lint]. -The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run -automatically with VSCode by adding the following to your workspace or folder settings: - -```json - "go.lintTool": "golangci-lint", - "go.lintOnSave": "package", -``` - -Additional editor [integrations options are also available][lint-ide]. - -Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root: - -```shell -# use . or specify a path to only lint a package -# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" -> golangci-lint run ./... -``` - -### Go Generate - -The pipeline checks that auto-generated code, via `go generate`, are up to date. - -This can be done for the entire repo: - -```shell -> go generate ./... -``` - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Special Thanks - -Thanks to [natefinch][natefinch] for the inspiration for this library. -See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation. - -[lint]: https://golangci-lint.run/ -[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration -[lint-install]: https://golangci-lint.run/usage/install/#local-installation - -[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s -[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff - -[natefinch]: https://github.com/natefinch diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md deleted file mode 100644 index 869fdfe2b..000000000 --- a/vendor/github.com/Microsoft/go-winio/SECURITY.md +++ /dev/null @@ -1,41 +0,0 @@ - - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. - -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). - -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. 
If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). - - diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go deleted file mode 100644 index b54341daa..000000000 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ /dev/null @@ -1,287 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "runtime" - "unicode/utf16" - - "github.com/Microsoft/go-winio/internal/fs" - "golang.org/x/sys/windows" -) - -//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite - -const ( - BackupData = uint32(iota + 1) - BackupEaData - BackupSecurity - BackupAlternateData - BackupLink - BackupPropertyData - BackupObjectId //revive:disable-line:var-naming ID, not Id - BackupReparseData - BackupSparseBlock - BackupTxfsData -) - -const ( - StreamSparseAttributes = uint32(8) -) - -//nolint:revive // var-naming: ALL_CAPS -const ( - WRITE_DAC = windows.WRITE_DAC - WRITE_OWNER = windows.WRITE_OWNER - ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY -) - -// BackupHeader represents a backup stream of a file. -type BackupHeader struct { - //revive:disable-next-line:var-naming ID, not Id - Id uint32 // The backup stream ID - Attributes uint32 // Stream attributes - Size int64 // The size of the stream in bytes - Name string // The name of the stream (for BackupAlternateData only). - Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). -} - -type win32StreamID struct { - StreamID uint32 - Attributes uint32 - Size uint64 - NameSize uint32 -} - -// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series -// of BackupHeader values. -type BackupStreamReader struct { - r io.Reader - bytesLeft int64 -} - -// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. 
-func NewBackupStreamReader(r io.Reader) *BackupStreamReader { - return &BackupStreamReader{r, 0} -} - -// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if -// it was not completely read. -func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this - if s, ok := r.r.(io.Seeker); ok { - // Make sure Seek on io.SeekCurrent sometimes succeeds - // before trying the actual seek. - if _, err := s.Seek(0, io.SeekCurrent); err == nil { - if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { - return nil, err - } - r.bytesLeft = 0 - } - } - if _, err := io.Copy(io.Discard, r); err != nil { - return nil, err - } - } - var wsi win32StreamID - if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { - return nil, err - } - hdr := &BackupHeader{ - Id: wsi.StreamID, - Attributes: wsi.Attributes, - Size: int64(wsi.Size), - } - if wsi.NameSize != 0 { - name := make([]uint16, int(wsi.NameSize/2)) - if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { - return nil, err - } - hdr.Name = windows.UTF16ToString(name) - } - if wsi.StreamID == BackupSparseBlock { - if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { - return nil, err - } - hdr.Size -= 8 - } - r.bytesLeft = hdr.Size - return hdr, nil -} - -// Read reads from the current backup stream. -func (r *BackupStreamReader) Read(b []byte) (int, error) { - if r.bytesLeft == 0 { - return 0, io.EOF - } - if int64(len(b)) > r.bytesLeft { - b = b[:r.bytesLeft] - } - n, err := r.r.Read(b) - r.bytesLeft -= int64(n) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if r.bytesLeft == 0 && err == nil { - err = io.EOF - } - return n, err -} - -// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. -type BackupStreamWriter struct { - w io.Writer - bytesLeft int64 -} - -// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. -func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { - return &BackupStreamWriter{w, 0} -} - -// WriteHeader writes the next backup stream header and prepares for calls to Write(). -func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { - if w.bytesLeft != 0 { - return fmt.Errorf("missing %d bytes", w.bytesLeft) - } - name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamID{ - StreamID: hdr.Id, - Attributes: hdr.Attributes, - Size: uint64(hdr.Size), - NameSize: uint32(len(name) * 2), - } - if hdr.Id == BackupSparseBlock { - // Include space for the int64 block offset - wsi.Size += 8 - } - if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { - return err - } - if len(name) != 0 { - if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { - return err - } - } - if hdr.Id == BackupSparseBlock { - if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { - return err - } - } - w.bytesLeft = hdr.Size - return nil -} - -// Write writes to the current backup stream. -func (w *BackupStreamWriter) Write(b []byte) (int, error) { - if w.bytesLeft < int64(len(b)) { - return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) - } - n, err := w.w.Write(b) - w.bytesLeft -= int64(n) - return n, err -} - -// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. 
-type BackupFileReader struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, -// Read will attempt to read the security descriptor of the file. -func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { - r := &BackupFileReader{f, includeSecurity, 0} - return r -} - -// Read reads a backup stream from the file by calling the Win32 API BackupRead(). -func (r *BackupFileReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) - if err != nil { - return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} - } - runtime.KeepAlive(r.f) - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees Win32 resources associated with the BackupFileReader. It does not close -// the underlying file. -func (r *BackupFileReader) Close() error { - if r.ctx != 0 { - _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) - runtime.KeepAlive(r.f) - r.ctx = 0 - } - return nil -} - -// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. -type BackupFileWriter struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, -// Write() will attempt to restore the security descriptor from the stream. -func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { - w := &BackupFileWriter{f, includeSecurity, 0} - return w -} - -// Write restores a portion of the file using the provided backup stream. -func (w *BackupFileWriter) Write(b []byte) (int, error) { - var bytesWritten uint32 - err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) - if err != nil { - return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} - } - runtime.KeepAlive(w.f) - if int(bytesWritten) != len(b) { - return int(bytesWritten), errors.New("not all bytes could be written") - } - return len(b), nil -} - -// Close frees Win32 resources associated with the BackupFileWriter. It does not -// close the underlying file. -func (w *BackupFileWriter) Close() error { - if w.ctx != 0 { - _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) - runtime.KeepAlive(w.f) - w.ctx = 0 - } - return nil -} - -// OpenForBackup opens a file or directory, potentially skipping access checks if the backup -// or restore privileges have been acquired. -// -// If the file opened was a directory, it cannot be used with Readdir(). -func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - h, err := fs.CreateFile(path, - fs.AccessMask(access), - fs.FileShareMode(share), - nil, - fs.FileCreationDisposition(createmode), - fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT, - 0, - ) - if err != nil { - err = &os.PathError{Op: "open", Path: path, Err: err} - return nil, err - } - return os.NewFile(uintptr(h), path), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go deleted file mode 100644 index 1f5bfe2d5..000000000 --- a/vendor/github.com/Microsoft/go-winio/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// This package provides utilities for efficiently performing Win32 IO operations in Go. 
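For reference, the backup-stream helpers deleted here (OpenForBackup, BackupFileReader, BackupStreamReader) keep working from the upstream github.com/Microsoft/go-winio module after un-vendoring. A minimal, Windows-only sketch that lists a file's backup streams; the path is a hypothetical example, not something this repository uses:

```go
//go:build windows

// Illustrative sketch only; not code from this repository. It enumerates the
// backup streams of a file using the go-winio API shown above.
package main

import (
	"fmt"
	"io"
	"log"

	"github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	// Open the file with backup semantics (see OpenForBackup above).
	f, err := winio.OpenForBackup(`C:\temp\example.txt`,
		windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// BackupFileReader yields the raw BackupRead stream; BackupStreamReader
	// splits it into per-stream headers and bodies.
	br := winio.NewBackupFileReader(f, false)
	defer br.Close()

	sr := winio.NewBackupStreamReader(br)
	for {
		hdr, err := sr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}
```

Passing includeSecurity=false keeps the sketch free of the backup/restore privileges that reading security descriptors would otherwise require.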
-// Currently, this package is provides support for genreal IO and management of -// - named pipes -// - files -// - [Hyper-V sockets] -// -// This code is similar to Go's [net] package, and uses IO completion ports to avoid -// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines. -// -// This limits support to Windows Vista and newer operating systems. -// -// Additionally, this package provides support for: -// - creating and managing GUIDs -// - writing to [ETW] -// - opening and manageing VHDs -// - parsing [Windows Image files] -// - auto-generating Win32 API code -// -// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service -// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw- -// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images -package winio diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go deleted file mode 100644 index e104dbdfd..000000000 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ /dev/null @@ -1,137 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "errors" -) - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// ExtendedAttribute represents a single Windows EA. -type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return ea, nb, err - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return ea, nb, err - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return ea, nb, err -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
-func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return eas, err -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) - if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. -func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index fe82a180d..000000000 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,320 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" - - "golang.org/x/sys/windows" -) - -//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (*timeoutError) Error() string { return "i/o timeout" } -func (*timeoutError) Timeout() bool { return true } -func (*timeoutError) Temporary() bool { return true } - -type timeoutChan chan struct{} - -var ioInitOnce sync.Once -var ioCompletionPort windows.Handle - -// ioResult contains the result of an asynchronous IO operation. -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO. 
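For reference, the extended-attribute helpers deleted here can be exercised entirely in memory. A small, Windows-only sketch that round-trips two hypothetical EAs through EncodeExtendedAttributes and DecodeExtendedAttributes:

```go
//go:build windows

// Illustrative sketch only; not code from this repository. It round-trips a
// FILE_FULL_EA_INFORMATION buffer through the EA helpers shown above.
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	in := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")},  // hypothetical EA
		{Name: "user.origin", Value: []byte("example")}, // hypothetical EA
	}

	// Encode into the binary layout consumed by BackupWrite / ZwSetEaFile.
	buf, err := winio.EncodeExtendedAttributes(in)
	if err != nil {
		log.Fatal(err)
	}

	// Decode the buffer back and confirm the round trip.
	out, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, ea := range out {
		fmt.Printf("%s = %q\n", ea.Name, ea.Value)
	}
}
```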
-type ioOperation struct { - o windows.Overlapped - ch chan ioResult -} - -func initIO() { - h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. -type win32File struct { - handle windows.Handle - wg sync.WaitGroup - wgLock sync.RWMutex - closing atomic.Bool - socket bool - readDeadline deadlineHandler - writeDeadline deadlineHandler -} - -type deadlineHandler struct { - setLock sync.Mutex - channel timeoutChan - channelLock sync.RWMutex - timer *time.Timer - timedout atomic.Bool -} - -// makeWin32File makes a new win32File from an existing file handle. -func makeWin32File(h windows.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIO) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - f.readDeadline.channel = make(timeoutChan) - f.writeDeadline.channel = make(timeoutChan) - return f, nil -} - -// Deprecated: use NewOpenFile instead. -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return NewOpenFile(windows.Handle(h)) -} - -func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) { - // If we return the result of makeWin32File directly, it can result in an - // interface-wrapped nil, rather than a nil interface value. - f, err := makeWin32File(h) - if err != nil { - return nil, err - } - return f, nil -} - -// closeHandle closes the resources associated with a Win32 handle. -func (f *win32File) closeHandle() { - f.wgLock.Lock() - // Atomically set that we are closing, releasing the resources only once. - if !f.closing.Swap(true) { - f.wgLock.Unlock() - // cancel all IO and wait for it to complete - _ = cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - windows.Close(f.handle) - f.handle = 0 - } else { - f.wgLock.Unlock() - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - return nil -} - -// IsClosed checks if the file has been closed. -func (f *win32File) IsClosed() bool { - return f.closing.Load() -} - -// prepareIO prepares for a new IO operation. -// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIO() (*ioOperation, error) { - f.wgLock.RLock() - if f.closing.Load() { - f.wgLock.RUnlock() - return nil, ErrFileClosed - } - f.wg.Add(1) - f.wgLock.RUnlock() - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever. -func ioCompletionProcessor(h windows.Handle) { - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// todo: helsaawy - create an asyncIO version that takes a context - -// asyncIO processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. 
-func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno - return int(bytes), err - } - - if f.closing.Load() { - _ = cancelIoEx(f.handle, &c.o) - } - - var timeout timeoutChan - if d != nil { - d.channelLock.Lock() - timeout = d.channel - d.channelLock.Unlock() - } - - var r ioResult - select { - case r = <-c.ch: - err = r.err - if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - if f.closing.Load() { - err = ErrFileClosed - } - } else if err != nil && f.socket { - // err is from Win32. Query the overlapped structure to get the winsock error. - var bytes, flags uint32 - err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) - } - case <-timeout: - _ = cancelIoEx(f.handle, &c.o) - r = <-c.ch - err = r.err - if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - err = ErrTimeout - } - } - - // runtime.KeepAlive is needed, as c is passed via native - // code to ioCompletionProcessor, c must remain alive - // until the channel read is complete. - // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? - runtime.KeepAlive(c) - return int(r.bytes), err -} - -// Read reads from a file handle. -func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIO() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.readDeadline.timedout.Load() { - return 0, ErrTimeout - } - - var bytes uint32 - err = windows.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIO(c, &f.readDeadline, bytes, err) - runtime.KeepAlive(b) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno - return 0, io.EOF - } - return n, err -} - -// Write writes to a file handle. -func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIO() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.writeDeadline.timedout.Load() { - return 0, ErrTimeout - } - - var bytes uint32 - err = windows.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) - runtime.KeepAlive(b) - return n, err -} - -func (f *win32File) SetReadDeadline(deadline time.Time) error { - return f.readDeadline.set(deadline) -} - -func (f *win32File) SetWriteDeadline(deadline time.Time) error { - return f.writeDeadline.set(deadline) -} - -func (f *win32File) Flush() error { - return windows.FlushFileBuffers(f.handle) -} - -func (f *win32File) Fd() uintptr { - return uintptr(f.handle) -} - -func (d *deadlineHandler) set(deadline time.Time) error { - d.setLock.Lock() - defer d.setLock.Unlock() - - if d.timer != nil { - if !d.timer.Stop() { - <-d.channel - } - d.timer = nil - } - d.timedout.Store(false) - - select { - case <-d.channel: - d.channelLock.Lock() - d.channel = make(chan struct{}) - d.channelLock.Unlock() - default: - } - - if deadline.IsZero() { - return nil - } - - timeoutIO := func() { - d.timedout.Store(true) - close(d.channel) - } - - now := time.Now() - duration := deadline.Sub(now) - if deadline.After(now) { - // Deadline is in the future, set a timer to wait - d.timer = time.AfterFunc(duration, timeoutIO) - } else { - // Deadline is in the past. Cancel all pending IO now. 
- timeoutIO() - } - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go deleted file mode 100644 index c860eb991..000000000 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ /dev/null @@ -1,106 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -// FileBasicInfo contains file access time and file attributes information. -type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime - FileAttributes uint32 - _ uint32 // padding -} - -// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing -// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64 -// alignment is necessary to pass this as FILE_BASIC_INFO. -type alignedFileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64 - FileAttributes uint32 - _ uint32 // padding -} - -// GetFileBasicInfo retrieves times and attributes for a file. -func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &alignedFileBasicInfo{} - if err := windows.GetFileInformationByHandleEx( - windows.Handle(f.Fd()), - windows.FileBasicInfo, - (*byte)(unsafe.Pointer(bi)), - uint32(unsafe.Sizeof(*bi)), - ); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the - // public API of this module. The data may be unnecessarily aligned. - return (*FileBasicInfo)(unsafe.Pointer(bi)), nil -} - -// SetFileBasicInfo sets times and attributes for a file. -func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is - // suitable to pass to GetFileInformationByHandleEx. - biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi)) - if err := windows.SetFileInformationByHandle( - windows.Handle(f.Fd()), - windows.FileBasicInfo, - (*byte)(unsafe.Pointer(&biAligned)), - uint32(unsafe.Sizeof(biAligned)), - ); err != nil { - return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return nil -} - -// FileStandardInfo contains extended information for the file. -// FILE_STANDARD_INFO in WinBase.h -// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info -type FileStandardInfo struct { - AllocationSize, EndOfFile int64 - NumberOfLinks uint32 - DeletePending, Directory bool -} - -// GetFileStandardInfo retrieves ended information for the file. -func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { - si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), - windows.FileStandardInfo, - (*byte)(unsafe.Pointer(si)), - uint32(unsafe.Sizeof(*si))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return si, nil -} - -// FileIDInfo contains the volume serial number and file ID for a file. This pair should be -// unique on a system. -type FileIDInfo struct { - VolumeSerialNumber uint64 - FileID [16]byte -} - -// GetFileID retrieves the unique (volume, file ID) pair for a file. 
-func GetFileID(f *os.File) (*FileIDInfo, error) { - fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx( - windows.Handle(f.Fd()), - windows.FileIdInfo, - (*byte)(unsafe.Pointer(fileID)), - uint32(unsafe.Sizeof(*fileID)), - ); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return fileID, nil -} diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go deleted file mode 100644 index c4fdd9d4a..000000000 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ /dev/null @@ -1,582 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "time" - "unsafe" - - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/socket" - "github.com/Microsoft/go-winio/pkg/guid" -) - -const afHVSock = 34 // AF_HYPERV - -// Well known Service and VM IDs -// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards - -// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. -func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 - return guid.GUID{} -} - -// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. -func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff - return guid.GUID{ - Data1: 0xffffffff, - Data2: 0xffff, - Data3: 0xffff, - Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - } -} - -// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. -func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 - return guid.GUID{ - Data1: 0xe0e16197, - Data2: 0xdd56, - Data3: 0x4a10, - Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, - } -} - -// HvsockGUIDSiloHost is the address of a silo's host partition: -// - The silo host of a hosted silo is the utility VM. -// - The silo host of a silo on a physical host is the physical host. -func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 - return guid.GUID{ - Data1: 0x36bd0c5c, - Data2: 0x7276, - Data3: 0x4223, - Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, - } -} - -// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. -func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd - return guid.GUID{ - Data1: 0x90db8b89, - Data2: 0xd35, - Data3: 0x4f79, - Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, - } -} - -// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. -// Listening on this VmId accepts connection from: -// - Inside silos: silo host partition. -// - Inside hosted silo: host of the VM. -// - Inside VM: VM host. -// - Physical host: Not supported. -func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 - return guid.GUID{ - Data1: 0xa42e7cda, - Data2: 0xd03f, - Data3: 0x480c, - Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, - } -} - -// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. 
-func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 - return guid.GUID{ - Data2: 0xfacb, - Data3: 0x11e6, - Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, - } -} - -// An HvsockAddr is an address for a AF_HYPERV socket. -type HvsockAddr struct { - VMID guid.GUID - ServiceID guid.GUID -} - -type rawHvsockAddr struct { - Family uint16 - _ uint16 - VMID guid.GUID - ServiceID guid.GUID -} - -var _ socket.RawSockaddr = &rawHvsockAddr{} - -// Network returns the address's network name, "hvsock". -func (*HvsockAddr) Network() string { - return "hvsock" -} - -func (addr *HvsockAddr) String() string { - return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) -} - -// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. -func VsockServiceID(port uint32) guid.GUID { - g := hvsockVsockServiceTemplate() // make a copy - g.Data1 = port - return g -} - -func (addr *HvsockAddr) raw() rawHvsockAddr { - return rawHvsockAddr{ - Family: afHVSock, - VMID: addr.VMID, - ServiceID: addr.ServiceID, - } -} - -func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { - addr.VMID = raw.VMID - addr.ServiceID = raw.ServiceID -} - -// Sockaddr returns a pointer to and the size of this struct. -// -// Implements the [socket.RawSockaddr] interface, and allows use in -// [socket.Bind] and [socket.ConnectEx]. -func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { - return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil -} - -// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`. -func (r *rawHvsockAddr) FromBytes(b []byte) error { - n := int(unsafe.Sizeof(rawHvsockAddr{})) - - if len(b) < n { - return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) - } - - copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) - if r.Family != afHVSock { - return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) - } - - return nil -} - -// HvsockListener is a socket listener for the AF_HYPERV address family. -type HvsockListener struct { - sock *win32File - addr HvsockAddr -} - -var _ net.Listener = &HvsockListener{} - -// HvsockConn is a connected socket of the AF_HYPERV address family. -type HvsockConn struct { - sock *win32File - local, remote HvsockAddr -} - -var _ net.Conn = &HvsockConn{} - -func newHVSocket() (*win32File, error) { - fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - f, err := makeWin32File(fd) - if err != nil { - windows.Close(fd) - return nil, err - } - f.socket = true - return f, nil -} - -// ListenHvsock listens for connections on the specified hvsock address. -func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { - l := &HvsockListener{addr: *addr} - - var sock *win32File - sock, err = newHVSocket() - if err != nil { - return nil, l.opErr("listen", err) - } - defer func() { - if err != nil { - _ = sock.Close() - } - }() - - sa := addr.raw() - err = socket.Bind(sock.handle, &sa) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("socket", err)) - } - err = windows.Listen(sock.handle, 16) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("listen", err)) - } - return &HvsockListener{sock: sock, addr: *addr}, nil -} - -func (l *HvsockListener) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} -} - -// Addr returns the listener's network address. 
-func (l *HvsockListener) Addr() net.Addr { - return &l.addr -} - -// Accept waits for the next connection and returns it. -func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHVSocket() - if err != nil { - return nil, l.opErr("accept", err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := l.sock.prepareIO() - if err != nil { - return nil, l.opErr("accept", err) - } - defer l.sock.wg.Done() - - // AcceptEx, per documentation, requires an extra 16 bytes per address. - // - // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex - const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) - var addrbuf [addrlen * 2]byte - - var bytes uint32 - err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) - if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { - return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) - } - - conn := &HvsockConn{ - sock: sock, - } - // The local address returned in the AcceptEx buffer is the same as the Listener socket's - // address. However, the service GUID reported by GetSockName is different from the Listeners - // socket, and is sometimes the same as the local address of the socket that dialed the - // address, with the service GUID.Data1 incremented, but othertimes is different. - // todo: does the local address matter? is the listener's address or the actual address appropriate? - conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) - conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) - - // initialize the accepted socket and update its properties with those of the listening socket - if err = windows.Setsockopt(sock.handle, - windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, - (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { - return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) - } - - sock = nil - return conn, nil -} - -// Close closes the listener, causing any pending Accept calls to fail. -func (l *HvsockListener) Close() error { - return l.sock.Close() -} - -// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). -type HvsockDialer struct { - // Deadline is the time the Dial operation must connect before erroring. - Deadline time.Time - - // Retries is the number of additional connects to try if the connection times out, is refused, - // or the host is unreachable - Retries uint - - // RetryWait is the time to wait after a connection error to retry - RetryWait time.Duration - - rt *time.Timer // redial wait timer -} - -// Dial the Hyper-V socket at addr. -// -// See [HvsockDialer.Dial] for more information. -func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { - return (&HvsockDialer{}).Dial(ctx, addr) -} - -// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. -// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between -// retries. -// -// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. 
-func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { - op := "dial" - // create the conn early to use opErr() - conn = &HvsockConn{ - remote: *addr, - } - - if !d.Deadline.IsZero() { - var cancel context.CancelFunc - ctx, cancel = context.WithDeadline(ctx, d.Deadline) - defer cancel() - } - - // preemptive timeout/cancellation check - if err = ctx.Err(); err != nil { - return nil, conn.opErr(op, err) - } - - sock, err := newHVSocket() - if err != nil { - return nil, conn.opErr(op, err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - - sa := addr.raw() - err = socket.Bind(sock.handle, &sa) - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("bind", err)) - } - - c, err := sock.prepareIO() - if err != nil { - return nil, conn.opErr(op, err) - } - defer sock.wg.Done() - var bytes uint32 - for i := uint(0); i <= d.Retries; i++ { - err = socket.ConnectEx( - sock.handle, - &sa, - nil, // sendBuf - 0, // sendDataLen - &bytes, - (*windows.Overlapped)(unsafe.Pointer(&c.o))) - _, err = sock.asyncIO(c, nil, bytes, err) - if i < d.Retries && canRedial(err) { - if err = d.redialWait(ctx); err == nil { - continue - } - } - break - } - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) - } - - // update the connection properties, so shutdown can be used - if err = windows.Setsockopt( - sock.handle, - windows.SOL_SOCKET, - windows.SO_UPDATE_CONNECT_CONTEXT, - nil, // optvalue - 0, // optlen - ); err != nil { - return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) - } - - // get the local name - var sal rawHvsockAddr - err = socket.GetSockName(sock.handle, &sal) - if err != nil { - return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) - } - conn.local.fromRaw(&sal) - - // one last check for timeout, since asyncIO doesn't check the context - if err = ctx.Err(); err != nil { - return nil, conn.opErr(op, err) - } - - conn.sock = sock - sock = nil - - return conn, nil -} - -// redialWait waits before attempting to redial, resetting the timer as appropriate. -func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { - if d.RetryWait == 0 { - return nil - } - - if d.rt == nil { - d.rt = time.NewTimer(d.RetryWait) - } else { - // should already be stopped and drained - d.rt.Reset(d.RetryWait) - } - - select { - case <-ctx.Done(): - case <-d.rt.C: - return nil - } - - // stop and drain the timer - if !d.rt.Stop() { - <-d.rt.C - } - return ctx.Err() -} - -// assumes error is a plain, unwrapped windows.Errno provided by direct syscall. 
-func canRedial(err error) bool { - //nolint:errorlint // guaranteed to be an Errno - switch err { - case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, - windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: - return true - default: - return false - } -} - -func (conn *HvsockConn) opErr(op string, err error) error { - // translate from "file closed" to "socket closed" - if errors.Is(err, ErrFileClosed) { - err = socket.ErrSocketClosed - } - return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} -} - -func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIO() - if err != nil { - return 0, conn.opErr("read", err) - } - defer conn.sock.wg.Done() - buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var flags, bytes uint32 - err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) - if err != nil { - var eno windows.Errno - if errors.As(err, &eno) { - err = os.NewSyscallError("wsarecv", eno) - } - return 0, conn.opErr("read", err) - } else if n == 0 { - err = io.EOF - } - return n, err -} - -func (conn *HvsockConn) Write(b []byte) (int, error) { - t := 0 - for len(b) != 0 { - n, err := conn.write(b) - if err != nil { - return t + n, err - } - t += n - b = b[n:] - } - return t, nil -} - -func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIO() - if err != nil { - return 0, conn.opErr("write", err) - } - defer conn.sock.wg.Done() - buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var bytes uint32 - err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) - if err != nil { - var eno windows.Errno - if errors.As(err, &eno) { - err = os.NewSyscallError("wsasend", eno) - } - return 0, conn.opErr("write", err) - } - return n, err -} - -// Close closes the socket connection, failing any pending read or write calls. -func (conn *HvsockConn) Close() error { - return conn.sock.Close() -} - -func (conn *HvsockConn) IsClosed() bool { - return conn.sock.IsClosed() -} - -// shutdown disables sending or receiving on a socket. -func (conn *HvsockConn) shutdown(how int) error { - if conn.IsClosed() { - return socket.ErrSocketClosed - } - - err := windows.Shutdown(conn.sock.handle, how) - if err != nil { - // If the connection was closed, shutdowns fail with "not connected" - if errors.Is(err, windows.WSAENOTCONN) || - errors.Is(err, windows.WSAESHUTDOWN) { - err = socket.ErrSocketClosed - } - return os.NewSyscallError("shutdown", err) - } - return nil -} - -// CloseRead shuts down the read end of the socket, preventing future read operations. -func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(windows.SHUT_RD) - if err != nil { - return conn.opErr("closeread", err) - } - return nil -} - -// CloseWrite shuts down the write end of the socket, preventing future write operations and -// notifying the other endpoint that no more data will be written. -func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(windows.SHUT_WR) - if err != nil { - return conn.opErr("closewrite", err) - } - return nil -} - -// LocalAddr returns the local address of the connection. -func (conn *HvsockConn) LocalAddr() net.Addr { - return &conn.local -} - -// RemoteAddr returns the remote address of the connection. 
-func (conn *HvsockConn) RemoteAddr() net.Addr { - return &conn.remote -} - -// SetDeadline implements the net.Conn SetDeadline method. -func (conn *HvsockConn) SetDeadline(t time.Time) error { - // todo: implement `SetDeadline` for `win32File` - if err := conn.SetReadDeadline(t); err != nil { - return fmt.Errorf("set read deadline: %w", err) - } - if err := conn.SetWriteDeadline(t); err != nil { - return fmt.Errorf("set write deadline: %w", err) - } - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -func (conn *HvsockConn) SetReadDeadline(t time.Time) error { - return conn.sock.SetReadDeadline(t) -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { - return conn.sock.SetWriteDeadline(t) -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go deleted file mode 100644 index 1f6538817..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// This package contains Win32 filesystem functionality. -package fs diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go deleted file mode 100644 index 0cd9621df..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go +++ /dev/null @@ -1,262 +0,0 @@ -//go:build windows - -package fs - -import ( - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/stringbuffer" -) - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go - -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew -//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW - -const NullHandle windows.Handle = 0 - -// AccessMask defines standard, specific, and generic rights. -// -// Used with CreateFile and NtCreateFile (and co.). -// -// Bitmask: -// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 -// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 -// +---------------+---------------+-------------------------------+ -// |G|G|G|G|Resvd|A| StandardRights| SpecificRights | -// |R|W|E|A| |S| | | -// +-+-------------+---------------+-------------------------------+ -// -// GR Generic Read -// GW Generic Write -// GE Generic Exectue -// GA Generic All -// Resvd Reserved -// AS Access Security System -// -// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask -// -// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights -// -// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants -type AccessMask = windows.ACCESS_MASK - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // Not actually any. 
- // - // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" - // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters - FILE_ANY_ACCESS AccessMask = 0 - - GENERIC_READ AccessMask = 0x8000_0000 - GENERIC_WRITE AccessMask = 0x4000_0000 - GENERIC_EXECUTE AccessMask = 0x2000_0000 - GENERIC_ALL AccessMask = 0x1000_0000 - ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000 - - // Specific Object Access - // from ntioapi.h - - FILE_READ_DATA AccessMask = (0x0001) // file & pipe - FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory - - FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe - FILE_ADD_FILE AccessMask = (0x0002) // directory - - FILE_APPEND_DATA AccessMask = (0x0004) // file - FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory - FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe - - FILE_READ_EA AccessMask = (0x0008) // file & directory - FILE_READ_PROPERTIES AccessMask = FILE_READ_EA - - FILE_WRITE_EA AccessMask = (0x0010) // file & directory - FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA - - FILE_EXECUTE AccessMask = (0x0020) // file - FILE_TRAVERSE AccessMask = (0x0020) // directory - - FILE_DELETE_CHILD AccessMask = (0x0040) // directory - - FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all - - FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all - - FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) - FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) - FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) - FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) - - SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF - - // Standard Access - // from ntseapi.h - - DELETE AccessMask = 0x0001_0000 - READ_CONTROL AccessMask = 0x0002_0000 - WRITE_DAC AccessMask = 0x0004_0000 - WRITE_OWNER AccessMask = 0x0008_0000 - SYNCHRONIZE AccessMask = 0x0010_0000 - - STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 - - STANDARD_RIGHTS_READ AccessMask = READ_CONTROL - STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL - STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL - - STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 -) - -type FileShareMode uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - FILE_SHARE_NONE FileShareMode = 0x00 - FILE_SHARE_READ FileShareMode = 0x01 - FILE_SHARE_WRITE FileShareMode = 0x02 - FILE_SHARE_DELETE FileShareMode = 0x04 - FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 -) - -type FileCreationDisposition uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // from winbase.h - - CREATE_NEW FileCreationDisposition = 0x01 - CREATE_ALWAYS FileCreationDisposition = 0x02 - OPEN_EXISTING FileCreationDisposition = 0x03 - OPEN_ALWAYS FileCreationDisposition = 0x04 - TRUNCATE_EXISTING FileCreationDisposition = 0x05 -) - -// Create disposition values for NtCreate* -type NTFileCreationDisposition uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( - // From ntioapi.h - - FILE_SUPERSEDE NTFileCreationDisposition = 0x00 - FILE_OPEN NTFileCreationDisposition = 0x01 - FILE_CREATE NTFileCreationDisposition = 0x02 - FILE_OPEN_IF NTFileCreationDisposition = 0x03 - FILE_OVERWRITE NTFileCreationDisposition = 0x04 - FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 - FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 -) - -// CreateFile and co. take flags or attributes together as one parameter. -// Define alias until we can use generics to allow both -// -// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants -type FileFlagOrAttribute uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // from winnt.h - - FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 - FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 - FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 - FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 - FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 - FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 - FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 - FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 - FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 - FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 - FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 -) - -// NtCreate* functions take a dedicated CreateOptions parameter. -// -// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile -// -// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file -type NTCreateOptions uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - // From ntioapi.h - - FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 - FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 - FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 - FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 - - FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 - FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 - FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 - FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 - - FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 - FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 - FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 - FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 - - FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 - FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 - FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 - FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 -) - -type FileSQSFlag = FileFlagOrAttribute - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( - // from winbase.h - - SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) - SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) - SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) - SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) - - SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 - SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 -) - -// GetFinalPathNameByHandle flags -// -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters -type GetFinalPathFlag uint32 - -//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( - GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 - - FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 - FILE_NAME_OPENED GetFinalPathFlag = 0x8 - - VOLUME_NAME_DOS GetFinalPathFlag = 0x0 - VOLUME_NAME_GUID GetFinalPathFlag = 0x1 - VOLUME_NAME_NT GetFinalPathFlag = 0x2 - VOLUME_NAME_NONE GetFinalPathFlag = 0x4 -) - -// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle -// with the given handle and flags. It transparently takes care of creating a buffer of the -// correct size for the call. -// -// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew -func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { - b := stringbuffer.NewWString() - //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? - for { - n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) - if err != nil { - return "", err - } - // If the buffer wasn't large enough, n will be the total size needed (including null terminator). - // Resize and try again. - if n > b.Cap() { - b.ResizeTo(n) - continue - } - // If the buffer is large enough, n will be the size not including the null terminator. - // Convert to a Go string and return. - return b.String(), nil - } -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go deleted file mode 100644 index 81760ac67..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go +++ /dev/null @@ -1,12 +0,0 @@ -package fs - -// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level -type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` - -// Impersonation levels -const ( - SecurityAnonymous SecurityImpersonationLevel = 0 - SecurityIdentification SecurityImpersonationLevel = 1 - SecurityImpersonation SecurityImpersonationLevel = 2 - SecurityDelegation SecurityImpersonationLevel = 3 -) diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go deleted file mode 100644 index a94e234c7..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package fs - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. 
-const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procCreateFileW = modkernel32.NewProc("CreateFileW") -) - -func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) - handle = windows.Handle(r0) - if handle == windows.InvalidHandle { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go deleted file mode 100644 index 7e82f9afa..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go +++ /dev/null @@ -1,20 +0,0 @@ -package socket - -import ( - "unsafe" -) - -// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The -// struct must meet the Win32 sockaddr requirements specified here: -// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 -// -// Specifically, the struct size must be least larger than an int16 (unsigned short) -// for the address family. -type RawSockaddr interface { - // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing - // for the RawSockaddr's data to be overwritten by syscalls (if necessary). - // - // It is the callers responsibility to validate that the values are valid; invalid - // pointers or size can cause a panic. 
- Sockaddr() (unsafe.Pointer, int32, error) -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go deleted file mode 100644 index 88580d974..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ /dev/null @@ -1,177 +0,0 @@ -//go:build windows - -package socket - -import ( - "errors" - "fmt" - "net" - "sync" - "syscall" - "unsafe" - - "github.com/Microsoft/go-winio/pkg/guid" - "golang.org/x/sys/windows" -) - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go - -//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname -//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername -//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind - -const socketError = uintptr(^uint32(0)) - -var ( - // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? - - ErrBufferSize = errors.New("buffer size") - ErrAddrFamily = errors.New("address family") - ErrInvalidPointer = errors.New("invalid pointer") - ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) -) - -// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) - -// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. -// If rsa is not large enough, the [windows.WSAEFAULT] is returned. -func GetSockName(s windows.Handle, rsa RawSockaddr) error { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - // although getsockname returns WSAEFAULT if the buffer is too small, it does not set - // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy - return getsockname(s, ptr, &l) -} - -// GetPeerName returns the remote address the socket is connected to. -// -// See [GetSockName] for more information. 
-func GetPeerName(s windows.Handle, rsa RawSockaddr) error { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - return getpeername(s, ptr, &l) -} - -func Bind(s windows.Handle, rsa RawSockaddr) (err error) { - ptr, l, err := rsa.Sockaddr() - if err != nil { - return fmt.Errorf("could not retrieve socket pointer and size: %w", err) - } - - return bind(s, ptr, l) -} - -// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the -// their sockaddr interface, so they cannot be used with HvsockAddr -// Replicate functionality here from -// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go - -// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at -// runtime via a WSAIoctl call: -// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks - -type runtimeFunc struct { - id guid.GUID - once sync.Once - addr uintptr - err error -} - -func (f *runtimeFunc) Load() error { - f.once.Do(func() { - var s windows.Handle - s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) - if f.err != nil { - return - } - defer windows.CloseHandle(s) //nolint:errcheck - - var n uint32 - f.err = windows.WSAIoctl(s, - windows.SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&f.id)), - uint32(unsafe.Sizeof(f.id)), - (*byte)(unsafe.Pointer(&f.addr)), - uint32(unsafe.Sizeof(f.addr)), - &n, - nil, // overlapped - 0, // completionRoutine - ) - }) - return f.err -} - -var ( - // todo: add `AcceptEx` and `GetAcceptExSockaddrs` - WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS - Data1: 0x25a207b9, - Data2: 0xddf3, - Data3: 0x4660, - Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, - } - - connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} -) - -func ConnectEx( - fd windows.Handle, - rsa RawSockaddr, - sendBuf *byte, - sendDataLen uint32, - bytesSent *uint32, - overlapped *windows.Overlapped, -) error { - if err := connectExFunc.Load(); err != nil { - return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) - } - ptr, n, err := rsa.Sockaddr() - if err != nil { - return err - } - return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) -} - -// BOOL LpfnConnectex( -// [in] SOCKET s, -// [in] const sockaddr *name, -// [in] int namelen, -// [in, optional] PVOID lpSendBuffer, -// [in] DWORD dwSendDataLength, -// [out] LPDWORD lpdwBytesSent, -// [in] LPOVERLAPPED lpOverlapped -// ) - -func connectEx( - s windows.Handle, - name unsafe.Pointer, - namelen int32, - sendBuf *byte, - sendDataLen uint32, - bytesSent *uint32, - overlapped *windows.Overlapped, -) (err error) { - r1, _, e1 := syscall.SyscallN(connectExFunc.addr, - uintptr(s), - uintptr(name), - uintptr(namelen), - uintptr(unsafe.Pointer(sendBuf)), - uintptr(sendDataLen), - uintptr(unsafe.Pointer(bytesSent)), - uintptr(unsafe.Pointer(overlapped)), - ) - - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return err -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go deleted file mode 100644 index e1504126a..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using 
"github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package socket - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procbind = modws2_32.NewProc("bind") - procgetpeername = modws2_32.NewProc("getpeername") - procgetsockname = modws2_32.NewProc("getsockname") -) - -func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} - -func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) - if r1 == socketError { - err = errnoErr(e1) - } - return -} - -func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go deleted file mode 100644 index 42ebc019f..000000000 --- a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go +++ /dev/null @@ -1,132 +0,0 @@ -package stringbuffer - -import ( - "sync" - "unicode/utf16" -) - -// TODO: worth exporting and using in mkwinsyscall? - -// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate -// large path strings: -// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310. -const MinWStringCap = 310 - -// use *[]uint16 since []uint16 creates an extra allocation where the slice header -// is copied to heap and then referenced via pointer in the interface header that sync.Pool -// stores. -var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly - New: func() interface{} { - b := make([]uint16, MinWStringCap) - return &b - }, -} - -func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) } - -// freeBuffer copies the slice header data, and puts a pointer to that in the pool. -// This avoids taking a pointer to the slice header in WString, which can be set to nil. -func freeBuffer(b []uint16) { pathPool.Put(&b) } - -// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings -// for interacting with Win32 APIs. -// Sizes are specified as uint32 and not int. -// -// It is not thread safe. -type WString struct { - // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future. - - // raw buffer - b []uint16 -} - -// NewWString returns a [WString] allocated from a shared pool with an -// initial capacity of at least [MinWStringCap]. 
-// Since the buffer may have been previously used, its contents are not guaranteed to be empty. -// -// The buffer should be freed via [WString.Free] -func NewWString() *WString { - return &WString{ - b: newBuffer(), - } -} - -func (b *WString) Free() { - if b.empty() { - return - } - freeBuffer(b.b) - b.b = nil -} - -// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the -// previous buffer back into pool. -func (b *WString) ResizeTo(c uint32) uint32 { - // already sufficient (or n is 0) - if c <= b.Cap() { - return b.Cap() - } - - if c <= MinWStringCap { - c = MinWStringCap - } - // allocate at-least double buffer size, as is done in [bytes.Buffer] and other places - if c <= 2*b.Cap() { - c = 2 * b.Cap() - } - - b2 := make([]uint16, c) - if !b.empty() { - copy(b2, b.b) - freeBuffer(b.b) - } - b.b = b2 - return c -} - -// Buffer returns the underlying []uint16 buffer. -func (b *WString) Buffer() []uint16 { - if b.empty() { - return nil - } - return b.b -} - -// Pointer returns a pointer to the first uint16 in the buffer. -// If the [WString.Free] has already been called, the pointer will be nil. -func (b *WString) Pointer() *uint16 { - if b.empty() { - return nil - } - return &b.b[0] -} - -// String returns the returns the UTF-8 encoding of the UTF-16 string in the buffer. -// -// It assumes that the data is null-terminated. -func (b *WString) String() string { - // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows" - // and would make this code Windows-only, which makes no sense. - // So copy UTF16ToString code into here. - // If other windows-specific code is added, switch to [windows.UTF16ToString] - - s := b.b - for i, v := range s { - if v == 0 { - s = s[:i] - break - } - } - return string(utf16.Decode(s)) -} - -// Cap returns the underlying buffer capacity. 
-func (b *WString) Cap() uint32 { - if b.empty() { - return 0 - } - return b.cap() -} - -func (b *WString) cap() uint32 { return uint32(cap(b.b)) } -func (b *WString) empty() bool { return b == nil || b.cap() == 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go deleted file mode 100644 index a2da6639d..000000000 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ /dev/null @@ -1,586 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "runtime" - "time" - "unsafe" - - "golang.org/x/sys/windows" - - "github.com/Microsoft/go-winio/internal/fs" -) - -//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW -//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe -//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl - -type PipeConn interface { - net.Conn - Disconnect() error - Flush() error -} - -// type aliases for mkwinsyscall code -type ( - ntAccessMask = fs.AccessMask - ntFileShareMode = fs.FileShareMode - ntFileCreationDisposition = fs.NTFileCreationDisposition - ntFileOptions = fs.NTCreateOptions -) - -type ioStatusBlock struct { - Status, Information uintptr -} - -// typedef struct _OBJECT_ATTRIBUTES { -// ULONG Length; -// HANDLE RootDirectory; -// PUNICODE_STRING ObjectName; -// ULONG Attributes; -// PVOID SecurityDescriptor; -// PVOID SecurityQualityOfService; -// } OBJECT_ATTRIBUTES; -// -// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *unicodeString - Attributes uintptr - SecurityDescriptor *securityDescriptor - SecurityQoS uintptr -} - -type unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -// typedef struct _SECURITY_DESCRIPTOR { -// BYTE Revision; -// BYTE Sbz1; -// SECURITY_DESCRIPTOR_CONTROL Control; -// PSID Owner; -// PSID Group; -// PACL Sacl; -// PACL Dacl; -// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR; -// -// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor -type securityDescriptor struct { - 
Revision byte - Sbz1 byte - Control uint16 - Owner uintptr - Group uintptr - Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl - Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl -} - -type ntStatus int32 - -func (status ntStatus) Err() error { - if status >= 0 { - return nil - } - return rtlNtStatusToDosError(status) -} - -var ( - // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - ErrPipeListenerClosed = net.ErrClosed - - errPipeWriteClosed = errors.New("pipe has been closed for write") -) - -type win32Pipe struct { - *win32File - path string -} - -var _ PipeConn = (*win32Pipe)(nil) - -type win32MessageBytePipe struct { - win32Pipe - writeClosed bool - readEOF bool -} - -type pipeAddress string - -func (f *win32Pipe) LocalAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) RemoteAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) SetDeadline(t time.Time) error { - if err := f.SetReadDeadline(t); err != nil { - return err - } - return f.SetWriteDeadline(t) -} - -func (f *win32Pipe) Disconnect() error { - return disconnectNamedPipe(f.win32File.handle) -} - -// CloseWrite closes the write side of a message pipe in byte mode. -func (f *win32MessageBytePipe) CloseWrite() error { - if f.writeClosed { - return errPipeWriteClosed - } - err := f.win32File.Flush() - if err != nil { - return err - } - _, err = f.win32File.Write(nil) - if err != nil { - return err - } - f.writeClosed = true - return nil -} - -// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since -// they are used to implement CloseWrite(). -func (f *win32MessageBytePipe) Write(b []byte) (int, error) { - if f.writeClosed { - return 0, errPipeWriteClosed - } - if len(b) == 0 { - return 0, nil - } - return f.win32File.Write(b) -} - -// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message -// mode pipe will return io.EOF, as will all subsequent reads. -func (f *win32MessageBytePipe) Read(b []byte) (int, error) { - if f.readEOF { - return 0, io.EOF - } - n, err := f.win32File.Read(b) - if err == io.EOF { //nolint:errorlint - // If this was the result of a zero-byte read, then - // it is possible that the read was due to a zero-size - // message. Since we are simulating CloseWrite with a - // zero-byte message, ensure that all future Read() calls - // also return EOF. - f.readEOF = true - } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno - // ERROR_MORE_DATA indicates that the pipe's read mode is message mode - // and the message still has more bytes. Treat this as a success, since - // this package presents all named pipes as byte streams. - err = nil - } - return n, err -} - -func (pipeAddress) Network() string { - return "pipe" -} - -func (s pipeAddress) String() string { - return string(s) -} - -// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. 
-func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) { - for { - select { - case <-ctx.Done(): - return windows.Handle(0), ctx.Err() - default: - h, err := fs.CreateFile(*path, - access, - 0, // mode - nil, // security attributes - fs.OPEN_EXISTING, - fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel), - 0, // template file handle - ) - if err == nil { - return h, nil - } - if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno - return h, &os.PathError{Err: err, Op: "open", Path: *path} - } - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. - time.Sleep(10 * time.Millisecond) - } - } -} - -// DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) -func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { - var absTimeout time.Time - if timeout != nil { - absTimeout = time.Now().Add(*timeout) - } else { - absTimeout = time.Now().Add(2 * time.Second) - } - ctx, cancel := context.WithDeadline(context.Background(), absTimeout) - defer cancel() - conn, err := DialPipeContext(ctx, path) - if errors.Is(err, context.DeadlineExceeded) { - return nil, ErrTimeout - } - return conn, err -} - -// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` -// cancellation or timeout. -func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) -} - -// PipeImpLevel is an enumeration of impersonation levels that may be set -// when calling DialPipeAccessImpersonation. -type PipeImpLevel uint32 - -const ( - PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) - PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) - PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) - PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) -) - -// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` -// cancellation or timeout. -func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { - return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) -} - -// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with -// `access` at `impLevel` until `ctx` cancellation or timeout. The other -// DialPipe* implementations use PipeImpLevelAnonymous. -func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { - var err error - var h windows.Handle - h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) - if err != nil { - return nil, err - } - - var flags uint32 - err = getNamedPipeInfo(h, &flags, nil, nil, nil) - if err != nil { - return nil, err - } - - f, err := makeWin32File(h) - if err != nil { - windows.Close(h) - return nil, err - } - - // If the pipe is in message mode, return a message byte pipe, which - // supports CloseWrite(). 
- if flags&windows.PIPE_TYPE_MESSAGE != 0 { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: f, path: path}, - }, nil - } - return &win32Pipe{win32File: f, path: path}, nil -} - -type acceptResponse struct { - f *win32File - err error -} - -type win32PipeListener struct { - firstHandle windows.Handle - path string - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int -} - -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { - path16, err := windows.UTF16FromString(path) - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - var oa objectAttributes - oa.Length = unsafe.Sizeof(oa) - - var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], - &ntPath, - 0, - 0, - ).Err(); err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck - oa.ObjectName = &ntPath - oa.Attributes = windows.OBJ_CASE_INSENSITIVE - - // The security descriptor is only needed for the first pipe. - if first { - if sd != nil { - //todo: does `sdb` need to be allocated on the heap, or can go allocate it? - l := uint32(len(sd)) - sdb, err := windows.LocalAlloc(0, l) - if err != nil { - return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err) - } - defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck - copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) - oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) - } else { - // Construct the default named pipe security descriptor. - var dacl uintptr - if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %w", err) - } - defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck - - sdb := &securityDescriptor{ - Revision: 1, - Control: windows.SE_DACL_PRESENT, - Dacl: dacl, - } - oa.SecurityDescriptor = sdb - } - } - - typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) - if c.MessageMode { - typ |= windows.FILE_PIPE_MESSAGE_TYPE - } - - disposition := fs.FILE_OPEN - access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE - if first { - disposition = fs.FILE_CREATE - // By not asking for read or write access, the named pipe file system - // will put this pipe into an initially disconnected state, blocking - // client connections until the next call with first == false. - access = fs.SYNCHRONIZE - } - - timeout := int64(-50 * 10000) // 50ms - - var ( - h windows.Handle - iosb ioStatusBlock - ) - err = ntCreateNamedPipeFile(&h, - access, - &oa, - &iosb, - fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, - disposition, - 0, - typ, - 0, - 0, - 0xffffffff, - uint32(c.InputBufferSize), - uint32(c.OutputBufferSize), - &timeout).Err() - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - runtime.KeepAlive(ntPath) - return h, nil -} - -func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, nil, &l.config, false) - if err != nil { - return nil, err - } - f, err := makeWin32File(h) - if err != nil { - windows.Close(h) - return nil, err - } - return f, nil -} - -func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { - p, err := l.makeServerPipe() - if err != nil { - return nil, err - } - - // Wait for the client to connect. 
- ch := make(chan error) - go func(p *win32File) { - ch <- connectPipe(p) - }(p) - - select { - case err = <-ch: - if err != nil { - p.Close() - p = nil - } - case <-l.closeCh: - // Abort the connect request by closing the handle. - p.Close() - p = nil - err = <-ch - if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno - err = ErrPipeListenerClosed - } - } - return p, err -} - -func (l *win32PipeListener) listenerRoutine() { - closed := false - for !closed { - select { - case <-l.closeCh: - closed = true - case responseCh := <-l.acceptCh: - var ( - p *win32File - err error - ) - for { - p, err = l.makeConnectedServerPipe() - // If the connection was immediately closed by the client, try - // again. - if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno - break - } - } - responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno - } - } - windows.Close(l.firstHandle) - l.firstHandle = 0 - // Notify Close() and Accept() callers that the handle has been closed. - close(l.doneCh) -} - -// PipeConfig contain configuration for the pipe listener. -type PipeConfig struct { - // SecurityDescriptor contains a Windows security descriptor in SDDL format. - SecurityDescriptor string - - // MessageMode determines whether the pipe is in byte or message mode. In either - // case the pipe is read in byte mode by default. The only practical difference in - // this implementation is that CloseWrite() is only supported for message mode pipes; - // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only - // transferred to the reader (and returned as io.EOF in this implementation) - // when the pipe is in message mode. - MessageMode bool - - // InputBufferSize specifies the size of the input buffer, in bytes. - InputBufferSize int32 - - // OutputBufferSize specifies the size of the output buffer, in bytes. - OutputBufferSize int32 -} - -// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. -// The pipe must not already exist. 
-func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { - var ( - sd []byte - err error - ) - if c == nil { - c = &PipeConfig{} - } - if c.SecurityDescriptor != "" { - sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) - if err != nil { - return nil, err - } - } - h, err := makeServerPipeHandle(path, sd, c, true) - if err != nil { - return nil, err - } - l := &win32PipeListener{ - firstHandle: h, - path: path, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), - } - go l.listenerRoutine() - return l, nil -} - -func connectPipe(p *win32File) error { - c, err := p.prepareIO() - if err != nil { - return err - } - defer p.wg.Done() - - err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIO(c, nil, 0, err) - if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno - return err - } - return nil -} - -func (l *win32PipeListener) Accept() (net.Conn, error) { - ch := make(chan acceptResponse) - select { - case l.acceptCh <- ch: - response := <-ch - err := response.err - if err != nil { - return nil, err - } - if l.config.MessageMode { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: response.f, path: l.path}, - }, nil - } - return &win32Pipe{win32File: response.f, path: l.path}, nil - case <-l.doneCh: - return nil, ErrPipeListenerClosed - } -} - -func (l *win32PipeListener) Close() error { - select { - case l.closeCh <- 1: - <-l.doneCh - case <-l.doneCh: - } - return nil -} - -func (l *win32PipeListener) Addr() net.Addr { - return pipeAddress(l.path) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go deleted file mode 100644 index 48ce4e924..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ /dev/null @@ -1,232 +0,0 @@ -// Package guid provides a GUID type. The backing structure for a GUID is -// identical to that used by the golang.org/x/sys/windows GUID type. -// There are two main binary encodings used for a GUID, the big-endian encoding, -// and the Windows (mixed-endian) encoding. See here for details: -// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding -package guid - -import ( - "crypto/rand" - "crypto/sha1" //nolint:gosec // not used for secure application - "encoding" - "encoding/binary" - "fmt" - "strconv" -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment - -// Variant specifies which GUID variant (or "type") of the GUID. It determines -// how the entirety of the rest of the GUID is interpreted. -type Variant uint8 - -// The variants specified by RFC 4122 section 4.1.1. -const ( - // VariantUnknown specifies a GUID variant which does not conform to one of - // the variant encodings specified in RFC 4122. - VariantUnknown Variant = iota - VariantNCS - VariantRFC4122 // RFC 4122 - VariantMicrosoft - VariantFuture -) - -// Version specifies how the bits in the GUID were generated. For instance, a -// version 4 GUID is randomly generated, and a version 5 is generated from the -// hash of an input string. -type Version uint8 - -func (v Version) String() string { - return strconv.FormatUint(uint64(v), 10) -} - -var _ = (encoding.TextMarshaler)(GUID{}) -var _ = (encoding.TextUnmarshaler)(&GUID{}) - -// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. 
-func NewV4() (GUID, error) { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - return GUID{}, err - } - - g := FromArray(b) - g.setVersion(4) // Version 4 means randomly generated. - g.setVariant(VariantRFC4122) - - return g, nil -} - -// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) -// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, -// and the sample code treats it as a series of bytes, so we do the same here. -// -// Some implementations, such as those found on Windows, treat the name as a -// big-endian UTF16 stream of bytes. If that is desired, the string can be -// encoded as such before being passed to this function. -func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() //nolint:gosec // not used for secure application - namespaceBytes := namespace.ToArray() - b.Write(namespaceBytes[:]) - b.Write(name) - - a := [16]byte{} - copy(a[:], b.Sum(nil)) - - g := FromArray(a) - g.setVersion(5) // Version 5 means generated from a string. - g.setVariant(VariantRFC4122) - - return g, nil -} - -func fromArray(b [16]byte, order binary.ByteOrder) GUID { - var g GUID - g.Data1 = order.Uint32(b[0:4]) - g.Data2 = order.Uint16(b[4:6]) - g.Data3 = order.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) - return g -} - -func (g GUID) toArray(order binary.ByteOrder) [16]byte { - b := [16]byte{} - order.PutUint32(b[0:4], g.Data1) - order.PutUint16(b[4:6], g.Data2) - order.PutUint16(b[6:8], g.Data3) - copy(b[8:16], g.Data4[:]) - return b -} - -// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. -func FromArray(b [16]byte) GUID { - return fromArray(b, binary.BigEndian) -} - -// ToArray returns an array of 16 bytes representing the GUID in big-endian -// encoding. -func (g GUID) ToArray() [16]byte { - return g.toArray(binary.BigEndian) -} - -// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. -func FromWindowsArray(b [16]byte) GUID { - return fromArray(b, binary.LittleEndian) -} - -// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows -// encoding. -func (g GUID) ToWindowsArray() [16]byte { - return g.toArray(binary.LittleEndian) -} - -func (g GUID) String() string { - return fmt.Sprintf( - "%08x-%04x-%04x-%04x-%012x", - g.Data1, - g.Data2, - g.Data3, - g.Data4[:2], - g.Data4[2:]) -} - -// FromString parses a string containing a GUID and returns the GUID. The only -// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -// format. 
-func FromString(s string) (GUID, error) { - if len(s) != 36 { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - - var g GUID - - data1, err := strconv.ParseUint(s[0:8], 16, 32) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data1 = uint32(data1) - - data2, err := strconv.ParseUint(s[9:13], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data2 = uint16(data2) - - data3, err := strconv.ParseUint(s[14:18], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data3 = uint16(data3) - - for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { - v, err := strconv.ParseUint(s[x:x+2], 16, 8) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data4[i] = uint8(v) - } - - return g, nil -} - -func (g *GUID) setVariant(v Variant) { - d := g.Data4[0] - switch v { - case VariantNCS: - d = (d & 0x7f) - case VariantRFC4122: - d = (d & 0x3f) | 0x80 - case VariantMicrosoft: - d = (d & 0x1f) | 0xc0 - case VariantFuture: - d = (d & 0x0f) | 0xe0 - case VariantUnknown: - fallthrough - default: - panic(fmt.Sprintf("invalid variant: %d", v)) - } - g.Data4[0] = d -} - -// Variant returns the GUID variant, as defined in RFC 4122. -func (g GUID) Variant() Variant { - b := g.Data4[0] - if b&0x80 == 0 { - return VariantNCS - } else if b&0xc0 == 0x80 { - return VariantRFC4122 - } else if b&0xe0 == 0xc0 { - return VariantMicrosoft - } else if b&0xe0 == 0xe0 { - return VariantFuture - } - return VariantUnknown -} - -func (g *GUID) setVersion(v Version) { - g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) -} - -// Version returns the GUID version, as defined in RFC 4122. -func (g GUID) Version() Version { - return Version((g.Data3 & 0xF000) >> 12) -} - -// MarshalText returns the textual representation of the GUID. -func (g GUID) MarshalText() ([]byte, error) { - return []byte(g.String()), nil -} - -// UnmarshalText takes the textual representation of a GUID, and unmarhals it -// into this GUID. -func (g *GUID) UnmarshalText(text []byte) error { - g2, err := FromString(string(text)) - if err != nil { - return err - } - *g = g2 - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go deleted file mode 100644 index 805bd3548..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows -// +build !windows - -package guid - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type as that is only available to builds -// targeted at `windows`. The representation matches that used by native Windows -// code. -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go deleted file mode 100644 index 27e45ee5c..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build windows -// +build windows - -package guid - -import "golang.org/x/sys/windows" - -// GUID represents a GUID/UUID. 
It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go deleted file mode 100644 index 4076d3132..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. - -package guid - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[VariantUnknown-0] - _ = x[VariantNCS-1] - _ = x[VariantRFC4122-2] - _ = x[VariantMicrosoft-3] - _ = x[VariantFuture-4] -} - -const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" - -var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} - -func (i Variant) String() string { - if i >= Variant(len(_Variant_index)-1) { - return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] -} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index d9b90b6e8..000000000 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,196 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h windows.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - //revive:disable-next-line:var-naming ALL_CAPS - SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - - //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" - SeSecurityPrivilege = "SeSecurityPrivilege" -) - -var ( - privNames = make(map[string]uint64) - privNameMutex sync.Mutex -) - -// PrivilegeError represents an error enabling privileges. 
-type PrivilegeError struct { - privileges []uint64 -} - -func (e *PrivilegeError) Error() string { - s := "Could not enable privilege " - if len(e.privileges) > 1 { - s = "Could not enable privileges " - } - for i, p := range e.privileges { - if i != 0 { - s += ", " - } - s += `"` - s += getPrivilegeName(p) - s += `"` - } - return s -} - -// RunWithPrivilege enables a single privilege for a function call. -func RunWithPrivilege(name string, fn func() error) error { - return RunWithPrivileges([]string{name}, fn) -} - -// RunWithPrivileges enables privileges for a function call. -func RunWithPrivileges(names []string, fn func() error) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - token, err := newThreadToken() - if err != nil { - return err - } - defer releaseThreadToken(token) - err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) - if err != nil { - return err - } - return fn() -} - -func mapPrivileges(names []string) ([]uint64, error) { - privileges := make([]uint64, 0, len(names)) - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := lookupPrivilegeValue("", name, &p) - if err != nil { - return nil, err - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// EnableProcessPrivileges enables privileges globally for the process. -func EnableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) -} - -// DisableProcessPrivileges disables privileges globally for the process. -func DisableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, 0) -} - -func enableDisableProcessPrivilege(names []string, action uint32) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - - p := windows.CurrentProcess() - var token windows.Token - err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) - if err != nil { - return err - } - - defer token.Close() - return adjustPrivileges(token, privileges, action) -} - -func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { - var b bytes.Buffer - _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) - for _, p := range privileges { - _ = binary.Write(&b, binary.LittleEndian, p) - _ = binary.Write(&b, binary.LittleEndian, action) - } - prevState := make([]byte, b.Len()) - reqSize := uint32(0) - success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) - if !success { - return err - } - if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno - return &PrivilegeError{privileges} - } - return nil -} - -func getPrivilegeName(luid uint64) string { - var nameBuffer [256]uint16 - bufSize := uint32(len(nameBuffer)) - err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) - if err != nil { - return fmt.Sprintf("", luid) - } - - var displayNameBuffer [256]uint16 - displayBufSize := uint32(len(displayNameBuffer)) - var langID uint32 - err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) - if err != nil { - return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) - } - - return string(utf16.Decode(displayNameBuffer[:displayBufSize])) -} - -func newThreadToken() (windows.Token, error) { - err := 
impersonateSelf(windows.SecurityImpersonation) - if err != nil { - return 0, err - } - - var token windows.Token - err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token) - if err != nil { - rerr := revertToSelf() - if rerr != nil { - panic(rerr) - } - return 0, err - } - return token, nil -} - -func releaseThreadToken(h windows.Token) { - err := revertToSelf() - if err != nil { - panic(err) - } - h.Close() -} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go deleted file mode 100644 index 67d1a104a..000000000 --- a/vendor/github.com/Microsoft/go-winio/reparse.go +++ /dev/null @@ -1,131 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "strings" - "unicode/utf16" - "unsafe" -) - -const ( - reparseTagMountPoint = 0xA0000003 - reparseTagSymlink = 0xA000000C -) - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 -} - -// ReparsePoint describes a Win32 symlink or mount point. -type ReparsePoint struct { - Target string - IsMountPoint bool -} - -// UnsupportedReparsePointError is returned when trying to decode a non-symlink or -// mount point reparse point. -type UnsupportedReparsePointError struct { - Tag uint32 -} - -func (e *UnsupportedReparsePointError) Error() string { - return fmt.Sprintf("unsupported reparse point %x", e.Tag) -} - -// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink -// or a mount point. -func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { - tag := binary.LittleEndian.Uint32(b[0:4]) - return DecodeReparsePointData(tag, b[8:]) -} - -func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { - isMountPoint := false - switch tag { - case reparseTagMountPoint: - isMountPoint = true - case reparseTagSymlink: - default: - return nil, &UnsupportedReparsePointError{tag} - } - nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) - if !isMountPoint { - nameOffset += 4 - } - nameLength := binary.LittleEndian.Uint16(b[6:8]) - name := make([]uint16, nameLength/2) - err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) - if err != nil { - return nil, err - } - return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil -} - -func isDriveLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or -// mount point. -func EncodeReparsePoint(rp *ReparsePoint) []byte { - // Generate an NT path and determine if this is a relative path. - var ntTarget string - relative := false - if strings.HasPrefix(rp.Target, `\\?\`) { - ntTarget = `\??\` + rp.Target[4:] - } else if strings.HasPrefix(rp.Target, `\\`) { - ntTarget = `\??\UNC\` + rp.Target[2:] - } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { - ntTarget = `\??\` + rp.Target - } else { - ntTarget = rp.Target - relative = true - } - - // The paths must be NUL-terminated even though they are counted strings. 
- target16 := utf16.Encode([]rune(rp.Target + "\x00")) - ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) - - size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 - size += len(ntTarget16)*2 + len(target16)*2 - - tag := uint32(reparseTagMountPoint) - if !rp.IsMountPoint { - tag = reparseTagSymlink - size += 4 // Add room for symlink flags - } - - data := reparseDataBuffer{ - ReparseTag: tag, - ReparseDataLength: uint16(size), - SubstituteNameOffset: 0, - SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), - PrintNameOffset: uint16(len(ntTarget16) * 2), - PrintNameLength: uint16((len(target16) - 1) * 2), - } - - var b bytes.Buffer - _ = binary.Write(&b, binary.LittleEndian, &data) - if !rp.IsMountPoint { - flags := uint32(0) - if relative { - flags |= 1 - } - _ = binary.Write(&b, binary.LittleEndian, flags) - } - - _ = binary.Write(&b, binary.LittleEndian, ntTarget16) - _ = binary.Write(&b, binary.LittleEndian, target16) - return b.Bytes() -} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go deleted file mode 100644 index c3685e98e..000000000 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ /dev/null @@ -1,133 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "fmt" - "unsafe" - - "golang.org/x/sys/windows" -) - -//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW -//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW -//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW - -type AccountLookupError struct { - Name string - Err error -} - -func (e *AccountLookupError) Error() string { - if e.Name == "" { - return "lookup account: empty account name specified" - } - var s string - switch { - case errors.Is(e.Err, windows.ERROR_INVALID_SID): - s = "the security ID structure is invalid" - case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): - s = "not found" - default: - s = e.Err.Error() - } - return "lookup account " + e.Name + ": " + s -} - -func (e *AccountLookupError) Unwrap() error { return e.Err } - -type SddlConversionError struct { - Sddl string - Err error -} - -func (e *SddlConversionError) Error() string { - return "convert " + e.Sddl + ": " + e.Err.Error() -} - -func (e *SddlConversionError) Unwrap() error { return e.Err } - -// LookupSidByName looks up the SID of an account by name -// -//revive:disable-next-line:var-naming SID, not Sid -func LookupSidByName(name string) (sid string, err error) { - if name == "" { - return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} - } - - var sidSize, sidNameUse, refDomainSize uint32 - err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno - return "", &AccountLookupError{name, err} - } - sidBuffer := make([]byte, sidSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{name, err} - } - var strBuffer *uint16 - err = 
convertSidToStringSid(&sidBuffer[0], &strBuffer) - if err != nil { - return "", &AccountLookupError{name, err} - } - sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) - return sid, nil -} - -// LookupNameBySid looks up the name of an account by SID -// -//revive:disable-next-line:var-naming SID, not Sid -func LookupNameBySid(sid string) (name string, err error) { - if sid == "" { - return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} - } - - sidBuffer, err := windows.UTF16PtrFromString(sid) - if err != nil { - return "", &AccountLookupError{sid, err} - } - - var sidPtr *byte - if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { - return "", &AccountLookupError{sid, err} - } - defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck - - var nameSize, refDomainSize, sidNameUse uint32 - err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno - return "", &AccountLookupError{sid, err} - } - - nameBuffer := make([]uint16, nameSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{sid, err} - } - - name = windows.UTF16ToString(nameBuffer) - return name, nil -} - -func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - sd, err := windows.SecurityDescriptorFromString(sddl) - if err != nil { - return nil, &SddlConversionError{Sddl: sddl, Err: err} - } - b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) - return b, nil -} - -func SecurityDescriptorToSddl(sd []byte) (string, error) { - if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { - return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) - } - s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) - return s.String(), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go deleted file mode 100644 index a6ca111b3..000000000 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build windows - -package winio - -//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go deleted file mode 100644 index 89b66eda8..000000000 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ /dev/null @@ -1,378 +0,0 @@ -//go:build windows - -// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package winio - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. 
-func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") -) - -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } - r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - err = errnoErr(e1) - } - return -} - -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSidToSid(str *uint16, sid **byte) (err error) { - r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level)) - if r1 == 0 { - err = errnoErr(e1) - 
} - return -} - -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return - } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) -} - -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) 
(err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func revertToSelf() (err error) { - r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) { - r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) { - r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount)) - newport = windows.Handle(r0) - if newport == 0 { - err = errnoErr(e1) - } - return -} - -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) -} - -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { - r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) - handle = windows.Handle(r0) - if handle == windows.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func disconnectNamedPipe(pipe windows.Handle) (err error) { - r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getCurrentThread() (h windows.Handle) { - r0, _, 
_ := syscall.SyscallN(procGetCurrentThread.Addr()) - h = windows.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) - status = ntStatus(r0) - return -} - -func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl))) - status = ntStatus(r0) - return -} - -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { - r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved)) - status = ntStatus(r0) - return -} - -func rtlNtStatusToDosError(status ntStatus) (winerr error) { - r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status)) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) - if r1 == 0 { - err = 
errnoErr(e1) - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. 
Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt deleted file mode 100644 index 899129ecc..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go deleted file mode 100644 index 6504a2186..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go +++ /dev/null @@ -1,18 +0,0 @@ -package aws - -// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing. -type AccountIDEndpointMode string - -const ( - // AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing - AccountIDEndpointModeUnset AccountIDEndpointMode = "" - - // AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present - AccountIDEndpointModePreferred = "preferred" - - // AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity - AccountIDEndpointModeRequired = "required" - - // AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing - AccountIDEndpointModeDisabled = "disabled" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go deleted file mode 100644 index fe63fedad..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package arn provides a parser for interacting with Amazon Resource Names. -package arn - -import ( - "errors" - "strings" -) - -const ( - arnDelimiter = ":" - arnSections = 6 - arnPrefix = "arn:" - - // zero-indexed - sectionPartition = 1 - sectionService = 2 - sectionRegion = 3 - sectionAccountID = 4 - sectionResource = 5 - - // errors - invalidPrefix = "arn: invalid prefix" - invalidSections = "arn: not enough sections" -) - -// ARN captures the individual fields of an Amazon Resource Name. -// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. -type ARN struct { - // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in - // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China - // (Beijing) region is "aws-cn". - Partition string - - // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of - // namespaces, see - // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. - Service string - - // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this - // component might be omitted. 
- Region string - - // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the - // ARNs for some resources don't require an account number, so this component might be omitted. - AccountID string - - // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — - // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the - // resource name itself. Some services allows paths for resource names, as described in - // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. - Resource string -} - -// Parse parses an ARN into its constituent parts. -// -// Some example ARNs: -// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment -// arn:aws:iam::123456789012:user/David -// arn:aws:rds:eu-west-1:123456789012:db:mysql-db -// arn:aws:s3:::my_corporate_bucket/exampleobject.png -func Parse(arn string) (ARN, error) { - if !strings.HasPrefix(arn, arnPrefix) { - return ARN{}, errors.New(invalidPrefix) - } - sections := strings.SplitN(arn, arnDelimiter, arnSections) - if len(sections) != arnSections { - return ARN{}, errors.New(invalidSections) - } - return ARN{ - Partition: sections[sectionPartition], - Service: sections[sectionService], - Region: sections[sectionRegion], - AccountID: sections[sectionAccountID], - Resource: sections[sectionResource], - }, nil -} - -// IsARN returns whether the given string is an arn -// by looking for whether the string starts with arn: -func IsARN(arn string) bool { - return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 -} - -// String returns the canonical representation of the ARN -func (arn ARN) String() string { - return arnPrefix + - arn.Partition + arnDelimiter + - arn.Service + arnDelimiter + - arn.Region + arnDelimiter + - arn.AccountID + arnDelimiter + - arn.Resource -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go deleted file mode 100644 index 4152caade..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go +++ /dev/null @@ -1,33 +0,0 @@ -package aws - -// RequestChecksumCalculation controls request checksum calculation workflow -type RequestChecksumCalculation int - -const ( - // RequestChecksumCalculationUnset is the unset value for RequestChecksumCalculation - RequestChecksumCalculationUnset RequestChecksumCalculation = iota - - // RequestChecksumCalculationWhenSupported indicates request checksum will be calculated - // if the operation supports input checksums - RequestChecksumCalculationWhenSupported - - // RequestChecksumCalculationWhenRequired indicates request checksum will be calculated - // if required by the operation or if user elects to set a checksum algorithm in request - RequestChecksumCalculationWhenRequired -) - -// ResponseChecksumValidation controls response checksum validation workflow -type ResponseChecksumValidation int - -const ( - // ResponseChecksumValidationUnset is the unset value for ResponseChecksumValidation - ResponseChecksumValidationUnset ResponseChecksumValidation = iota - - // ResponseChecksumValidationWhenSupported indicates response checksum will be validated - // if the operation supports output checksums - ResponseChecksumValidationWhenSupported - - // ResponseChecksumValidationWhenRequired indicates response checksum will only - // be validated if the operation requires output checksum 
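
For reference, a minimal stand-alone sketch of how the arn package being removed here is normally used; the import path, the function signatures, and the sample ARN all come from the deleted file itself, so nothing below is PBM-specific.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/arn"
)

func main() {
	raw := "arn:aws:s3:::my_corporate_bucket/exampleobject.png"

	// IsARN is a cheap prefix/section check; Parse does the actual splitting.
	if !arn.IsARN(raw) {
		log.Fatalf("not an ARN: %q", raw)
	}

	parsed, err := arn.Parse(raw)
	if err != nil {
		log.Fatalf("parse failed: %v", err)
	}

	// Region and AccountID are empty for S3 object ARNs, as the struct comments note.
	fmt.Println("service: ", parsed.Service)
	fmt.Println("resource:", parsed.Resource)
	fmt.Println("round trip:", parsed.String())
}
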
validation - ResponseChecksumValidationWhenRequired -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go deleted file mode 100644 index a015cc5b2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ /dev/null @@ -1,238 +0,0 @@ -package aws - -import ( - "net/http" - - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// HTTPClient provides the interface to provide custom HTTPClients. Generally -// *http.Client is sufficient for most use cases. The HTTPClient should not -// follow 301 or 302 redirects. -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// A Config provides service configuration for service clients. -type Config struct { - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // See http://docs.aws.amazon.com/general/latest/gr/rande.html for - // information on AWS regions. - Region string - - // The credentials object to use when signing requests. - // Use the LoadDefaultConfig to load configuration from all the SDK's supported - // sources, and resolve credentials using the SDK's default credential chain. - Credentials CredentialsProvider - - // The Bearer Authentication token provider to use for authenticating API - // operation calls with a Bearer Authentication token. The API clients and - // operation must support Bearer Authentication scheme in order for the - // token provider to be used. API clients created with NewFromConfig will - // automatically be configured with this option, if the API client support - // Bearer Authentication. - // - // The SDK's config.LoadDefaultConfig can automatically populate this - // option for external configuration options such as SSO session. - // https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html - BearerAuthTokenProvider smithybearer.TokenProvider - - // The HTTP Client the SDK's API clients will use to invoke HTTP requests. - // The SDK defaults to a BuildableClient allowing API clients to create - // copies of the HTTP Client for service specific customizations. - // - // Use a (*http.Client) for custom behavior. Using a custom http.Client - // will prevent the SDK from modifying the HTTP client. - HTTPClient HTTPClient - - // An endpoint resolver that can be used to provide or override an endpoint - // for the given service and region. - // - // See the `aws.EndpointResolver` documentation for additional usage - // information. - // - // Deprecated: See Config.EndpointResolverWithOptions - EndpointResolver EndpointResolver - - // An endpoint resolver that can be used to provide or override an endpoint - // for the given service and region. - // - // When EndpointResolverWithOptions is specified, it will be used by a - // service client rather than using EndpointResolver if also specified. - // - // See the `aws.EndpointResolverWithOptions` documentation for additional - // usage information. - // - // Deprecated: with the release of endpoint resolution v2 in API clients, - // EndpointResolver and EndpointResolverWithOptions are deprecated. - // Providing a value for this field will likely prevent you from using - // newer endpoint-related service features. See API client options - // EndpointResolverV2 and BaseEndpoint. 
- EndpointResolverWithOptions EndpointResolverWithOptions - - // RetryMaxAttempts specifies the maximum number attempts an API client - // will call an operation that fails with a retryable error. - // - // API Clients will only use this value to construct a retryer if the - // Config.Retryer member is not nil. This value will be ignored if - // Retryer is not nil. - RetryMaxAttempts int - - // RetryMode specifies the retry model the API client will be created with. - // - // API Clients will only use this value to construct a retryer if the - // Config.Retryer member is not nil. This value will be ignored if - // Retryer is not nil. - RetryMode RetryMode - - // Retryer is a function that provides a Retryer implementation. A Retryer - // guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - // - // In general, the provider function should return a new instance of a - // Retryer if you are attempting to provide a consistent Retryer - // configuration across all clients. This will ensure that each client will - // be provided a new instance of the Retryer implementation, and will avoid - // issues such as sharing the same retry token bucket across services. - // - // If not nil, RetryMaxAttempts, and RetryMode will be ignored by API - // clients. - Retryer func() Retryer - - // ConfigSources are the sources that were used to construct the Config. - // Allows for additional configuration to be loaded by clients. - ConfigSources []interface{} - - // APIOptions provides the set of middleware mutations modify how the API - // client requests will be handled. This is useful for adding additional - // tracing data to a request, or changing behavior of the SDK's client. - APIOptions []func(*middleware.Stack) error - - // The logger writer interface to write logging messages to. Defaults to - // standard error. - Logger logging.Logger - - // Configures the events that will be sent to the configured logger. This - // can be used to configure the logging of signing, retries, request, and - // responses of the SDK clients. - // - // See the ClientLogMode type documentation for the complete set of logging - // modes and available configuration. - ClientLogMode ClientLogMode - - // The configured DefaultsMode. If not specified, service clients will - // default to legacy. - // - // Supported modes are: auto, cross-region, in-region, legacy, mobile, - // standard - DefaultsMode DefaultsMode - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode - // is set to DefaultsModeAuto and is initialized by - // `config.LoadDefaultConfig`. You should not populate this structure - // programmatically, or rely on the values here within your applications. - RuntimeEnvironment RuntimeEnvironment - - // AppId is an optional application specific identifier that can be set. - // When set it will be appended to the User-Agent header of every request - // in the form of App/{AppId}. This variable is sourced from environment - // variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id. - // See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for - // more information on environment variables and shared config settings. - AppID string - - // BaseEndpoint is an intermediary transfer location to a service specific - // BaseEndpoint on a service's Options. - BaseEndpoint *string - - // DisableRequestCompression toggles if an operation request could be - // compressed or not. 
Will be set to false by default. This variable is sourced from - // environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute - // disable_request_compression - DisableRequestCompression bool - - // RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be - // compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively. - // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or - // the shared config profile attribute request_min_compression_size_bytes - RequestMinCompressSizeBytes int64 - - // Controls how a resolved AWS account ID is handled for endpoint routing. - AccountIDEndpointMode AccountIDEndpointMode - - // RequestChecksumCalculation determines when request checksum calculation is performed. - // - // There are two possible values for this setting: - // - // 1. RequestChecksumCalculationWhenSupported (default): The checksum is always calculated - // if the operation supports it, regardless of whether the user sets an algorithm in the request. - // - // 2. RequestChecksumCalculationWhenRequired: The checksum is only calculated if the user - // explicitly sets a checksum algorithm in the request. - // - // This setting is sourced from the environment variable AWS_REQUEST_CHECKSUM_CALCULATION - // or the shared config profile attribute "request_checksum_calculation". - RequestChecksumCalculation RequestChecksumCalculation - - // ResponseChecksumValidation determines when response checksum validation is performed - // - // There are two possible values for this setting: - // - // 1. ResponseChecksumValidationWhenSupported (default): The checksum is always validated - // if the operation supports it, regardless of whether the user sets the validation mode to ENABLED in request. - // - // 2. ResponseChecksumValidationWhenRequired: The checksum is only validated if the user - // explicitly sets the validation mode to ENABLED in the request - // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or - // the shared config profile attribute "response_checksum_validation". - ResponseChecksumValidation ResponseChecksumValidation -} - -// NewConfig returns a new Config pointer that can be chained with builder -// methods to set multiple configuration values inline without using pointers. -func NewConfig() *Config { - return &Config{} -} - -// Copy will return a shallow copy of the Config object. -func (c Config) Copy() Config { - cp := c - return cp -} - -// EndpointDiscoveryEnableState indicates if endpoint discovery is -// enabled, disabled, auto or unset state. -// -// Default behavior (Auto or Unset) indicates operations that require endpoint -// discovery will use Endpoint Discovery by default. Operations that -// optionally use Endpoint Discovery will not use Endpoint Discovery -// unless EndpointDiscovery is explicitly enabled. -type EndpointDiscoveryEnableState uint - -// Enumeration values for EndpointDiscoveryEnableState -const ( - // EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset. - // Users do not need to use this value explicitly. The behavior for unset - // is the same as for EndpointDiscoveryAuto. - EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota - - // EndpointDiscoveryAuto represents an AUTO state that allows endpoint - // discovery only when required by the api. 
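
For illustration only, a sketch of populating the aws.Config removed above by hand, using just the fields and constants defined in the deleted sources; real applications normally let the SDK's config loader fill this in, as the field comments explain. The region, retry count, and AppID values are arbitrary placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	cfg := aws.NewConfig()
	cfg.Region = "us-east-1"                  // placeholder region
	cfg.RetryMaxAttempts = 5                  // used only when Retryer is nil
	cfg.RetryMode = aws.RetryMode("standard") // same string form the deleted defaults table uses
	cfg.AppID = "example-app"                 // placeholder; surfaces as App/{AppId} in the User-Agent
	cfg.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired

	// Copy is a shallow copy, so the snapshot keeps its scalar fields when cfg changes later.
	snapshot := cfg.Copy()
	cfg.Region = "eu-west-1"
	fmt.Println(snapshot.Region, cfg.Region) // us-east-1 eu-west-1
}
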
This is the default - // configuration resolved by the client if endpoint discovery is neither - // enabled or disabled. - EndpointDiscoveryAuto // default state - - // EndpointDiscoveryDisabled indicates client MUST not perform endpoint - // discovery even when required. - EndpointDiscoveryDisabled - - // EndpointDiscoveryEnabled indicates client MUST always perform endpoint - // discovery if supported for the operation. - EndpointDiscoveryEnabled -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go deleted file mode 100644 index 4d8e26ef3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go +++ /dev/null @@ -1,22 +0,0 @@ -package aws - -import ( - "context" - "time" -) - -type suppressedContext struct { - context.Context -} - -func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -func (s *suppressedContext) Done() <-chan struct{} { - return nil -} - -func (s *suppressedContext) Err() error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go deleted file mode 100644 index 781ac0ae2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go +++ /dev/null @@ -1,224 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" -) - -// CredentialsCacheOptions are the options -type CredentialsCacheOptions struct { - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // An ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. This can cause an - // increased number of requests to refresh the credentials to occur. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration - - // ExpiryWindowJitterFrac provides a mechanism for randomizing the - // expiration of credentials within the configured ExpiryWindow by a random - // percentage. Valid values are between 0.0 and 1.0. - // - // As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac - // is 0.5 then credentials will be set to expire between 30 to 60 seconds - // prior to their actual expiration time. - // - // If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored. - // If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window. - // If ExpiryWindowJitterFrac < 0 the value will be treated as 0. - // If ExpiryWindowJitterFrac > 1 the value will be treated as 1. - ExpiryWindowJitterFrac float64 -} - -// CredentialsCache provides caching and concurrency safe credentials retrieval -// via the provider's retrieve method. -// -// CredentialsCache will look for optional interfaces on the Provider to adjust -// how the credential cache handles credentials caching. -// -// - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle -// credential refresh failures. This could return an updated Credentials -// value, or attempt another means of retrieving credentials. -// -// - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how -// credentials Expires is modified. 
This could modify how the Credentials -// Expires is adjusted based on the CredentialsCache ExpiryWindow option. -// Such as providing a floor not to reduce the Expires below. -type CredentialsCache struct { - provider CredentialsProvider - - options CredentialsCacheOptions - creds atomic.Value - sf singleflight.Group -} - -// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider -// is expected to not be nil. A variadic list of one or more functions can be -// provided to modify the CredentialsCache configuration. This allows for -// configuration of credential expiry window and jitter. -func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache { - options := CredentialsCacheOptions{} - - for _, fn := range optFns { - fn(&options) - } - - if options.ExpiryWindow < 0 { - options.ExpiryWindow = 0 - } - - if options.ExpiryWindowJitterFrac < 0 { - options.ExpiryWindowJitterFrac = 0 - } else if options.ExpiryWindowJitterFrac > 1 { - options.ExpiryWindowJitterFrac = 1 - } - - return &CredentialsCache{ - provider: provider, - options: options, - } -} - -// Retrieve returns the credentials. If the credentials have already been -// retrieved, and not expired the cached credentials will be returned. If the -// credentials have not been retrieved yet, or expired the provider's Retrieve -// method will be called. -// -// Returns and error if the provider's retrieve method returns an error. -func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) { - if creds, ok := p.getCreds(); ok && !creds.Expired() { - return creds, nil - } - - resCh := p.sf.DoChan("", func() (interface{}, error) { - return p.singleRetrieve(&suppressedContext{ctx}) - }) - select { - case res := <-resCh: - return res.Val.(Credentials), res.Err - case <-ctx.Done(): - return Credentials{}, &RequestCanceledError{Err: ctx.Err()} - } -} - -func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) { - currCreds, ok := p.getCreds() - if ok && !currCreds.Expired() { - return currCreds, nil - } - - newCreds, err := p.provider.Retrieve(ctx) - if err != nil { - handleFailToRefresh := defaultHandleFailToRefresh - if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok { - handleFailToRefresh = cs.HandleFailToRefresh - } - newCreds, err = handleFailToRefresh(ctx, currCreds, err) - if err != nil { - return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err) - } - } - - if newCreds.CanExpire && p.options.ExpiryWindow > 0 { - adjustExpiresBy := defaultAdjustExpiresBy - if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok { - adjustExpiresBy = cs.AdjustExpiresBy - } - - randFloat64, err := sdkrand.CryptoRandFloat64() - if err != nil { - return Credentials{}, fmt.Errorf("failed to get random provider, %w", err) - } - - var jitter time.Duration - if p.options.ExpiryWindowJitterFrac > 0 { - jitter = time.Duration(randFloat64 * - p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow)) - } - - newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter)) - if err != nil { - return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err) - } - } - - p.creds.Store(&newCreds) - return newCreds, nil -} - -// getCreds returns the currently stored credentials and true. Returning false -// if no credentials were stored. 
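
For reference, a minimal sketch (separate from the deleted file) of wiring the CredentialsCache shown above around a provider and tuning its expiry window; CredentialsProviderFunc and the Credentials fields come from the credentials.go removed further down, while the key values are invented placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	provider := aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) {
		// Placeholder values; a real provider would fetch and return expiring credentials.
		return aws.Credentials{
			AccessKeyID:     "EXAMPLEKEYID",
			SecretAccessKey: "EXAMPLESECRET",
			Source:          "exampleProvider",
		}, nil
	})

	cache := aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) {
		o.ExpiryWindow = 5 * time.Minute // refresh up to five minutes before expiry
		o.ExpiryWindowJitterFrac = 0.5   // randomize the refresh point within that window
	})

	creds, err := cache.Retrieve(context.TODO())
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("source:", creds.Source, "has keys:", creds.HasKeys())
}
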
-func (p *CredentialsCache) getCreds() (Credentials, bool) { - v := p.creds.Load() - if v == nil { - return Credentials{}, false - } - - c := v.(*Credentials) - if c == nil || !c.HasKeys() { - return Credentials{}, false - } - - return *c, true -} - -// Invalidate will invalidate the cached credentials. The next call to Retrieve -// will cause the provider's Retrieve method to be called. -func (p *CredentialsCache) Invalidate() { - p.creds.Store((*Credentials)(nil)) -} - -// IsCredentialsProvider returns whether credential provider wrapped by CredentialsCache -// matches the target provider type. -func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool { - return IsCredentialsProvider(p.provider, target) -} - -// HandleFailRefreshCredentialsCacheStrategy is an interface for -// CredentialsCache to allow CredentialsProvider how failed to refresh -// credentials is handled. -type HandleFailRefreshCredentialsCacheStrategy interface { - // Given the previously cached Credentials, if any, and refresh error, may - // returns new or modified set of Credentials, or error. - // - // Credential caches may use default implementation if nil. - HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error) -} - -// defaultHandleFailToRefresh returns the passed in error. -func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) { - return Credentials{}, err -} - -// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialCache -// to allow CredentialsProvider to intercept adjustments to Credentials expiry -// based on expectations and use cases of CredentialsProvider. -// -// Credential caches may use default implementation if nil. -type AdjustExpiresByCredentialsCacheStrategy interface { - // Given a Credentials as input, applying any mutations and - // returning the potentially updated Credentials, or error. - AdjustExpiresBy(Credentials, time.Duration) (Credentials, error) -} - -// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires, -// and returns the updated credentials value. If Credentials value's CanExpire -// is false, the passed in credentials are returned unchanged. -func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) { - if !creds.CanExpire { - return creds, nil - } - - creds.Expires = creds.Expires.Add(dur) - return creds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go deleted file mode 100644 index 98ba77056..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ /dev/null @@ -1,173 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "reflect" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -// AnonymousCredentials provides a sentinel CredentialsProvider that should be -// used to instruct the SDK's signing middleware to not sign the request. -// -// Using `nil` credentials when configuring an API client will achieve the same -// result. The AnonymousCredentials type allows you to configure the SDK's -// external config loading to not attempt to source credentials from the shared -// config or environment. -// -// For example you can use this CredentialsProvider with an API client's -// Options to instruct the client not to sign a request for accessing public -// S3 bucket objects. 
-// -// The following example demonstrates using the AnonymousCredentials to prevent -// SDK's external config loading attempt to resolve credentials. -// -// cfg, err := config.LoadDefaultConfig(context.TODO(), -// config.WithCredentialsProvider(aws.AnonymousCredentials{}), -// ) -// if err != nil { -// log.Fatalf("failed to load config, %v", err) -// } -// -// client := s3.NewFromConfig(cfg) -// -// Alternatively you can leave the API client Option's `Credential` member to -// nil. If using the `NewFromConfig` constructor you'll need to explicitly set -// the `Credentials` member to nil, if the external config resolved a -// credential provider. -// -// client := s3.New(s3.Options{ -// // Credentials defaults to a nil value. -// }) -// -// This can also be configured for specific operations calls too. -// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// log.Fatalf("failed to load config, %v", err) -// } -// -// client := s3.NewFromConfig(config) -// -// result, err := client.GetObject(context.TODO(), s3.GetObject{ -// Bucket: aws.String("example-bucket"), -// Key: aws.String("example-key"), -// }, func(o *s3.Options) { -// o.Credentials = nil -// // Or -// o.Credentials = aws.AnonymousCredentials{} -// }) -type AnonymousCredentials struct{} - -// Retrieve implements the CredentialsProvider interface, but will always -// return error, and cannot be used to sign a request. The AnonymousCredentials -// type is used as a sentinel type instructing the AWS request signing -// middleware to not sign a request. -func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) { - return Credentials{Source: "AnonymousCredentials"}, - fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with") -} - -// A Credentials is the AWS credentials value for individual credential fields. -type Credentials struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Source of the credentials - Source string - - // States if the credentials can expire or not. - CanExpire bool - - // The time the credentials will expire at. Should be ignored if CanExpire - // is false. - Expires time.Time - - // The ID of the account for the credentials. - AccountID string -} - -// Expired returns if the credentials have expired. -func (v Credentials) Expired() bool { - if v.CanExpire { - // Calling Round(0) on the current time will truncate the monotonic - // reading only. Ensures credential expiry time is always based on - // reported wall-clock time. - return !v.Expires.After(sdk.NowTime().Round(0)) - } - - return false -} - -// HasKeys returns if the credentials keys are set. -func (v Credentials) HasKeys() bool { - return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0 -} - -// A CredentialsProvider is the interface for any component which will provide -// credentials Credentials. A CredentialsProvider is required to manage its own -// Expired state, and what to be expired means. -// -// A credentials provider implementation can be wrapped with a CredentialCache -// to cache the credential value retrieved. Without the cache the SDK will -// attempt to retrieve the credentials for every request. -type CredentialsProvider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. 
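
As a companion illustration, the sketch below satisfies the CredentialsProvider interface described above with a hypothetical static provider type; only the interface, the Credentials struct, and its HasKeys/Expired helpers from this deleted file are assumed.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// staticProvider is a hypothetical provider whose credentials never expire.
type staticProvider struct {
	id, secret string
}

func (p staticProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return aws.Credentials{
		AccessKeyID:     p.id,
		SecretAccessKey: p.secret,
		Source:          "staticProvider",
		// CanExpire is left false, so Expired() always reports false.
	}, nil
}

var _ aws.CredentialsProvider = staticProvider{} // compile-time interface check

func main() {
	creds, _ := staticProvider{id: "EXAMPLEKEYID", secret: "EXAMPLESECRET"}.Retrieve(context.TODO())
	fmt.Println(creds.HasKeys(), creds.Expired()) // true false
}
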
- Retrieve(ctx context.Context) (Credentials, error) -} - -// CredentialsProviderFunc provides a helper wrapping a function value to -// satisfy the CredentialsProvider interface. -type CredentialsProviderFunc func(context.Context) (Credentials, error) - -// Retrieve delegates to the function value the CredentialsProviderFunc wraps. -func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) { - return fn(ctx) -} - -type isCredentialsProvider interface { - IsCredentialsProvider(CredentialsProvider) bool -} - -// IsCredentialsProvider returns whether the target CredentialProvider is the same type as provider when comparing the -// implementation type. -// -// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it will be responsible for validating -// whether target matches the credential provider type. -// -// When comparing the CredentialProvider implementations provider and target for equality, the following rules are used: -// -// If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false -// If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false -// If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false -// If provider is of type *T and target is of type *V,true if type *T is the same as type *V, otherwise false -func IsCredentialsProvider(provider, target CredentialsProvider) bool { - if target == nil || provider == nil { - return provider == target - } - - if x, ok := provider.(isCredentialsProvider); ok { - return x.IsCredentialsProvider(target) - } - - targetType := reflect.TypeOf(target) - if targetType.Kind() != reflect.Ptr { - targetType = reflect.PtrTo(targetType) - } - - providerType := reflect.TypeOf(provider) - if providerType.Kind() != reflect.Ptr { - providerType = reflect.PtrTo(providerType) - } - - return targetType.AssignableTo(providerType) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go deleted file mode 100644 index fd408e518..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go +++ /dev/null @@ -1,38 +0,0 @@ -package defaults - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "runtime" - "strings" -) - -var getGOOS = func() string { - return runtime.GOOS -} - -// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode -// is set to aws.DefaultsModeAuto. 
-func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode { - goos := getGOOS() - if goos == "android" || goos == "ios" { - return aws.DefaultsModeMobile - } - - var currentRegion string - if len(environment.EnvironmentIdentifier) > 0 { - currentRegion = environment.Region - } - - if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 { - currentRegion = environment.EC2InstanceMetadataRegion - } - - if len(region) > 0 && len(currentRegion) > 0 { - if strings.EqualFold(region, currentRegion) { - return aws.DefaultsModeInRegion - } - return aws.DefaultsModeCrossRegion - } - - return aws.DefaultsModeStandard -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go deleted file mode 100644 index 8b7e01fa2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go +++ /dev/null @@ -1,43 +0,0 @@ -package defaults - -import ( - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// Configuration is the set of SDK configuration options that are determined based -// on the configured DefaultsMode. -type Configuration struct { - // RetryMode is the configuration's default retry mode API clients should - // use for constructing a Retryer. - RetryMode aws.RetryMode - - // ConnectTimeout is the maximum amount of time a dial will wait for - // a connect to complete. - // - // See https://pkg.go.dev/net#Dialer.Timeout - ConnectTimeout *time.Duration - - // TLSNegotiationTimeout specifies the maximum amount of time waiting to - // wait for a TLS handshake. - // - // See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout - TLSNegotiationTimeout *time.Duration -} - -// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set. -func (c *Configuration) GetConnectTimeout() (time.Duration, bool) { - if c.ConnectTimeout == nil { - return 0, false - } - return *c.ConnectTimeout, true -} - -// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set. -func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) { - if c.TLSNegotiationTimeout == nil { - return 0, false - } - return *c.TLSNegotiationTimeout, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go deleted file mode 100644 index dbaa873dc..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT. - -package defaults - -import ( - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "time" -) - -// GetModeConfiguration returns the default Configuration descriptor for the given mode. 
-// -// Supports the following modes: cross-region, in-region, mobile, standard -func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) { - var mv aws.DefaultsMode - mv.SetFromString(string(mode)) - - switch mv { - case aws.DefaultsModeCrossRegion: - settings := Configuration{ - ConnectTimeout: aws.Duration(3100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeInRegion: - settings := Configuration{ - ConnectTimeout: aws.Duration(1100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeMobile: - settings := Configuration{ - ConnectTimeout: aws.Duration(30000 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond), - } - return settings, nil - case aws.DefaultsModeStandard: - settings := Configuration{ - ConnectTimeout: aws.Duration(3100 * time.Millisecond), - RetryMode: aws.RetryMode("standard"), - TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), - } - return settings, nil - default: - return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go deleted file mode 100644 index 2d90011b4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package defaults provides recommended configuration values for AWS SDKs and CLIs. -package defaults diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go deleted file mode 100644 index fcf9387c2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT. - -package aws - -import ( - "strings" -) - -// DefaultsMode is the SDK defaults mode setting. -type DefaultsMode string - -// The DefaultsMode constants. -const ( - // DefaultsModeAuto is an experimental mode that builds on the standard mode. - // The SDK will attempt to discover the execution environment to determine the - // appropriate settings automatically. - // - // Note that the auto detection is heuristics-based and does not guarantee 100% - // accuracy. STANDARD mode will be used if the execution environment cannot - // be determined. The auto detection might query EC2 Instance Metadata service - // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html), - // which might introduce latency. Therefore we recommend choosing an explicit - // defaults_mode instead if startup latency is critical to your application - DefaultsModeAuto DefaultsMode = "auto" - - // DefaultsModeCrossRegion builds on the standard mode and includes optimization - // tailored for applications which call AWS services in a different region - // - // Note that the default values vended from this mode might change as best practices - // may evolve. 
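
For reference, a short sketch (separate from the diff) of reading the generated defaults table above: it resolves the in-region mode and inspects the modeled connect timeout through the Configuration accessors from the deleted configuration.go.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/defaults"
)

func main() {
	modeCfg, err := defaults.GetModeConfiguration(aws.DefaultsModeInRegion)
	if err != nil {
		log.Fatalf("resolve defaults: %v", err)
	}

	fmt.Println("retry mode:", modeCfg.RetryMode)

	// The getter reports whether a timeout was modeled for this mode at all.
	if timeout, ok := modeCfg.GetConnectTimeout(); ok {
		fmt.Println("connect timeout:", timeout) // 1.1s per the in-region settings above
	}
}
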
As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeCrossRegion DefaultsMode = "cross-region" - - // DefaultsModeInRegion builds on the standard mode and includes optimization - // tailored for applications which call AWS services from within the same AWS - // region - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeInRegion DefaultsMode = "in-region" - - // DefaultsModeLegacy provides default settings that vary per SDK and were used - // prior to establishment of defaults_mode - DefaultsModeLegacy DefaultsMode = "legacy" - - // DefaultsModeMobile builds on the standard mode and includes optimization - // tailored for mobile applications - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeMobile DefaultsMode = "mobile" - - // DefaultsModeStandard provides the latest recommended default values that - // should be safe to run in most scenarios - // - // Note that the default values vended from this mode might change as best practices - // may evolve. As a result, it is encouraged to perform tests when upgrading - // the SDK - DefaultsModeStandard DefaultsMode = "standard" -) - -// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches -// the provided string when compared using EqualFold. If the value does not match a known -// constant it will be set to as-is and the function will return false. As a special case, if the -// provided value is a zero-length string, the mode will be set to LegacyDefaultsMode. -func (d *DefaultsMode) SetFromString(v string) (ok bool) { - switch { - case strings.EqualFold(v, string(DefaultsModeAuto)): - *d = DefaultsModeAuto - ok = true - case strings.EqualFold(v, string(DefaultsModeCrossRegion)): - *d = DefaultsModeCrossRegion - ok = true - case strings.EqualFold(v, string(DefaultsModeInRegion)): - *d = DefaultsModeInRegion - ok = true - case strings.EqualFold(v, string(DefaultsModeLegacy)): - *d = DefaultsModeLegacy - ok = true - case strings.EqualFold(v, string(DefaultsModeMobile)): - *d = DefaultsModeMobile - ok = true - case strings.EqualFold(v, string(DefaultsModeStandard)): - *d = DefaultsModeStandard - ok = true - case len(v) == 0: - *d = DefaultsModeLegacy - ok = true - default: - *d = DefaultsMode(v) - } - return ok -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go deleted file mode 100644 index d8b6e09e5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Package aws provides the core SDK's utilities and shared types. Use this package's -// utilities to simplify setting and reading API operations parameters. -// -// # Value and Pointer Conversion Utilities -// -// This package includes a helper conversion utility for each scalar type the SDK's -// API use. These utilities make getting a pointer of the scalar, and dereferencing -// a pointer easier. -// -// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. -// The Pointer to value will safely dereference the pointer and return its value. -// If the pointer was nil, the scalar's zero value will be returned. -// -// The value to pointer functions will be named after the scalar type. 
So get a -// *string from a string value use the "String" function. This makes it easy to -// to get pointer of a literal string value, because getting the address of a -// literal requires assigning the value to a variable first. -// -// var strPtr *string -// -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str -// -// // With the SDK's conversion functions -// strPtr = aws.String("my string") -// -// // Convert *string to string value -// str = aws.ToString(strPtr) -// -// In addition to scalars the aws package also includes conversion utilities for -// map and slice for commonly types used in API parameters. The map and slice -// conversion functions use similar naming pattern as the scalar conversion -// functions. -// -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} -// -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) -// -// // Convert []*string to []string -// strs = aws.ToStringSlice(strPtrs) -// -// # SDK Default HTTP Client -// -// The SDK will use the http.DefaultClient if a HTTP client is not provided to -// the SDK's Session, or service client constructor. This means that if the -// http.DefaultClient is modified by other components of your application the -// modifications will be picked up by the SDK as well. -// -// In some cases this might be intended, but it is a better practice to create -// a custom HTTP Client to share explicitly through your application. You can -// configure the SDK to use the custom HTTP Client by setting the HTTPClient -// value of the SDK's Config type when creating a Session or service client. -package aws - -// generate.go uses a build tag of "ignore", go run doesn't need to specify -// this because go run ignores all build flags when running a go file directly. -//go:generate go run -tags codegen generate.go -//go:generate go run -tags codegen logging_generate.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go deleted file mode 100644 index 99edbf3ee..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go +++ /dev/null @@ -1,247 +0,0 @@ -package aws - -import ( - "fmt" -) - -// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior. -type DualStackEndpointState uint - -const ( - // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution. - DualStackEndpointStateUnset DualStackEndpointState = iota - - // DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints. - DualStackEndpointStateEnabled - - // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints. - DualStackEndpointStateDisabled -) - -// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value. -// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState. -func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) { - type iface interface { - GetUseDualStackEndpoint() DualStackEndpointState - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetUseDualStackEndpoint() - found = true - break - } - } - return value, found -} - -// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior. 
-type FIPSEndpointState uint - -const ( - // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution. - FIPSEndpointStateUnset FIPSEndpointState = iota - - // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints. - FIPSEndpointStateEnabled - - // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints. - FIPSEndpointStateDisabled -) - -// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value. -// Returns boolean false if the provided options does not have a method to retrieve the DualStackEndpointState. -func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) { - type iface interface { - GetUseFIPSEndpoint() FIPSEndpointState - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetUseFIPSEndpoint() - found = true - break - } - } - return value, found -} - -// Endpoint represents the endpoint a service client should make API operation -// calls to. -// -// The SDK will automatically resolve these endpoints per API client using an -// internal endpoint resolvers. If you'd like to provide custom endpoint -// resolving behavior you can implement the EndpointResolver interface. -// -// Deprecated: This structure was used with the global [EndpointResolver] -// interface, which has been deprecated in favor of service-specific endpoint -// resolution. See the deprecation docs on that interface for more information. -type Endpoint struct { - // The base URL endpoint the SDK API clients will use to make API calls to. - // The SDK will suffix URI path and query elements to this endpoint. - URL string - - // Specifies if the endpoint's hostname can be modified by the SDK's API - // client. - // - // If the hostname is mutable the SDK API clients may modify any part of - // the hostname based on the requirements of the API, (e.g. adding, or - // removing content in the hostname). Such as, Amazon S3 API client - // prefixing "bucketname" to the hostname, or changing the - // hostname service name component from "s3." to "s3-accesspoint.dualstack." - // for the dualstack endpoint of an S3 Accesspoint resource. - // - // Care should be taken when providing a custom endpoint for an API. If the - // endpoint hostname is mutable, and the client cannot modify the endpoint - // correctly, the operation call will most likely fail, or have undefined - // behavior. - // - // If hostname is immutable, the SDK API clients will not modify the - // hostname of the URL. This may cause the API client not to function - // correctly if the API requires the operation specific hostname values - // to be used by the client. - // - // This flag does not modify the API client's behavior if this endpoint - // will be used instead of Endpoint Discovery, or if the endpoint will be - // used to perform Endpoint Discovery. That behavior is configured via the - // API Client's Options. - HostnameImmutable bool - - // The AWS partition the endpoint belongs to. - PartitionID string - - // The service name that should be used for signing the requests to the - // endpoint. - SigningName string - - // The region that should be used for signing the request to the endpoint. - SigningRegion string - - // The signing method that should be used for signing the requests to the - // endpoint. - SigningMethod string - - // The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata. 
- // When providing a custom endpoint, you should set the source as EndpointSourceCustom. - // If source is not provided when providing a custom endpoint, the SDK may not - // perform required host mutations correctly. Source should be used along with - // HostnameImmutable property as per the usage requirement. - Source EndpointSource -} - -// EndpointSource is the endpoint source type. -// -// Deprecated: The global [Endpoint] structure is deprecated. -type EndpointSource int - -const ( - // EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source. - EndpointSourceServiceMetadata EndpointSource = iota - - // EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when - // user provides a custom endpoint to be used by the SDK. - EndpointSourceCustom -) - -// EndpointNotFoundError is a sentinel error to indicate that the -// EndpointResolver implementation was unable to resolve an endpoint for the -// given service and region. Resolvers should use this to indicate that an API -// client should fallback and attempt to use it's internal default resolver to -// resolve the endpoint. -type EndpointNotFoundError struct { - Err error -} - -// Error is the error message. -func (e *EndpointNotFoundError) Error() string { - return fmt.Sprintf("endpoint not found, %v", e.Err) -} - -// Unwrap returns the underlying error. -func (e *EndpointNotFoundError) Unwrap() error { - return e.Err -} - -// EndpointResolver is an endpoint resolver that can be used to provide or -// override an endpoint for the given service and region. API clients will -// attempt to use the EndpointResolver first to resolve an endpoint if -// available. If the EndpointResolver returns an EndpointNotFoundError error, -// API clients will fallback to attempting to resolve the endpoint using its -// internal default endpoint resolver. -// -// Deprecated: The global endpoint resolution interface is deprecated. The API -// for endpoint resolution is now unique to each service and is set via the -// EndpointResolverV2 field on service client options. Setting a value for -// EndpointResolver on aws.Config or service client options will prevent you -// from using any endpoint-related service features released after the -// introduction of EndpointResolverV2. You may also encounter broken or -// unexpected behavior when using the old global interface with services that -// use many endpoint-related customizations such as S3. -type EndpointResolver interface { - ResolveEndpoint(service, region string) (Endpoint, error) -} - -// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface. -// -// Deprecated: The global endpoint resolution interface is deprecated. See -// deprecation docs on [EndpointResolver]. -type EndpointResolverFunc func(service, region string) (Endpoint, error) - -// ResolveEndpoint calls the wrapped function and returns the results. -func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { - return e(service, region) -} - -// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or -// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will -// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if -// available. 
If the EndpointResolverWithOptions returns an EndpointNotFoundError error, -// API clients will fallback to attempting to resolve the endpoint using its -// internal default endpoint resolver. -// -// Deprecated: The global endpoint resolution interface is deprecated. See -// deprecation docs on [EndpointResolver]. -type EndpointResolverWithOptions interface { - ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) -} - -// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface. -// -// Deprecated: The global endpoint resolution interface is deprecated. See -// deprecation docs on [EndpointResolver]. -type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error) - -// ResolveEndpoint calls the wrapped function and returns the results. -func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) { - return e(service, region, options...) -} - -// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value. -// Returns boolean false if the provided options does not have a method to retrieve the DisableHTTPS. -func GetDisableHTTPS(options ...interface{}) (value bool, found bool) { - type iface interface { - GetDisableHTTPS() bool - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetDisableHTTPS() - found = true - break - } - } - return value, found -} - -// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value. -// Returns boolean false if the provided options does not have a method to retrieve the ResolvedRegion. -func GetResolvedRegion(options ...interface{}) (value string, found bool) { - type iface interface { - GetResolvedRegion() string - } - for _, option := range options { - if i, ok := option.(iface); ok { - value = i.GetResolvedRegion() - found = true - break - } - } - return value, found -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go deleted file mode 100644 index f390a08f9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package aws - -// MissingRegionError is an error that is returned if region configuration -// value was not found. -type MissingRegionError struct{} - -func (*MissingRegionError) Error() string { - return "an AWS region is required, but was not found" -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go deleted file mode 100644 index 2394418e9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go +++ /dev/null @@ -1,365 +0,0 @@ -// Code generated by aws/generate.go DO NOT EDIT. - -package aws - -import ( - "github.com/aws/smithy-go/ptr" - "time" -) - -// ToBool returns bool value dereferenced if the passed -// in pointer was not nil. Returns a bool zero value if the -// pointer was nil. -func ToBool(p *bool) (v bool) { - return ptr.ToBool(p) -} - -// ToBoolSlice returns a slice of bool values, that are -// dereferenced if the passed in pointer was not nil. Returns a bool -// zero value if the pointer was nil. -func ToBoolSlice(vs []*bool) []bool { - return ptr.ToBoolSlice(vs) -} - -// ToBoolMap returns a map of bool values, that are -// dereferenced if the passed in pointer was not nil. The bool -// zero value is used if the pointer was nil. 
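
For illustration, a sketch of the deprecated global resolver interface deleted above: a resolver that pins one service to a custom URL and signals fallback with EndpointNotFoundError for everything else. The localhost URL is a made-up placeholder, and per the deprecation notes this interface is superseded by the per-service EndpointResolverV2 options.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			if service == "S3" {
				return aws.Endpoint{
					URL:               "http://localhost:9000", // placeholder custom endpoint
					HostnameImmutable: true,
					Source:            aws.EndpointSourceCustom,
				}, nil
			}
			// EndpointNotFoundError tells the client to fall back to its built-in resolver.
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})

	ep, err := resolver.ResolveEndpoint("S3", "us-east-1")
	fmt.Println(ep.URL, err) // http://localhost:9000 <nil>
}
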
-func ToBoolMap(vs map[string]*bool) map[string]bool { - return ptr.ToBoolMap(vs) -} - -// ToByte returns byte value dereferenced if the passed -// in pointer was not nil. Returns a byte zero value if the -// pointer was nil. -func ToByte(p *byte) (v byte) { - return ptr.ToByte(p) -} - -// ToByteSlice returns a slice of byte values, that are -// dereferenced if the passed in pointer was not nil. Returns a byte -// zero value if the pointer was nil. -func ToByteSlice(vs []*byte) []byte { - return ptr.ToByteSlice(vs) -} - -// ToByteMap returns a map of byte values, that are -// dereferenced if the passed in pointer was not nil. The byte -// zero value is used if the pointer was nil. -func ToByteMap(vs map[string]*byte) map[string]byte { - return ptr.ToByteMap(vs) -} - -// ToString returns string value dereferenced if the passed -// in pointer was not nil. Returns a string zero value if the -// pointer was nil. -func ToString(p *string) (v string) { - return ptr.ToString(p) -} - -// ToStringSlice returns a slice of string values, that are -// dereferenced if the passed in pointer was not nil. Returns a string -// zero value if the pointer was nil. -func ToStringSlice(vs []*string) []string { - return ptr.ToStringSlice(vs) -} - -// ToStringMap returns a map of string values, that are -// dereferenced if the passed in pointer was not nil. The string -// zero value is used if the pointer was nil. -func ToStringMap(vs map[string]*string) map[string]string { - return ptr.ToStringMap(vs) -} - -// ToInt returns int value dereferenced if the passed -// in pointer was not nil. Returns a int zero value if the -// pointer was nil. -func ToInt(p *int) (v int) { - return ptr.ToInt(p) -} - -// ToIntSlice returns a slice of int values, that are -// dereferenced if the passed in pointer was not nil. Returns a int -// zero value if the pointer was nil. -func ToIntSlice(vs []*int) []int { - return ptr.ToIntSlice(vs) -} - -// ToIntMap returns a map of int values, that are -// dereferenced if the passed in pointer was not nil. The int -// zero value is used if the pointer was nil. -func ToIntMap(vs map[string]*int) map[string]int { - return ptr.ToIntMap(vs) -} - -// ToInt8 returns int8 value dereferenced if the passed -// in pointer was not nil. Returns a int8 zero value if the -// pointer was nil. -func ToInt8(p *int8) (v int8) { - return ptr.ToInt8(p) -} - -// ToInt8Slice returns a slice of int8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int8 -// zero value if the pointer was nil. -func ToInt8Slice(vs []*int8) []int8 { - return ptr.ToInt8Slice(vs) -} - -// ToInt8Map returns a map of int8 values, that are -// dereferenced if the passed in pointer was not nil. The int8 -// zero value is used if the pointer was nil. -func ToInt8Map(vs map[string]*int8) map[string]int8 { - return ptr.ToInt8Map(vs) -} - -// ToInt16 returns int16 value dereferenced if the passed -// in pointer was not nil. Returns a int16 zero value if the -// pointer was nil. -func ToInt16(p *int16) (v int16) { - return ptr.ToInt16(p) -} - -// ToInt16Slice returns a slice of int16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int16 -// zero value if the pointer was nil. -func ToInt16Slice(vs []*int16) []int16 { - return ptr.ToInt16Slice(vs) -} - -// ToInt16Map returns a map of int16 values, that are -// dereferenced if the passed in pointer was not nil. The int16 -// zero value is used if the pointer was nil. 
-func ToInt16Map(vs map[string]*int16) map[string]int16 { - return ptr.ToInt16Map(vs) -} - -// ToInt32 returns int32 value dereferenced if the passed -// in pointer was not nil. Returns a int32 zero value if the -// pointer was nil. -func ToInt32(p *int32) (v int32) { - return ptr.ToInt32(p) -} - -// ToInt32Slice returns a slice of int32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int32 -// zero value if the pointer was nil. -func ToInt32Slice(vs []*int32) []int32 { - return ptr.ToInt32Slice(vs) -} - -// ToInt32Map returns a map of int32 values, that are -// dereferenced if the passed in pointer was not nil. The int32 -// zero value is used if the pointer was nil. -func ToInt32Map(vs map[string]*int32) map[string]int32 { - return ptr.ToInt32Map(vs) -} - -// ToInt64 returns int64 value dereferenced if the passed -// in pointer was not nil. Returns a int64 zero value if the -// pointer was nil. -func ToInt64(p *int64) (v int64) { - return ptr.ToInt64(p) -} - -// ToInt64Slice returns a slice of int64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int64 -// zero value if the pointer was nil. -func ToInt64Slice(vs []*int64) []int64 { - return ptr.ToInt64Slice(vs) -} - -// ToInt64Map returns a map of int64 values, that are -// dereferenced if the passed in pointer was not nil. The int64 -// zero value is used if the pointer was nil. -func ToInt64Map(vs map[string]*int64) map[string]int64 { - return ptr.ToInt64Map(vs) -} - -// ToUint returns uint value dereferenced if the passed -// in pointer was not nil. Returns a uint zero value if the -// pointer was nil. -func ToUint(p *uint) (v uint) { - return ptr.ToUint(p) -} - -// ToUintSlice returns a slice of uint values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint -// zero value if the pointer was nil. -func ToUintSlice(vs []*uint) []uint { - return ptr.ToUintSlice(vs) -} - -// ToUintMap returns a map of uint values, that are -// dereferenced if the passed in pointer was not nil. The uint -// zero value is used if the pointer was nil. -func ToUintMap(vs map[string]*uint) map[string]uint { - return ptr.ToUintMap(vs) -} - -// ToUint8 returns uint8 value dereferenced if the passed -// in pointer was not nil. Returns a uint8 zero value if the -// pointer was nil. -func ToUint8(p *uint8) (v uint8) { - return ptr.ToUint8(p) -} - -// ToUint8Slice returns a slice of uint8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint8 -// zero value if the pointer was nil. -func ToUint8Slice(vs []*uint8) []uint8 { - return ptr.ToUint8Slice(vs) -} - -// ToUint8Map returns a map of uint8 values, that are -// dereferenced if the passed in pointer was not nil. The uint8 -// zero value is used if the pointer was nil. -func ToUint8Map(vs map[string]*uint8) map[string]uint8 { - return ptr.ToUint8Map(vs) -} - -// ToUint16 returns uint16 value dereferenced if the passed -// in pointer was not nil. Returns a uint16 zero value if the -// pointer was nil. -func ToUint16(p *uint16) (v uint16) { - return ptr.ToUint16(p) -} - -// ToUint16Slice returns a slice of uint16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint16 -// zero value if the pointer was nil. -func ToUint16Slice(vs []*uint16) []uint16 { - return ptr.ToUint16Slice(vs) -} - -// ToUint16Map returns a map of uint16 values, that are -// dereferenced if the passed in pointer was not nil. The uint16 -// zero value is used if the pointer was nil. 
-func ToUint16Map(vs map[string]*uint16) map[string]uint16 { - return ptr.ToUint16Map(vs) -} - -// ToUint32 returns uint32 value dereferenced if the passed -// in pointer was not nil. Returns a uint32 zero value if the -// pointer was nil. -func ToUint32(p *uint32) (v uint32) { - return ptr.ToUint32(p) -} - -// ToUint32Slice returns a slice of uint32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint32 -// zero value if the pointer was nil. -func ToUint32Slice(vs []*uint32) []uint32 { - return ptr.ToUint32Slice(vs) -} - -// ToUint32Map returns a map of uint32 values, that are -// dereferenced if the passed in pointer was not nil. The uint32 -// zero value is used if the pointer was nil. -func ToUint32Map(vs map[string]*uint32) map[string]uint32 { - return ptr.ToUint32Map(vs) -} - -// ToUint64 returns uint64 value dereferenced if the passed -// in pointer was not nil. Returns a uint64 zero value if the -// pointer was nil. -func ToUint64(p *uint64) (v uint64) { - return ptr.ToUint64(p) -} - -// ToUint64Slice returns a slice of uint64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint64 -// zero value if the pointer was nil. -func ToUint64Slice(vs []*uint64) []uint64 { - return ptr.ToUint64Slice(vs) -} - -// ToUint64Map returns a map of uint64 values, that are -// dereferenced if the passed in pointer was not nil. The uint64 -// zero value is used if the pointer was nil. -func ToUint64Map(vs map[string]*uint64) map[string]uint64 { - return ptr.ToUint64Map(vs) -} - -// ToFloat32 returns float32 value dereferenced if the passed -// in pointer was not nil. Returns a float32 zero value if the -// pointer was nil. -func ToFloat32(p *float32) (v float32) { - return ptr.ToFloat32(p) -} - -// ToFloat32Slice returns a slice of float32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float32 -// zero value if the pointer was nil. -func ToFloat32Slice(vs []*float32) []float32 { - return ptr.ToFloat32Slice(vs) -} - -// ToFloat32Map returns a map of float32 values, that are -// dereferenced if the passed in pointer was not nil. The float32 -// zero value is used if the pointer was nil. -func ToFloat32Map(vs map[string]*float32) map[string]float32 { - return ptr.ToFloat32Map(vs) -} - -// ToFloat64 returns float64 value dereferenced if the passed -// in pointer was not nil. Returns a float64 zero value if the -// pointer was nil. -func ToFloat64(p *float64) (v float64) { - return ptr.ToFloat64(p) -} - -// ToFloat64Slice returns a slice of float64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float64 -// zero value if the pointer was nil. -func ToFloat64Slice(vs []*float64) []float64 { - return ptr.ToFloat64Slice(vs) -} - -// ToFloat64Map returns a map of float64 values, that are -// dereferenced if the passed in pointer was not nil. The float64 -// zero value is used if the pointer was nil. -func ToFloat64Map(vs map[string]*float64) map[string]float64 { - return ptr.ToFloat64Map(vs) -} - -// ToTime returns time.Time value dereferenced if the passed -// in pointer was not nil. Returns a time.Time zero value if the -// pointer was nil. -func ToTime(p *time.Time) (v time.Time) { - return ptr.ToTime(p) -} - -// ToTimeSlice returns a slice of time.Time values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Time -// zero value if the pointer was nil. 
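// Editor's note: illustrative sketch by the editor, not part of the upstream diff.
// The ToX helpers above exist because AWS API shapes use pointers to distinguish
// "unset" from the zero value; each helper dereferences nil-safely. The companion
// aws.String constructor lives in to_ptr.go (not shown in this hunk).
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	var missing *string // e.g. an optional field the service did not return

	// Nil-safe dereference: prints "" instead of panicking.
	fmt.Printf("missing=%q\n", aws.ToString(missing))

	// Round trip: aws.String takes the address of a value, ToString dereferences it.
	fmt.Println(aws.ToString(aws.String("pbm-backups"))) // pbm-backups

	// Slices of pointers are flattened element by element; nil elements become "".
	fmt.Println(aws.ToStringSlice([]*string{aws.String("a"), nil, aws.String("c")}))
}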
-func ToTimeSlice(vs []*time.Time) []time.Time { - return ptr.ToTimeSlice(vs) -} - -// ToTimeMap returns a map of time.Time values, that are -// dereferenced if the passed in pointer was not nil. The time.Time -// zero value is used if the pointer was nil. -func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { - return ptr.ToTimeMap(vs) -} - -// ToDuration returns time.Duration value dereferenced if the passed -// in pointer was not nil. Returns a time.Duration zero value if the -// pointer was nil. -func ToDuration(p *time.Duration) (v time.Duration) { - return ptr.ToDuration(p) -} - -// ToDurationSlice returns a slice of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Duration -// zero value if the pointer was nil. -func ToDurationSlice(vs []*time.Duration) []time.Duration { - return ptr.ToDurationSlice(vs) -} - -// ToDurationMap returns a map of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. The time.Duration -// zero value is used if the pointer was nil. -func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { - return ptr.ToDurationMap(vs) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go deleted file mode 100644 index 56e60759e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package aws - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.33.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go deleted file mode 100644 index 91c94d987..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by aws/logging_generate.go DO NOT EDIT. - -package aws - -// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where -// each bit is a flag that describes the logging behavior for one or more client components. -// The entire 64-bit group is reserved for later expansion by the SDK. -// -// Example: Setting ClientLogMode to enable logging of retries and requests -// -// clientLogMode := aws.LogRetries | aws.LogRequest -// -// Example: Adding an additional log mode to an existing ClientLogMode value -// -// clientLogMode |= aws.LogResponse -type ClientLogMode uint64 - -// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. 
-const ( - LogSigning ClientLogMode = 1 << (64 - 1 - iota) - LogRetries - LogRequest - LogRequestWithBody - LogResponse - LogResponseWithBody - LogDeprecatedUsage - LogRequestEventMessage - LogResponseEventMessage -) - -// IsSigning returns whether the Signing logging mode bit is set -func (m ClientLogMode) IsSigning() bool { - return m&LogSigning != 0 -} - -// IsRetries returns whether the Retries logging mode bit is set -func (m ClientLogMode) IsRetries() bool { - return m&LogRetries != 0 -} - -// IsRequest returns whether the Request logging mode bit is set -func (m ClientLogMode) IsRequest() bool { - return m&LogRequest != 0 -} - -// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set -func (m ClientLogMode) IsRequestWithBody() bool { - return m&LogRequestWithBody != 0 -} - -// IsResponse returns whether the Response logging mode bit is set -func (m ClientLogMode) IsResponse() bool { - return m&LogResponse != 0 -} - -// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set -func (m ClientLogMode) IsResponseWithBody() bool { - return m&LogResponseWithBody != 0 -} - -// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set -func (m ClientLogMode) IsDeprecatedUsage() bool { - return m&LogDeprecatedUsage != 0 -} - -// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set -func (m ClientLogMode) IsRequestEventMessage() bool { - return m&LogRequestEventMessage != 0 -} - -// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set -func (m ClientLogMode) IsResponseEventMessage() bool { - return m&LogResponseEventMessage != 0 -} - -// ClearSigning clears the Signing logging mode bit -func (m *ClientLogMode) ClearSigning() { - *m &^= LogSigning -} - -// ClearRetries clears the Retries logging mode bit -func (m *ClientLogMode) ClearRetries() { - *m &^= LogRetries -} - -// ClearRequest clears the Request logging mode bit -func (m *ClientLogMode) ClearRequest() { - *m &^= LogRequest -} - -// ClearRequestWithBody clears the RequestWithBody logging mode bit -func (m *ClientLogMode) ClearRequestWithBody() { - *m &^= LogRequestWithBody -} - -// ClearResponse clears the Response logging mode bit -func (m *ClientLogMode) ClearResponse() { - *m &^= LogResponse -} - -// ClearResponseWithBody clears the ResponseWithBody logging mode bit -func (m *ClientLogMode) ClearResponseWithBody() { - *m &^= LogResponseWithBody -} - -// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit -func (m *ClientLogMode) ClearDeprecatedUsage() { - *m &^= LogDeprecatedUsage -} - -// ClearRequestEventMessage clears the RequestEventMessage logging mode bit -func (m *ClientLogMode) ClearRequestEventMessage() { - *m &^= LogRequestEventMessage -} - -// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit -func (m *ClientLogMode) ClearResponseEventMessage() { - *m &^= LogResponseEventMessage -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go deleted file mode 100644 index 6ecc2231a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go +++ /dev/null @@ -1,95 +0,0 @@ -//go:build clientlogmode -// +build clientlogmode - -package main - -import ( - "fmt" - "log" - "os" - "strings" - "text/template" -) - -var config = struct { - ModeBits []string -}{ - // Items should be appended only to keep bit-flag positions stable - ModeBits: []string{ - "Signing", - 
"Retries", - "Request", - "RequestWithBody", - "Response", - "ResponseWithBody", - "DeprecatedUsage", - "RequestEventMessage", - "ResponseEventMessage", - }, -} - -func bitName(name string) string { - return strings.ToUpper(name[:1]) + name[1:] -} - -var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{ - "symbolName": func(name string) string { - return "Log" + bitName(name) - }, - "bitName": bitName, -}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT. - -package aws - -// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where -// each bit is a flag that describes the logging behavior for one or more client components. -// The entire 64-bit group is reserved for later expansion by the SDK. -// -// Example: Setting ClientLogMode to enable logging of retries and requests -// clientLogMode := aws.LogRetries | aws.LogRequest -// -// Example: Adding an additional log mode to an existing ClientLogMode value -// clientLogMode |= aws.LogResponse -type ClientLogMode uint64 - -// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. -const ( -{{- range $index, $field := .ModeBits }} - {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }} -{{- end }} -) -{{ range $_, $field := .ModeBits }} -// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set -func (m ClientLogMode) Is{{- bitName $field }}() bool { - return m&{{- (symbolName $field) }} != 0 -} -{{ end }} -{{- range $_, $field := .ModeBits }} -// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit -func (m *ClientLogMode) Clear{{- bitName $field }}() { - *m &^= {{ (symbolName $field) }} -} -{{ end -}} -`)) - -func main() { - uniqueBitFields := make(map[string]struct{}) - - for _, bitName := range config.ModeBits { - if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok { - panic(fmt.Sprintf("duplicate bit field: %s", bitName)) - } - uniqueBitFields[bitName] = struct{}{} - } - - file, err := os.Create("logging.go") - if err != nil { - log.Fatal(err) - } - defer file.Close() - - err = tmpl.Execute(file, config) - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go deleted file mode 100644 index d66f0960a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go +++ /dev/null @@ -1,213 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" - - "github.com/aws/smithy-go/middleware" -) - -// RegisterServiceMetadata registers metadata about the service and operation into the middleware context -// so that it is available at runtime for other middleware to introspect. -type RegisterServiceMetadata struct { - ServiceID string - SigningName string - Region string - OperationName string -} - -// ID returns the middleware identifier. -func (s *RegisterServiceMetadata) ID() string { - return "RegisterServiceMetadata" -} - -// HandleInitialize registers service metadata information into the middleware context, allowing for introspection. 
-func (s RegisterServiceMetadata) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) { - if len(s.ServiceID) > 0 { - ctx = SetServiceID(ctx, s.ServiceID) - } - if len(s.SigningName) > 0 { - ctx = SetSigningName(ctx, s.SigningName) - } - if len(s.Region) > 0 { - ctx = setRegion(ctx, s.Region) - } - if len(s.OperationName) > 0 { - ctx = setOperationName(ctx, s.OperationName) - } - return next.HandleInitialize(ctx, in) -} - -// service metadata keys for storing and lookup of runtime stack information. -type ( - serviceIDKey struct{} - signingNameKey struct{} - signingRegionKey struct{} - regionKey struct{} - operationNameKey struct{} - partitionIDKey struct{} - requiresLegacyEndpointsKey struct{} -) - -// GetServiceID retrieves the service id from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetServiceID(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string) - return v -} - -// GetSigningName retrieves the service signing name from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. The resolved signing name is available -// in the signer properties object passed to the signer. -func GetSigningName(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string) - return v -} - -// GetSigningRegion retrieves the region from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. The resolved signing region is available -// in the signer properties object passed to the signer. -func GetSigningRegion(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string) - return v -} - -// GetRegion retrieves the endpoint region from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetRegion(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, regionKey{}).(string) - return v -} - -// GetOperationName retrieves the service operation metadata from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetOperationName(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string) - return v -} - -// GetPartitionID retrieves the endpoint partition id from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetPartitionID(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string) - return v -} - -// GetRequiresLegacyEndpoints the flag used to indicate if legacy endpoint -// customizations need to be executed. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. 
-func GetRequiresLegacyEndpoints(ctx context.Context) bool { - v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool) - return v -} - -// SetRequiresLegacyEndpoints set or modifies the flag indicated that -// legacy endpoint customizations are needed. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value) -} - -// SetSigningName set or modifies the sigv4 or sigv4a signing name on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. Use WithSigV4SigningName client option -// funcs instead. -func SetSigningName(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, signingNameKey{}, value) -} - -// SetSigningRegion sets or modifies the region on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -// -// Deprecated: This value is unstable. Use WithSigV4SigningRegion client option -// funcs instead. -func SetSigningRegion(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, signingRegionKey{}, value) -} - -// SetServiceID sets the service id on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetServiceID(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, serviceIDKey{}, value) -} - -// setRegion sets the endpoint region on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setRegion(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, regionKey{}, value) -} - -// setOperationName sets the service operation on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setOperationName(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, operationNameKey{}, value) -} - -// SetPartitionID sets the partition id of a resolved region on the context -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetPartitionID(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, partitionIDKey{}, value) -} - -// EndpointSource key -type endpointSourceKey struct{} - -// GetEndpointSource returns an endpoint source if set on context -func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) { - v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource) - return v -} - -// SetEndpointSource sets endpoint source on context -func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context { - return middleware.WithStackValue(ctx, endpointSourceKey{}, value) -} - -type signingCredentialsKey struct{} - -// GetSigningCredentials returns the credentials that were used for signing if set on context. 
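// Editor's note: illustrative sketch by the editor, not part of the upstream diff.
// It shows how a caller could read the stack values that RegisterServiceMetadata
// (above) populates, from inside a custom Initialize middleware. Attaching the
// middleware through cfg.APIOptions is an assumption for the example; the getter
// functions are the ones defined in this file.
package main

import (
	"context"
	"log"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/smithy-go/middleware"
)

// logOperation logs which service, operation and region an API call targets.
var logOperation = middleware.InitializeMiddlewareFunc("LogOperation",
	func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
		middleware.InitializeOutput, middleware.Metadata, error,
	) {
		log.Printf("calling %s.%s in region %s",
			awsmiddleware.GetServiceID(ctx),
			awsmiddleware.GetOperationName(ctx),
			awsmiddleware.GetRegion(ctx))
		return next.HandleInitialize(ctx, in)
	})

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// middleware.After appends the logger at the end of the Initialize step, so the
	// generated client's RegisterServiceMetadata should already have set these values
	// by the time it runs.
	cfg.APIOptions = append(cfg.APIOptions, func(stack *middleware.Stack) error {
		return stack.Initialize.Add(logOperation, middleware.After)
	})
	_ = cfg // pass cfg to a service client constructor, e.g. s3.NewFromConfig(cfg)
}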
-func GetSigningCredentials(ctx context.Context) (v aws.Credentials) { - v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials) - return v -} - -// SetSigningCredentials sets the credentails used for signing on the context. -func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context { - return middleware.WithStackValue(ctx, signingCredentialsKey{}, value) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go deleted file mode 100644 index 6d5f0079c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go +++ /dev/null @@ -1,168 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyrand "github.com/aws/smithy-go/rand" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation -// invocation. -type ClientRequestID struct{} - -// ID the identifier for the ClientRequestID -func (r *ClientRequestID) ID() string { - return "ClientRequestID" -} - -// HandleBuild attaches a unique operation invocation id for the operation to the request -func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", req) - } - - invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID() - if err != nil { - return out, metadata, err - } - - const invocationIDHeader = "Amz-Sdk-Invocation-Id" - req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID) - - return next.HandleBuild(ctx, in) -} - -// RecordResponseTiming records the response timing for the SDK client requests. -type RecordResponseTiming struct{} - -// ID is the middleware identifier -func (a *RecordResponseTiming) ID() string { - return "RecordResponseTiming" -} - -// HandleDeserialize calculates response metadata and clock skew -func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - responseAt := sdk.NowTime() - setResponseAt(&metadata, responseAt) - - var serverTime time.Time - - switch resp := out.RawResponse.(type) { - case *smithyhttp.Response: - respDateHeader := resp.Header.Get("Date") - if len(respDateHeader) == 0 { - break - } - var parseErr error - serverTime, parseErr = smithyhttp.ParseTime(respDateHeader) - if parseErr != nil { - logger := middleware.GetLogger(ctx) - logger.Logf(logging.Warn, "failed to parse response Date header value, got %v", - parseErr.Error()) - break - } - setServerTime(&metadata, serverTime) - } - - if !serverTime.IsZero() { - attemptSkew := serverTime.Sub(responseAt) - setAttemptSkew(&metadata, attemptSkew) - } - - return out, metadata, err -} - -type responseAtKey struct{} - -// GetResponseAt returns the time response was received at. 
-func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) { - v, ok = metadata.Get(responseAtKey{}).(time.Time) - return v, ok -} - -// setResponseAt sets the response time on the metadata. -func setResponseAt(metadata *middleware.Metadata, v time.Time) { - metadata.Set(responseAtKey{}, v) -} - -type serverTimeKey struct{} - -// GetServerTime returns the server time for response. -func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) { - v, ok = metadata.Get(serverTimeKey{}).(time.Time) - return v, ok -} - -// setServerTime sets the server time on the metadata. -func setServerTime(metadata *middleware.Metadata, v time.Time) { - metadata.Set(serverTimeKey{}, v) -} - -type attemptSkewKey struct{} - -// GetAttemptSkew returns Attempt clock skew for response from metadata. -func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) { - v, ok = metadata.Get(attemptSkewKey{}).(time.Duration) - return v, ok -} - -// setAttemptSkew sets the attempt clock skew on the metadata. -func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) { - metadata.Set(attemptSkewKey{}, v) -} - -// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack -func AddClientRequestIDMiddleware(stack *middleware.Stack) error { - return stack.Build.Add(&ClientRequestID{}, middleware.After) -} - -// AddRecordResponseTiming adds RecordResponseTiming middleware to the -// middleware stack. -func AddRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After) -} - -// rawResponseKey is the accessor key used to store and access the -// raw response within the response metadata. -type rawResponseKey struct{} - -// AddRawResponse middleware adds raw response on to the metadata -type AddRawResponse struct{} - -// ID the identifier for the ClientRequestID -func (m *AddRawResponse) ID() string { - return "AddRawResponseToMetadata" -} - -// HandleDeserialize adds raw response on the middleware metadata -func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - metadata.Set(rawResponseKey{}, out.RawResponse) - return out, metadata, err -} - -// AddRawResponseToMetadata adds middleware to the middleware stack that -// store raw response on to the metadata. 
-func AddRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before) -} - -// GetRawResponse returns raw response set on metadata -func GetRawResponse(metadata middleware.Metadata) interface{} { - return metadata.Get(rawResponseKey{}) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go deleted file mode 100644 index ba262dadc..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package middleware - -import "runtime" - -func getNormalizedOSName() (os string) { - switch runtime.GOOS { - case "android": - os = "android" - case "linux": - os = "linux" - case "windows": - os = "windows" - case "darwin": - os = "macos" - case "ios": - os = "ios" - default: - os = "other" - } - return os -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go deleted file mode 100644 index e14a1e4ec..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !go1.16 -// +build !go1.16 - -package middleware - -import "runtime" - -func getNormalizedOSName() (os string) { - switch runtime.GOOS { - case "android": - os = "android" - case "linux": - os = "linux" - case "windows": - os = "windows" - case "darwin": - // Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64 - // For now declare this as "other" until we have a better detection mechanism. - fallthrough - default: - os = "other" - } - return os -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go deleted file mode 100644 index 3f6aaf231..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go +++ /dev/null @@ -1,94 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "os" -) - -const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME" -const envAmznTraceID = "_X_AMZN_TRACE_ID" -const amznTraceIDHeader = "X-Amzn-Trace-Id" - -// AddRecursionDetection adds recursionDetection to the middleware stack -func AddRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&RecursionDetection{}, middleware.After) -} - -// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent -// to avoid recursion invocation in Lambda -type RecursionDetection struct{} - -// ID returns the middleware identifier -func (m *RecursionDetection) ID() string { - return "RecursionDetection" -} - -// HandleBuild detects Lambda environment and adds its trace ID to request header if absent -func (m *RecursionDetection) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName) - xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID) - value := req.Header.Get(amznTraceIDHeader) - // only set the X-Amzn-Trace-Id header when it is not set 
initially, the - // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists - if value != "" || !hasLambdaEnv || !hasTraceID { - return next.HandleBuild(ctx, in) - } - - req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID)) - return next.HandleBuild(ctx, in) -} - -func percentEncode(s string) string { - upperhex := "0123456789ABCDEF" - hexCount := 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEncode(c) { - hexCount++ - } - } - - if hexCount == 0 { - return s - } - - required := len(s) + 2*hexCount - t := make([]byte, required) - j := 0 - for i := 0; i < len(s); i++ { - if c := s[i]; shouldEncode(c) { - t[j] = '%' - t[j+1] = upperhex[c>>4] - t[j+2] = upperhex[c&15] - j += 3 - } else { - t[j] = c - j++ - } - } - return string(t) -} - -func shouldEncode(c byte) bool { - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - return false - } - switch c { - case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',': - return false - default: - return true - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go deleted file mode 100644 index dd3391fe4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go +++ /dev/null @@ -1,27 +0,0 @@ -package middleware - -import ( - "github.com/aws/smithy-go/middleware" -) - -// requestIDKey is used to retrieve request id from response metadata -type requestIDKey struct{} - -// SetRequestIDMetadata sets the provided request id over middleware metadata -func SetRequestIDMetadata(metadata *middleware.Metadata, id string) { - metadata.Set(requestIDKey{}, id) -} - -// GetRequestIDMetadata retrieves the request id from middleware metadata -// returns string and bool indicating value of request id, whether request id was set. -func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) { - if !metadata.Has(requestIDKey{}) { - return "", false - } - - v, ok := metadata.Get(requestIDKey{}).(string) - if !ok { - return "", true - } - return v, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go deleted file mode 100644 index 128b60a73..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go +++ /dev/null @@ -1,57 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddRequestIDRetrieverMiddleware adds request id retriever middleware -func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - // add error wrapper middleware before operation deserializers so that it can wrap the error response - // returned by operation deserializers - return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before) -} - -// RequestIDRetriever middleware captures the AWS service request ID from the -// raw response. -type RequestIDRetriever struct { -} - -// ID returns the middleware identifier -func (m *RequestIDRetriever) ID() string { - return "RequestIDRetriever" -} - -// HandleDeserialize pulls the AWS request ID from the response, storing it in -// operation metadata. 
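// Editor's note: illustrative sketch by the editor, not part of the upstream diff.
// The request-id middleware above stores the service request ID in the operation's
// result metadata; GetRequestIDMetadata reads it back. Using the S3 client and its
// ResultMetadata output field here is an assumption for the example (it follows the
// SDK's generated-client convention), not something stated by this diff.
package main

import (
	"context"
	"log"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	out, err := s3.NewFromConfig(cfg).ListBuckets(context.TODO(), &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}

	// ResultMetadata carries the middleware.Metadata populated during the call,
	// including the value stored by SetRequestIDMetadata above.
	if id, ok := awsmiddleware.GetRequestIDMetadata(out.ResultMetadata); ok {
		log.Printf("ListBuckets request id: %s (%d buckets)", id, len(out.Buckets))
	}
}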
-func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. - return out, metadata, err - } - - // Different header which can map to request id - requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"} - - for _, h := range requestIDHeaderList { - // check for headers known to contain Request id - if v := resp.Header.Get(h); len(v) != 0 { - // set reqID on metadata for successful responses. - SetRequestIDMetadata(&metadata, v) - - span, _ := tracing.GetSpan(ctx) - span.SetProperty("aws.request_id", v) - break - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go deleted file mode 100644 index 01d758d5f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ /dev/null @@ -1,323 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "os" - "runtime" - "sort" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -var languageVersion = strings.TrimPrefix(runtime.Version(), "go") - -// SDKAgentKeyType is the metadata type to add to the SDK agent string -type SDKAgentKeyType int - -// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will -// be mapped to AdditionalMetadata. -const ( - _ SDKAgentKeyType = iota - APIMetadata - OperatingSystemMetadata - LanguageMetadata - EnvironmentMetadata - FeatureMetadata - ConfigMetadata - FrameworkMetadata - AdditionalMetadata - ApplicationIdentifier - FeatureMetadata2 -) - -// Hardcoded value to specify which version of the user agent we're using -const uaMetadata = "ua/2.1" - -func (k SDKAgentKeyType) string() string { - switch k { - case APIMetadata: - return "api" - case OperatingSystemMetadata: - return "os" - case LanguageMetadata: - return "lang" - case EnvironmentMetadata: - return "exec-env" - case FeatureMetadata: - return "ft" - case ConfigMetadata: - return "cfg" - case FrameworkMetadata: - return "lib" - case ApplicationIdentifier: - return "app" - case FeatureMetadata2: - return "m" - case AdditionalMetadata: - fallthrough - default: - return "md" - } -} - -const execEnvVar = `AWS_EXECUTION_ENV` - -var validChars = map[rune]bool{ - '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true, - '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, -} - -// UserAgentFeature enumerates tracked SDK features. -type UserAgentFeature string - -// Enumerates UserAgentFeature. 
-const ( - UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) - UserAgentFeatureWaiter = "B" - UserAgentFeaturePaginator = "C" - UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) - UserAgentFeatureRetryModeStandard = "E" - UserAgentFeatureRetryModeAdaptive = "F" - UserAgentFeatureS3Transfer = "G" - UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) - UserAgentFeatureS3CryptoV2 = "I" // n/a - UserAgentFeatureS3ExpressBucket = "J" - UserAgentFeatureS3AccessGrants = "K" // not yet implemented - UserAgentFeatureGZIPRequestCompression = "L" - UserAgentFeatureProtocolRPCV2CBOR = "M" - UserAgentFeatureRequestChecksumCRC32 = "U" - UserAgentFeatureRequestChecksumCRC32C = "V" - UserAgentFeatureRequestChecksumCRC64 = "W" - UserAgentFeatureRequestChecksumSHA1 = "X" - UserAgentFeatureRequestChecksumSHA256 = "Y" - UserAgentFeatureRequestChecksumWhenSupported = "Z" - UserAgentFeatureRequestChecksumWhenRequired = "a" - UserAgentFeatureResponseChecksumWhenSupported = "b" - UserAgentFeatureResponseChecksumWhenRequired = "c" -) - -// RequestUserAgent is a build middleware that set the User-Agent for the request. -type RequestUserAgent struct { - sdkAgent, userAgent *smithyhttp.UserAgentBuilder - features map[UserAgentFeature]struct{} -} - -// NewRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the -// request. -// -// User-Agent example: -// -// aws-sdk-go-v2/1.2.3 -// -// X-Amz-User-Agent example: -// -// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 -func NewRequestUserAgent() *RequestUserAgent { - userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() - addProductName(userAgent) - addUserAgentMetadata(userAgent) - addProductName(sdkAgent) - - r := &RequestUserAgent{ - sdkAgent: sdkAgent, - userAgent: userAgent, - features: map[UserAgentFeature]struct{}{}, - } - - addSDKMetadata(r) - - return r -} - -func addSDKMetadata(r *RequestUserAgent) { - r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) - r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) - r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) - r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH) - if ev := os.Getenv(execEnvVar); len(ev) > 0 { - r.AddSDKAgentKey(EnvironmentMetadata, ev) - } -} - -func addProductName(builder *smithyhttp.UserAgentBuilder) { - builder.AddKeyValue(aws.SDKName, aws.SDKVersion) -} - -func addUserAgentMetadata(builder *smithyhttp.UserAgentBuilder) { - builder.AddKey(uaMetadata) -} - -// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. -func AddUserAgentKey(key string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddUserAgentKey(key) - return nil - } -} - -// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. -func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddUserAgentKeyValue(key, value) - return nil - } -} - -// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. 
-func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddSDKAgentKey(keyType, key) - return nil - } -} - -// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. -func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error { - return func(stack *middleware.Stack) error { - requestUserAgent, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - requestUserAgent.AddSDKAgentKeyValue(keyType, key, value) - return nil - } -} - -// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present. -func AddRequestUserAgentMiddleware(stack *middleware.Stack) error { - _, err := getOrAddRequestUserAgent(stack) - return err -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) { - id := (*RequestUserAgent)(nil).ID() - bm, ok := stack.Build.Get(id) - if !ok { - bm = NewRequestUserAgent() - err := stack.Build.Add(bm, middleware.After) - if err != nil { - return nil, err - } - } - - requestUserAgent, ok := bm.(*RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id) - } - - return requestUserAgent, nil -} - -// AddUserAgentKey adds the component identified by name to the User-Agent string. -func (u *RequestUserAgent) AddUserAgentKey(key string) { - u.userAgent.AddKey(strings.Map(rules, key)) -} - -// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) { - u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value)) -} - -// AddUserAgentFeature adds the feature ID to the tracking list to be emitted -// in the final User-Agent string. -func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) { - u.features[feature] = struct{}{} -} - -// AddSDKAgentKey adds the component identified by name to the User-Agent string. -func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { - // TODO: should target sdkAgent - u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key)) -} - -// AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) { - // TODO: should target sdkAgent - u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value)) -} - -// ID the name of the middleware. -func (u *RequestUserAgent) ID() string { - return "UserAgent" -} - -// HandleBuild adds or appends the constructed user agent to the request. 
-func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - switch req := in.Request.(type) { - case *smithyhttp.Request: - u.addHTTPUserAgent(req) - // TODO: To be re-enabled - // u.addHTTPSDKAgent(req) - default: - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - return next.HandleBuild(ctx, in) -} - -func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { - const userAgent = "User-Agent" - if len(u.features) > 0 { - updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features)) - } - updateHTTPHeader(request, userAgent, u.userAgent.Build()) -} - -func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { - const sdkAgent = "X-Amz-User-Agent" - updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build()) -} - -func updateHTTPHeader(request *smithyhttp.Request, header string, value string) { - var current string - if v := request.Header[header]; len(v) > 0 { - current = v[0] - } - if len(current) > 0 { - current = value + " " + current - } else { - current = value - } - request.Header[header] = append(request.Header[header][:0], current) -} - -func rules(r rune) rune { - switch { - case r >= '0' && r <= '9': - return r - case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z': - return r - case validChars[r]: - return r - default: - return '-' - } -} - -func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string { - fs := make([]string, 0, len(features)) - for f := range features { - fs = append(fs, string(f)) - } - - sort.Strings(fs) - return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ",")) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md deleted file mode 100644 index a4872987a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ /dev/null @@ -1,130 +0,0 @@ -# v1.6.7 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. - -# v1.6.6 (2024-10-04) - -* No change notes available for this release. - -# v1.6.5 (2024-09-20) - -* No change notes available for this release. - -# v1.6.4 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. - -# v1.6.3 (2024-06-28) - -* No change notes available for this release. - -# v1.6.2 (2024-03-29) - -* No change notes available for this release. - -# v1.6.1 (2024-02-21) - -* No change notes available for this release. - -# v1.6.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# v1.5.4 (2023-12-07) - -* No change notes available for this release. - -# v1.5.3 (2023-11-30) - -* No change notes available for this release. - -# v1.5.2 (2023-11-29) - -* No change notes available for this release. - -# v1.5.1 (2023-11-15) - -* No change notes available for this release. - -# v1.5.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). - -# v1.4.14 (2023-10-06) - -* No change notes available for this release. - -# v1.4.13 (2023-08-18) - -* No change notes available for this release. - -# v1.4.12 (2023-08-07) - -* No change notes available for this release. 
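// Editor's note: illustrative sketch by the editor, not part of the upstream diff.
// It shows the intended way to extend the User-Agent built by RequestUserAgent
// (above): the exported Add* helpers return stack mutators that can be appended to
// aws.Config.APIOptions. The "pbm" application identifier and the extra key/value
// are made-up example values.
package main

import (
	"context"
	"log"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Every client built from cfg will now append "app/pbm" and a
	// "backup-tool/pbm" segment to its User-Agent header.
	cfg.APIOptions = append(cfg.APIOptions,
		awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, "pbm"),
		awsmiddleware.AddUserAgentKeyValue("backup-tool", "pbm"),
	)
	_ = cfg // pass cfg to a service client constructor, e.g. s3.NewFromConfig(cfg)
}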
- -# v1.4.11 (2023-07-31) - -* No change notes available for this release. - -# v1.4.10 (2022-12-02) - -* No change notes available for this release. - -# v1.4.9 (2022-10-24) - -* No change notes available for this release. - -# v1.4.8 (2022-09-14) - -* No change notes available for this release. - -# v1.4.7 (2022-09-02) - -* No change notes available for this release. - -# v1.4.6 (2022-08-31) - -* No change notes available for this release. - -# v1.4.5 (2022-08-29) - -* No change notes available for this release. - -# v1.4.4 (2022-08-09) - -* No change notes available for this release. - -# v1.4.3 (2022-06-29) - -* No change notes available for this release. - -# v1.4.2 (2022-06-07) - -* No change notes available for this release. - -# v1.4.1 (2022-03-24) - -* No change notes available for this release. - -# v1.4.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.3.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.2.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.1.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.0.0 (2021-11-06) - -* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release. -* **Release**: Protocol support has been added for AWS event stream. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go deleted file mode 100644 index 151054971..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go +++ /dev/null @@ -1,144 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "strconv" -) - -type decodedMessage struct { - rawMessage - Headers decodedHeaders `json:"headers"` -} -type jsonMessage struct { - Length json.Number `json:"total_length"` - HeadersLen json.Number `json:"headers_length"` - PreludeCRC json.Number `json:"prelude_crc"` - Headers decodedHeaders `json:"headers"` - Payload []byte `json:"payload"` - CRC json.Number `json:"message_crc"` -} - -func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { - var jsonMsg jsonMessage - if err = json.Unmarshal(b, &jsonMsg); err != nil { - return err - } - - d.Length, err = numAsUint32(jsonMsg.Length) - if err != nil { - return err - } - d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) - if err != nil { - return err - } - d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) - if err != nil { - return err - } - d.Headers = jsonMsg.Headers - d.Payload = jsonMsg.Payload - d.CRC, err = numAsUint32(jsonMsg.CRC) - if err != nil { - return err - } - - return nil -} - -func (d *decodedMessage) MarshalJSON() ([]byte, error) { - jsonMsg := jsonMessage{ - Length: json.Number(strconv.Itoa(int(d.Length))), - HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), - PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), - Headers: d.Headers, - Payload: d.Payload, - CRC: json.Number(strconv.Itoa(int(d.CRC))), - } - - return json.Marshal(jsonMsg) -} - -func numAsUint32(n json.Number) (uint32, error) { - v, err := n.Int64() - if err != nil { - return 0, fmt.Errorf("failed to get int64 json number, %v", err) - } - - return uint32(v), nil -} - -func (d decodedMessage) Message() Message { - return Message{ - Headers: Headers(d.Headers), - Payload: d.Payload, - } -} - -type decodedHeaders Headers - -func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { - var jsonHeaders []struct { - Name string `json:"name"` - Type valueType `json:"type"` - Value interface{} `json:"value"` - } - - decoder := json.NewDecoder(bytes.NewReader(b)) - decoder.UseNumber() - if err := decoder.Decode(&jsonHeaders); err != nil { - return err - } - - var headers Headers - for _, h := range jsonHeaders { - value, err := valueFromType(h.Type, h.Value) - if err != nil { - return err - } - headers.Set(h.Name, value) - } - *hs = decodedHeaders(headers) - - return nil -} - -func valueFromType(typ valueType, val interface{}) (Value, error) { - switch typ { - case trueValueType: - return BoolValue(true), nil - case falseValueType: - return BoolValue(false), nil - case int8ValueType: - v, err := val.(json.Number).Int64() - return Int8Value(int8(v)), err - case int16ValueType: - v, err := val.(json.Number).Int64() - return Int16Value(int16(v)), err - case int32ValueType: - v, err := val.(json.Number).Int64() - return Int32Value(int32(v)), err - case int64ValueType: - v, err := val.(json.Number).Int64() - return Int64Value(v), err - case bytesValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - return BytesValue(v), err - case stringValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - return StringValue(string(v)), err - case timestampValueType: - v, err := val.(json.Number).Int64() - return 
TimestampValue(timeFromEpochMilli(v)), err - case uuidValueType: - v, err := base64.StdEncoding.DecodeString(val.(string)) - var tv UUIDValue - copy(tv[:], v) - return tv, err - default: - panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go deleted file mode 100644 index d9ab7652f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go +++ /dev/null @@ -1,218 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "github.com/aws/smithy-go/logging" - "hash" - "hash/crc32" - "io" -) - -// DecoderOptions is the Decoder configuration options. -type DecoderOptions struct { - Logger logging.Logger - LogMessages bool -} - -// Decoder provides decoding of an Event Stream messages. -type Decoder struct { - options DecoderOptions -} - -// NewDecoder initializes and returns a Decoder for decoding event -// stream messages from the reader provided. -func NewDecoder(optFns ...func(*DecoderOptions)) *Decoder { - options := DecoderOptions{} - - for _, fn := range optFns { - fn(&options) - } - - return &Decoder{ - options: options, - } -} - -// Decode attempts to decode a single message from the event stream reader. -// Will return the event stream message, or error if decodeMessage fails to read -// the message from the stream. -// -// payloadBuf is a byte slice that will be used in the returned Message.Payload. Callers -// must ensure that the Message.Payload from a previous decode has been consumed before passing in the same underlying -// payloadBuf byte slice. -func (d *Decoder) Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { - if d.options.Logger != nil && d.options.LogMessages { - debugMsgBuf := bytes.NewBuffer(nil) - reader = io.TeeReader(reader, debugMsgBuf) - defer func() { - logMessageDecode(d.options.Logger, debugMsgBuf, m, err) - }() - } - - m, err = decodeMessage(reader, payloadBuf) - - return m, err -} - -// decodeMessage attempts to decode a single message from the event stream reader. -// Will return the event stream message, or error if decodeMessage fails to read -// the message from the reader. 
-func decodeMessage(reader io.Reader, payloadBuf []byte) (m Message, err error) { - crc := crc32.New(crc32IEEETable) - hashReader := io.TeeReader(reader, crc) - - prelude, err := decodePrelude(hashReader, crc) - if err != nil { - return Message{}, err - } - - if prelude.HeadersLen > 0 { - lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) - m.Headers, err = decodeHeaders(lr) - if err != nil { - return Message{}, err - } - } - - if payloadLen := prelude.PayloadLen(); payloadLen > 0 { - buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) - if err != nil { - return Message{}, err - } - m.Payload = buf - } - - msgCRC := crc.Sum32() - if err := validateCRC(reader, msgCRC); err != nil { - return Message{}, err - } - - return m, nil -} - -func logMessageDecode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { - w := bytes.NewBuffer(nil) - defer func() { logger.Logf(logging.Debug, w.String()) }() - - fmt.Fprintf(w, "Raw message:\n%s\n", - hex.Dump(msgBuf.Bytes())) - - if decodeErr != nil { - fmt.Fprintf(w, "decodeMessage error: %v\n", decodeErr) - return - } - - rawMsg, err := msg.rawMessage() - if err != nil { - fmt.Fprintf(w, "failed to create raw message, %v\n", err) - return - } - - decodedMsg := decodedMessage{ - rawMessage: rawMsg, - Headers: decodedHeaders(msg.Headers), - } - - fmt.Fprintf(w, "Decoded message:\n") - encoder := json.NewEncoder(w) - if err := encoder.Encode(decodedMsg); err != nil { - fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) - } -} - -func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { - var p messagePrelude - - var err error - p.Length, err = decodeUint32(r) - if err != nil { - return messagePrelude{}, err - } - - p.HeadersLen, err = decodeUint32(r) - if err != nil { - return messagePrelude{}, err - } - - if err := p.ValidateLens(); err != nil { - return messagePrelude{}, err - } - - preludeCRC := crc.Sum32() - if err := validateCRC(r, preludeCRC); err != nil { - return messagePrelude{}, err - } - - p.PreludeCRC = preludeCRC - - return p, nil -} - -func decodePayload(buf []byte, r io.Reader) ([]byte, error) { - w := bytes.NewBuffer(buf[0:0]) - - _, err := io.Copy(w, r) - return w.Bytes(), err -} - -func decodeUint8(r io.Reader) (uint8, error) { - type byteReader interface { - ReadByte() (byte, error) - } - - if br, ok := r.(byteReader); ok { - v, err := br.ReadByte() - return v, err - } - - var b [1]byte - _, err := io.ReadFull(r, b[:]) - return b[0], err -} - -func decodeUint16(r io.Reader) (uint16, error) { - var b [2]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint16(bs), nil -} - -func decodeUint32(r io.Reader) (uint32, error) { - var b [4]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint32(bs), nil -} - -func decodeUint64(r io.Reader) (uint64, error) { - var b [8]byte - bs := b[:] - _, err := io.ReadFull(r, bs) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint64(bs), nil -} - -func validateCRC(r io.Reader, expect uint32) error { - msgCRC, err := decodeUint32(r) - if err != nil { - return err - } - - if msgCRC != expect { - return ChecksumError{} - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go deleted file mode 100644 index f03ee4b93..000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go +++ /dev/null @@ -1,167 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "github.com/aws/smithy-go/logging" - "hash" - "hash/crc32" - "io" -) - -// EncoderOptions is the configuration options for Encoder. -type EncoderOptions struct { - Logger logging.Logger - LogMessages bool -} - -// Encoder provides EventStream message encoding. -type Encoder struct { - options EncoderOptions - - headersBuf *bytes.Buffer - messageBuf *bytes.Buffer -} - -// NewEncoder initializes and returns an Encoder to encode Event Stream -// messages. -func NewEncoder(optFns ...func(*EncoderOptions)) *Encoder { - o := EncoderOptions{} - - for _, fn := range optFns { - fn(&o) - } - - return &Encoder{ - options: o, - headersBuf: bytes.NewBuffer(nil), - messageBuf: bytes.NewBuffer(nil), - } -} - -// Encode encodes a single EventStream message to the io.Writer the Encoder -// was created with. An error is returned if writing the message fails. -func (e *Encoder) Encode(w io.Writer, msg Message) (err error) { - e.headersBuf.Reset() - e.messageBuf.Reset() - - var writer io.Writer = e.messageBuf - if e.options.Logger != nil && e.options.LogMessages { - encodeMsgBuf := bytes.NewBuffer(nil) - writer = io.MultiWriter(writer, encodeMsgBuf) - defer func() { - logMessageEncode(e.options.Logger, encodeMsgBuf, msg, err) - }() - } - - if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { - return err - } - - crc := crc32.New(crc32IEEETable) - hashWriter := io.MultiWriter(writer, crc) - - headersLen := uint32(e.headersBuf.Len()) - payloadLen := uint32(len(msg.Payload)) - - if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { - return err - } - - if headersLen > 0 { - if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { - return err - } - } - - if payloadLen > 0 { - if _, err = hashWriter.Write(msg.Payload); err != nil { - return err - } - } - - msgCRC := crc.Sum32() - if err := binary.Write(writer, binary.BigEndian, msgCRC); err != nil { - return err - } - - _, err = io.Copy(w, e.messageBuf) - - return err -} - -func logMessageEncode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { - w := bytes.NewBuffer(nil) - defer func() { logger.Logf(logging.Debug, w.String()) }() - - fmt.Fprintf(w, "Message to encode:\n") - encoder := json.NewEncoder(w) - if err := encoder.Encode(msg); err != nil { - fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) - } - - if encodeErr != nil { - fmt.Fprintf(w, "Encode error: %v\n", encodeErr) - return - } - - fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) -} - -func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { - p := messagePrelude{ - Length: minMsgLen + headersLen + payloadLen, - HeadersLen: headersLen, - } - if err := p.ValidateLens(); err != nil { - return err - } - - err := binaryWriteFields(w, binary.BigEndian, - p.Length, - p.HeadersLen, - ) - if err != nil { - return err - } - - p.PreludeCRC = crc.Sum32() - err = binary.Write(w, binary.BigEndian, p.PreludeCRC) - if err != nil { - return err - } - - return nil -} - -// EncodeHeaders writes the header values to the writer encoded in the event -// stream format. Returns an error if a header fails to encode. 
-func EncodeHeaders(w io.Writer, headers Headers) error { - for _, h := range headers { - hn := headerName{ - Len: uint8(len(h.Name)), - } - copy(hn.Name[:hn.Len], h.Name) - if err := hn.encode(w); err != nil { - return err - } - - if err := h.Value.encode(w); err != nil { - return err - } - } - - return nil -} - -func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { - for _, v := range vs { - if err := binary.Write(w, order, v); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go deleted file mode 100644 index 5481ef307..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go +++ /dev/null @@ -1,23 +0,0 @@ -package eventstream - -import "fmt" - -// LengthError provides the error for items being larger than a maximum length. -type LengthError struct { - Part string - Want int - Have int - Value interface{} -} - -func (e LengthError) Error() string { - return fmt.Sprintf("%s length invalid, %d/%d, %v", - e.Part, e.Want, e.Have, e.Value) -} - -// ChecksumError provides the error for message checksum invalidation errors. -type ChecksumError struct{} - -func (e ChecksumError) Error() string { - return "message checksum mismatch" -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go deleted file mode 100644 index 93ea71ffd..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go +++ /dev/null @@ -1,24 +0,0 @@ -package eventstreamapi - -// EventStream headers with specific meaning to async API functionality. -const ( - ChunkSignatureHeader = `:chunk-signature` // chunk signature for message - DateHeader = `:date` // Date header for signature - ContentTypeHeader = ":content-type" // message payload content-type - - // Message header and values - MessageTypeHeader = `:message-type` // Identifies type of message. - EventMessageType = `event` - ErrorMessageType = `error` - ExceptionMessageType = `exception` - - // Message Events - EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". - - // Message Error - ErrorCodeHeader = `:error-code` - ErrorMessageHeader = `:error-message` - - // Message Exception - ExceptionTypeHeader = `:exception-type` -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go deleted file mode 100644 index d07ff6b89..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go +++ /dev/null @@ -1,71 +0,0 @@ -package eventstreamapi - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" -) - -type eventStreamWriterKey struct{} - -// GetInputStreamWriter returns EventTypeHeader io.PipeWriter used for the operation's input event stream. 
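The eventstream sources removed above (decode.go, encode.go, error.go, and the eventstreamapi header constants) make up a small public API: an Encoder frames a Message with its prelude, headers, and CRCs onto an io.Writer, and a Decoder reads and validates one back. As a minimal usage sketch of the package being un-vendored here (illustration only; the event name and payload are invented, and the imports assume the module is now resolved from the module cache rather than vendor/):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
	"github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi"
)

func main() {
	// Build a message with the well-known ":message-type" / ":event-type"
	// headers and a small payload.
	msg := eventstream.Message{Payload: []byte(`{"hello":"world"}`)}
	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
	msg.Headers.Set(eventstreamapi.EventTypeHeader, eventstream.StringValue("ExampleEvent"))

	// Encode writes the prelude, headers, payload, and both CRCs to the writer.
	var buf bytes.Buffer
	if err := eventstream.NewEncoder().Encode(&buf, msg); err != nil {
		log.Fatal(err)
	}

	// Decode reads one framed message back, validating the CRCs as it goes.
	decoded, err := eventstream.NewDecoder().Decode(&buf, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Headers.Get(eventstreamapi.EventTypeHeader), string(decoded.Payload))
}
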
-func GetInputStreamWriter(ctx context.Context) io.WriteCloser { - writeCloser, _ := middleware.GetStackValue(ctx, eventStreamWriterKey{}).(io.WriteCloser) - return writeCloser -} - -func setInputStreamWriter(ctx context.Context, writeCloser io.WriteCloser) context.Context { - return middleware.WithStackValue(ctx, eventStreamWriterKey{}, writeCloser) -} - -// InitializeStreamWriter is a Finalize middleware initializes an in-memory pipe for sending event stream messages -// via the HTTP request body. -type InitializeStreamWriter struct{} - -// AddInitializeStreamWriter adds the InitializeStreamWriter middleware to the provided stack. -func AddInitializeStreamWriter(stack *middleware.Stack) error { - return stack.Finalize.Add(&InitializeStreamWriter{}, middleware.After) -} - -// ID returns the identifier for the middleware. -func (i *InitializeStreamWriter) ID() string { - return "InitializeStreamWriter" -} - -// HandleFinalize is the middleware implementation. -func (i *InitializeStreamWriter) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) - } - - inputReader, inputWriter := io.Pipe() - defer func() { - if err == nil { - return - } - _ = inputReader.Close() - _ = inputWriter.Close() - }() - - request, err = request.SetStream(inputReader) - if err != nil { - return out, metadata, err - } - in.Request = request - - ctx = setInputStreamWriter(ctx, inputWriter) - - out, metadata, err = next.HandleFinalize(ctx, in) - if err != nil { - return out, metadata, err - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go deleted file mode 100644 index cbf5a2862..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -package eventstreamapi - -import smithyhttp "github.com/aws/smithy-go/transport/http" - -// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality. -// -// This operation is a no-op for Go 1.18 and above. -func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go deleted file mode 100644 index 7d10ec2eb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !go1.18 -// +build !go1.18 - -package eventstreamapi - -import smithyhttp "github.com/aws/smithy-go/transport/http" - -// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality. 
-func ApplyHTTPTransportFixes(r *smithyhttp.Request) error { - r.Header.Set("Expect", "100-continue") - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go deleted file mode 100644 index 78af41cbe..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package eventstream - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.6.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go deleted file mode 100644 index f6f8c5674..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go +++ /dev/null @@ -1,175 +0,0 @@ -package eventstream - -import ( - "encoding/binary" - "fmt" - "io" -) - -// Headers are a collection of EventStream header values. -type Headers []Header - -// Header is a single EventStream Key Value header pair. -type Header struct { - Name string - Value Value -} - -// Set associates the name with a value. If the header name already exists in -// the Headers the value will be replaced with the new one. -func (hs *Headers) Set(name string, value Value) { - var i int - for ; i < len(*hs); i++ { - if (*hs)[i].Name == name { - (*hs)[i].Value = value - return - } - } - - *hs = append(*hs, Header{ - Name: name, Value: value, - }) -} - -// Get returns the Value associated with the header. Nil is returned if the -// value does not exist. -func (hs Headers) Get(name string) Value { - for i := 0; i < len(hs); i++ { - if h := hs[i]; h.Name == name { - return h.Value - } - } - return nil -} - -// Del deletes the value in the Headers if it exists. 
-func (hs *Headers) Del(name string) { - for i := 0; i < len(*hs); i++ { - if (*hs)[i].Name == name { - copy((*hs)[i:], (*hs)[i+1:]) - (*hs) = (*hs)[:len(*hs)-1] - } - } -} - -// Clone returns a deep copy of the headers -func (hs Headers) Clone() Headers { - o := make(Headers, 0, len(hs)) - for _, h := range hs { - o.Set(h.Name, h.Value) - } - return o -} - -func decodeHeaders(r io.Reader) (Headers, error) { - hs := Headers{} - - for { - name, err := decodeHeaderName(r) - if err != nil { - if err == io.EOF { - // EOF while getting header name means no more headers - break - } - return nil, err - } - - value, err := decodeHeaderValue(r) - if err != nil { - return nil, err - } - - hs.Set(name, value) - } - - return hs, nil -} - -func decodeHeaderName(r io.Reader) (string, error) { - var n headerName - - var err error - n.Len, err = decodeUint8(r) - if err != nil { - return "", err - } - - name := n.Name[:n.Len] - if _, err := io.ReadFull(r, name); err != nil { - return "", err - } - - return string(name), nil -} - -func decodeHeaderValue(r io.Reader) (Value, error) { - var raw rawValue - - typ, err := decodeUint8(r) - if err != nil { - return nil, err - } - raw.Type = valueType(typ) - - var v Value - - switch raw.Type { - case trueValueType: - v = BoolValue(true) - case falseValueType: - v = BoolValue(false) - case int8ValueType: - var tv Int8Value - err = tv.decode(r) - v = tv - case int16ValueType: - var tv Int16Value - err = tv.decode(r) - v = tv - case int32ValueType: - var tv Int32Value - err = tv.decode(r) - v = tv - case int64ValueType: - var tv Int64Value - err = tv.decode(r) - v = tv - case bytesValueType: - var tv BytesValue - err = tv.decode(r) - v = tv - case stringValueType: - var tv StringValue - err = tv.decode(r) - v = tv - case timestampValueType: - var tv TimestampValue - err = tv.decode(r) - v = tv - case uuidValueType: - var tv UUIDValue - err = tv.decode(r) - v = tv - default: - panic(fmt.Sprintf("unknown value type %d", raw.Type)) - } - - // Error could be EOF, let caller deal with it - return v, err -} - -const maxHeaderNameLen = 255 - -type headerName struct { - Len uint8 - Name [maxHeaderNameLen]byte -} - -func (v headerName) encode(w io.Writer) error { - if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { - return err - } - - _, err := w.Write(v.Name[:v.Len]) - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go deleted file mode 100644 index 423b6bb26..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go +++ /dev/null @@ -1,521 +0,0 @@ -package eventstream - -import ( - "encoding/base64" - "encoding/binary" - "encoding/hex" - "fmt" - "io" - "strconv" - "time" -) - -const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 - -// valueType is the EventStream header value type. 
-type valueType uint8 - -// Header value types -const ( - trueValueType valueType = iota - falseValueType - int8ValueType // Byte - int16ValueType // Short - int32ValueType // Integer - int64ValueType // Long - bytesValueType - stringValueType - timestampValueType - uuidValueType -) - -func (t valueType) String() string { - switch t { - case trueValueType: - return "bool" - case falseValueType: - return "bool" - case int8ValueType: - return "int8" - case int16ValueType: - return "int16" - case int32ValueType: - return "int32" - case int64ValueType: - return "int64" - case bytesValueType: - return "byte_array" - case stringValueType: - return "string" - case timestampValueType: - return "timestamp" - case uuidValueType: - return "uuid" - default: - return fmt.Sprintf("unknown value type %d", uint8(t)) - } -} - -type rawValue struct { - Type valueType - Len uint16 // Only set for variable length slices - Value []byte // byte representation of value, BigEndian encoding. -} - -func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { - return binaryWriteFields(w, binary.BigEndian, - r.Type, - v, - ) -} - -func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { - binary.Write(w, binary.BigEndian, r.Type) - - _, err := w.Write(v) - return err -} - -func (r rawValue) encodeBytes(w io.Writer, v []byte) error { - if len(v) > maxHeaderValueLen { - return LengthError{ - Part: "header value", - Want: maxHeaderValueLen, Have: len(v), - Value: v, - } - } - r.Len = uint16(len(v)) - - err := binaryWriteFields(w, binary.BigEndian, - r.Type, - r.Len, - ) - if err != nil { - return err - } - - _, err = w.Write(v) - return err -} - -func (r rawValue) encodeString(w io.Writer, v string) error { - if len(v) > maxHeaderValueLen { - return LengthError{ - Part: "header value", - Want: maxHeaderValueLen, Have: len(v), - Value: v, - } - } - r.Len = uint16(len(v)) - - type stringWriter interface { - WriteString(string) (int, error) - } - - err := binaryWriteFields(w, binary.BigEndian, - r.Type, - r.Len, - ) - if err != nil { - return err - } - - if sw, ok := w.(stringWriter); ok { - _, err = sw.WriteString(v) - } else { - _, err = w.Write([]byte(v)) - } - - return err -} - -func decodeFixedBytesValue(r io.Reader, buf []byte) error { - _, err := io.ReadFull(r, buf) - return err -} - -func decodeBytesValue(r io.Reader) ([]byte, error) { - var raw rawValue - var err error - raw.Len, err = decodeUint16(r) - if err != nil { - return nil, err - } - - buf := make([]byte, raw.Len) - _, err = io.ReadFull(r, buf) - if err != nil { - return nil, err - } - - return buf, nil -} - -func decodeStringValue(r io.Reader) (string, error) { - v, err := decodeBytesValue(r) - return string(v), err -} - -// Value represents the abstract header value. -type Value interface { - Get() interface{} - String() string - valueType() valueType - encode(io.Writer) error -} - -// An BoolValue provides eventstream encoding, and representation -// of a Go bool value. -type BoolValue bool - -// Get returns the underlying type -func (v BoolValue) Get() interface{} { - return bool(v) -} - -// valueType returns the EventStream header value type value. -func (v BoolValue) valueType() valueType { - if v { - return trueValueType - } - return falseValueType -} - -func (v BoolValue) String() string { - return strconv.FormatBool(bool(v)) -} - -// encode encodes the BoolValue into an eventstream binary value -// representation. 
-func (v BoolValue) encode(w io.Writer) error { - return binary.Write(w, binary.BigEndian, v.valueType()) -} - -// An Int8Value provides eventstream encoding, and representation of a Go -// int8 value. -type Int8Value int8 - -// Get returns the underlying value. -func (v Int8Value) Get() interface{} { - return int8(v) -} - -// valueType returns the EventStream header value type value. -func (Int8Value) valueType() valueType { - return int8ValueType -} - -func (v Int8Value) String() string { - return fmt.Sprintf("0x%02x", int8(v)) -} - -// encode encodes the Int8Value into an eventstream binary value -// representation. -func (v Int8Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeScalar(w, v) -} - -func (v *Int8Value) decode(r io.Reader) error { - n, err := decodeUint8(r) - if err != nil { - return err - } - - *v = Int8Value(n) - return nil -} - -// An Int16Value provides eventstream encoding, and representation of a Go -// int16 value. -type Int16Value int16 - -// Get returns the underlying value. -func (v Int16Value) Get() interface{} { - return int16(v) -} - -// valueType returns the EventStream header value type value. -func (Int16Value) valueType() valueType { - return int16ValueType -} - -func (v Int16Value) String() string { - return fmt.Sprintf("0x%04x", int16(v)) -} - -// encode encodes the Int16Value into an eventstream binary value -// representation. -func (v Int16Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int16Value) decode(r io.Reader) error { - n, err := decodeUint16(r) - if err != nil { - return err - } - - *v = Int16Value(n) - return nil -} - -// An Int32Value provides eventstream encoding, and representation of a Go -// int32 value. -type Int32Value int32 - -// Get returns the underlying value. -func (v Int32Value) Get() interface{} { - return int32(v) -} - -// valueType returns the EventStream header value type value. -func (Int32Value) valueType() valueType { - return int32ValueType -} - -func (v Int32Value) String() string { - return fmt.Sprintf("0x%08x", int32(v)) -} - -// encode encodes the Int32Value into an eventstream binary value -// representation. -func (v Int32Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int32Value) decode(r io.Reader) error { - n, err := decodeUint32(r) - if err != nil { - return err - } - - *v = Int32Value(n) - return nil -} - -// An Int64Value provides eventstream encoding, and representation of a Go -// int64 value. -type Int64Value int64 - -// Get returns the underlying value. -func (v Int64Value) Get() interface{} { - return int64(v) -} - -// valueType returns the EventStream header value type value. -func (Int64Value) valueType() valueType { - return int64ValueType -} - -func (v Int64Value) String() string { - return fmt.Sprintf("0x%016x", int64(v)) -} - -// encode encodes the Int64Value into an eventstream binary value -// representation. -func (v Int64Value) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - return raw.encodeScalar(w, v) -} - -func (v *Int64Value) decode(r io.Reader) error { - n, err := decodeUint64(r) - if err != nil { - return err - } - - *v = Int64Value(n) - return nil -} - -// An BytesValue provides eventstream encoding, and representation of a Go -// byte slice. -type BytesValue []byte - -// Get returns the underlying value. 
-func (v BytesValue) Get() interface{} { - return []byte(v) -} - -// valueType returns the EventStream header value type value. -func (BytesValue) valueType() valueType { - return bytesValueType -} - -func (v BytesValue) String() string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -// encode encodes the BytesValue into an eventstream binary value -// representation. -func (v BytesValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeBytes(w, []byte(v)) -} - -func (v *BytesValue) decode(r io.Reader) error { - buf, err := decodeBytesValue(r) - if err != nil { - return err - } - - *v = BytesValue(buf) - return nil -} - -// An StringValue provides eventstream encoding, and representation of a Go -// string. -type StringValue string - -// Get returns the underlying value. -func (v StringValue) Get() interface{} { - return string(v) -} - -// valueType returns the EventStream header value type value. -func (StringValue) valueType() valueType { - return stringValueType -} - -func (v StringValue) String() string { - return string(v) -} - -// encode encodes the StringValue into an eventstream binary value -// representation. -func (v StringValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeString(w, string(v)) -} - -func (v *StringValue) decode(r io.Reader) error { - s, err := decodeStringValue(r) - if err != nil { - return err - } - - *v = StringValue(s) - return nil -} - -// An TimestampValue provides eventstream encoding, and representation of a Go -// timestamp. -type TimestampValue time.Time - -// Get returns the underlying value. -func (v TimestampValue) Get() interface{} { - return time.Time(v) -} - -// valueType returns the EventStream header value type value. -func (TimestampValue) valueType() valueType { - return timestampValueType -} - -func (v TimestampValue) epochMilli() int64 { - nano := time.Time(v).UnixNano() - msec := nano / int64(time.Millisecond) - return msec -} - -func (v TimestampValue) String() string { - msec := v.epochMilli() - return strconv.FormatInt(msec, 10) -} - -// encode encodes the TimestampValue into an eventstream binary value -// representation. -func (v TimestampValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - msec := v.epochMilli() - return raw.encodeScalar(w, msec) -} - -func (v *TimestampValue) decode(r io.Reader) error { - n, err := decodeUint64(r) - if err != nil { - return err - } - - *v = TimestampValue(timeFromEpochMilli(int64(n))) - return nil -} - -// MarshalJSON implements the json.Marshaler interface -func (v TimestampValue) MarshalJSON() ([]byte, error) { - return []byte(v.String()), nil -} - -func timeFromEpochMilli(t int64) time.Time { - secs := t / 1e3 - msec := t % 1e3 - return time.Unix(secs, msec*int64(time.Millisecond)).UTC() -} - -// An UUIDValue provides eventstream encoding, and representation of a UUID -// value. -type UUIDValue [16]byte - -// Get returns the underlying value. -func (v UUIDValue) Get() interface{} { - return v[:] -} - -// valueType returns the EventStream header value type value. 
-func (UUIDValue) valueType() valueType { - return uuidValueType -} - -func (v UUIDValue) String() string { - var scratch [36]byte - - const dash = '-' - - hex.Encode(scratch[:8], v[0:4]) - scratch[8] = dash - hex.Encode(scratch[9:13], v[4:6]) - scratch[13] = dash - hex.Encode(scratch[14:18], v[6:8]) - scratch[18] = dash - hex.Encode(scratch[19:23], v[8:10]) - scratch[23] = dash - hex.Encode(scratch[24:], v[10:]) - - return string(scratch[:]) -} - -// encode encodes the UUIDValue into an eventstream binary value -// representation. -func (v UUIDValue) encode(w io.Writer) error { - raw := rawValue{ - Type: v.valueType(), - } - - return raw.encodeFixedSlice(w, v[:]) -} - -func (v *UUIDValue) decode(r io.Reader) error { - tv := (*v)[:] - return decodeFixedBytesValue(r, tv) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go deleted file mode 100644 index f7427da03..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go +++ /dev/null @@ -1,117 +0,0 @@ -package eventstream - -import ( - "bytes" - "encoding/binary" - "hash/crc32" -) - -const preludeLen = 8 -const preludeCRCLen = 4 -const msgCRCLen = 4 -const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen -const maxPayloadLen = 1024 * 1024 * 16 // 16MB -const maxHeadersLen = 1024 * 128 // 128KB -const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen - -var crc32IEEETable = crc32.MakeTable(crc32.IEEE) - -// A Message provides the eventstream message representation. -type Message struct { - Headers Headers - Payload []byte -} - -func (m *Message) rawMessage() (rawMessage, error) { - var raw rawMessage - - if len(m.Headers) > 0 { - var headers bytes.Buffer - if err := EncodeHeaders(&headers, m.Headers); err != nil { - return rawMessage{}, err - } - raw.Headers = headers.Bytes() - raw.HeadersLen = uint32(len(raw.Headers)) - } - - raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen - - hash := crc32.New(crc32IEEETable) - binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) - raw.PreludeCRC = hash.Sum32() - - binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) - - if raw.HeadersLen > 0 { - hash.Write(raw.Headers) - } - - // Read payload bytes and update hash for it as well. - if len(m.Payload) > 0 { - raw.Payload = m.Payload - hash.Write(raw.Payload) - } - - raw.CRC = hash.Sum32() - - return raw, nil -} - -// Clone returns a deep copy of the message. 
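The header_value.go being removed here defines one concrete Value type per wire type (bool, the fixed-width integers, bytes, string, timestamp, UUID), each able to encode itself and render a readable form via String(). A short, hypothetical sketch of attaching typed headers (the header names and values are invented for illustration):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
)

func main() {
	var hs eventstream.Headers
	hs.Set("retry-count", eventstream.Int32Value(3))
	hs.Set("started-at", eventstream.TimestampValue(time.Now().UTC()))
	hs.Set("compressed", eventstream.BoolValue(false))
	hs.Set("request-id", eventstream.UUIDValue{0xde, 0xad, 0xbe, 0xef})

	for _, h := range hs {
		// Each Value renders its wire-level representation through String().
		fmt.Printf("%s = %s\n", h.Name, h.Value)
	}
}
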
-func (m Message) Clone() Message { - var payload []byte - if m.Payload != nil { - payload = make([]byte, len(m.Payload)) - copy(payload, m.Payload) - } - - return Message{ - Headers: m.Headers.Clone(), - Payload: payload, - } -} - -type messagePrelude struct { - Length uint32 - HeadersLen uint32 - PreludeCRC uint32 -} - -func (p messagePrelude) PayloadLen() uint32 { - return p.Length - p.HeadersLen - minMsgLen -} - -func (p messagePrelude) ValidateLens() error { - if p.Length == 0 || p.Length > maxMsgLen { - return LengthError{ - Part: "message prelude", - Want: maxMsgLen, - Have: int(p.Length), - } - } - if p.HeadersLen > maxHeadersLen { - return LengthError{ - Part: "message headers", - Want: maxHeadersLen, - Have: int(p.HeadersLen), - } - } - if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { - return LengthError{ - Part: "message payload", - Want: maxPayloadLen, - Have: int(payloadLen), - } - } - - return nil -} - -type rawMessage struct { - messagePrelude - - Headers []byte - Payload []byte - - CRC uint32 -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go deleted file mode 100644 index 47ebc0f54..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go +++ /dev/null @@ -1,72 +0,0 @@ -package query - -import ( - "fmt" - "net/url" -) - -// Array represents the encoding of Query lists and sets. A Query array is a -// representation of a list of values of a fixed type. A serialized array might -// look like the following: -// -// ListName.member.1=foo -// &ListName.member.2=bar -// &Listname.member.3=baz -type Array struct { - // The query values to add the array to. - values url.Values - // The array's prefix, which includes the names of all parent structures - // and ends with the name of the list. For example, the prefix might be - // "ParentStructure.ListName". This prefix will be used to form the full - // keys for each element in the list. For example, an entry might have the - // key "ParentStructure.ListName.member.MemberName.1". - // - // While this is currently represented as a string that gets added to, it - // could also be represented as a stack that only gets condensed into a - // string when a finalized key is created. This could potentially reduce - // allocations. - prefix string - // Whether the list is flat or not. A list that is not flat will produce the - // following entry to the url.Values for a given entry: - // ListName.MemberName.1=value - // A list that is flat will produce the following: - // ListName.1=value - flat bool - // The location name of the member. In most cases this should be "member". - memberName string - // Elements are stored in values, so we keep track of the list size here. - size int32 - // Empty lists are encoded as "=", if we add a value later we will - // remove this encoding - emptyValue Value -} - -func newArray(values url.Values, prefix string, flat bool, memberName string) *Array { - emptyValue := newValue(values, prefix, flat) - emptyValue.String("") - - return &Array{ - values: values, - prefix: prefix, - flat: flat, - memberName: memberName, - emptyValue: emptyValue, - } -} - -// Value adds a new element to the Query Array. Returns a Value type used to -// encode the array element. 
-func (a *Array) Value() Value { - if a.size == 0 { - delete(a.values, a.emptyValue.key) - } - - // Query lists start a 1, so adjust the size first - a.size++ - prefix := a.prefix - if !a.flat { - prefix = fmt.Sprintf("%s.%s", prefix, a.memberName) - } - // Lists can't have flat members - return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go deleted file mode 100644 index 2ecf9241c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go +++ /dev/null @@ -1,80 +0,0 @@ -package query - -import ( - "io" - "net/url" - "sort" -) - -// Encoder is a Query encoder that supports construction of Query body -// values using methods. -type Encoder struct { - // The query values that will be built up to manage encoding. - values url.Values - // The writer that the encoded body will be written to. - writer io.Writer - Value -} - -// NewEncoder returns a new Query body encoder -func NewEncoder(writer io.Writer) *Encoder { - values := url.Values{} - return &Encoder{ - values: values, - writer: writer, - Value: newBaseValue(values), - } -} - -// Encode returns the []byte slice representing the current -// state of the Query encoder. -func (e Encoder) Encode() error { - ws, ok := e.writer.(interface{ WriteString(string) (int, error) }) - if !ok { - // Fall back to less optimal byte slice casting if WriteString isn't available. - ws = &wrapWriteString{writer: e.writer} - } - - // Get the keys and sort them to have a stable output - keys := make([]string, 0, len(e.values)) - for k := range e.values { - keys = append(keys, k) - } - sort.Strings(keys) - isFirstEntry := true - for _, key := range keys { - queryValues := e.values[key] - escapedKey := url.QueryEscape(key) - for _, value := range queryValues { - if !isFirstEntry { - if _, err := ws.WriteString(`&`); err != nil { - return err - } - } else { - isFirstEntry = false - } - if _, err := ws.WriteString(escapedKey); err != nil { - return err - } - if _, err := ws.WriteString(`=`); err != nil { - return err - } - if _, err := ws.WriteString(url.QueryEscape(value)); err != nil { - return err - } - } - } - return nil -} - -// wrapWriteString wraps an io.Writer to provide a WriteString method -// where one is not available. -type wrapWriteString struct { - writer io.Writer -} - -// WriteString writes a string to the wrapped writer by casting it to -// a byte array first. -func (w wrapWriteString) WriteString(v string) (int, error) { - return w.writer.Write([]byte(v)) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go deleted file mode 100644 index dea242b8b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go +++ /dev/null @@ -1,78 +0,0 @@ -package query - -import ( - "fmt" - "net/url" -) - -// Map represents the encoding of Query maps. A Query map is a representation -// of a mapping of arbitrary string keys to arbitrary values of a fixed type. -// A Map differs from an Object in that the set of keys is not fixed, in that -// the values must all be of the same type, and that map entries are ordered. -// A serialized map might look like the following: -// -// MapName.entry.1.key=Foo -// &MapName.entry.1.value=spam -// &MapName.entry.2.key=Bar -// &MapName.entry.2.value=eggs -type Map struct { - // The query values to add the map to. 
- values url.Values - // The map's prefix, which includes the names of all parent structures - // and ends with the name of the object. For example, the prefix might be - // "ParentStructure.MapName". This prefix will be used to form the full - // keys for each key-value pair of the map. For example, a value might have - // the key "ParentStructure.MapName.1.value". - // - // While this is currently represented as a string that gets added to, it - // could also be represented as a stack that only gets condensed into a - // string when a finalized key is created. This could potentially reduce - // allocations. - prefix string - // Whether the map is flat or not. A map that is not flat will produce the - // following entries to the url.Values for a given key-value pair: - // MapName.entry.1.KeyLocationName=mykey - // MapName.entry.1.ValueLocationName=myvalue - // A map that is flat will produce the following: - // MapName.1.KeyLocationName=mykey - // MapName.1.ValueLocationName=myvalue - flat bool - // The location name of the key. In most cases this should be "key". - keyLocationName string - // The location name of the value. In most cases this should be "value". - valueLocationName string - // Elements are stored in values, so we keep track of the list size here. - size int32 -} - -func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map { - return &Map{ - values: values, - prefix: prefix, - flat: flat, - keyLocationName: keyLocationName, - valueLocationName: valueLocationName, - } -} - -// Key adds the given named key to the Query map. -// Returns a Value encoder that should be used to encode a Query value type. -func (m *Map) Key(name string) Value { - // Query lists start a 1, so adjust the size first - m.size++ - var key string - var value string - if m.flat { - key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName) - value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName) - } else { - key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName) - value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName) - } - - // The key can only be a string, so we just go ahead and set it here - newValue(m.values, key, false).String(name) - - // Maps can't have flat members - return newValue(m.values, value, false) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go deleted file mode 100644 index 360344791..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go +++ /dev/null @@ -1,62 +0,0 @@ -package query - -import ( - "context" - "fmt" - "io/ioutil" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the -// operation serializer that will convert the query request body to a GET -// operation with the query message in the HTTP request querystring. 
-func AddAsGetRequestMiddleware(stack *middleware.Stack) error { - return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After) -} - -type asGetRequest struct{} - -func (*asGetRequest) ID() string { return "Query:AsGetRequest" } - -func (m *asGetRequest) HandleSerialize( - ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request) - } - - req.Method = "GET" - - // If the stream is not set, nothing else to do. - stream := req.GetStream() - if stream == nil { - return next.HandleSerialize(ctx, input) - } - - // Clear the stream since there will not be any body. - req.Header.Del("Content-Type") - req, err = req.SetStream(nil) - if err != nil { - return out, metadata, fmt.Errorf("unable update request body %w", err) - } - input.Request = req - - // Update request query with the body's query string value. - delim := "" - if len(req.URL.RawQuery) != 0 { - delim = "&" - } - - b, err := ioutil.ReadAll(stream) - if err != nil { - return out, metadata, fmt.Errorf("unable to get request body %w", err) - } - req.URL.RawQuery += delim + string(b) - - return next.HandleSerialize(ctx, input) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go deleted file mode 100644 index 455b92515..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go +++ /dev/null @@ -1,69 +0,0 @@ -package query - -import ( - "fmt" - "net/url" -) - -// Object represents the encoding of Query structures and unions. A Query -// object is a representation of a mapping of string keys to arbitrary -// values where there is a fixed set of keys whose values each have their -// own known type. A serialized object might look like the following: -// -// ObjectName.Foo=value -// &ObjectName.Bar=5 -type Object struct { - // The query values to add the object to. - values url.Values - // The object's prefix, which includes the names of all parent structures - // and ends with the name of the object. For example, the prefix might be - // "ParentStructure.ObjectName". This prefix will be used to form the full - // keys for each member of the object. For example, a member might have the - // key "ParentStructure.ObjectName.MemberName". - // - // While this is currently represented as a string that gets added to, it - // could also be represented as a stack that only gets condensed into a - // string when a finalized key is created. This could potentially reduce - // allocations. - prefix string -} - -func newObject(values url.Values, prefix string) *Object { - return &Object{ - values: values, - prefix: prefix, - } -} - -// Key adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query value type. -func (o *Object) Key(name string) Value { - return o.key(name, false) -} - -// KeyWithValues adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query list of values. -func (o *Object) KeyWithValues(name string) Value { - return o.keyWithValues(name, false) -} - -// FlatKey adds the given named key to the Query object. -// Returns a Value encoder that should be used to encode a Query value type. 
The -// value will be flattened if it is a map or array. -func (o *Object) FlatKey(name string) Value { - return o.key(name, true) -} - -func (o *Object) key(name string, flatValue bool) Value { - if o.prefix != "" { - return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) - } - return newValue(o.values, name, flatValue) -} - -func (o *Object) keyWithValues(name string, flatValue bool) Value { - if o.prefix != "" { - return newAppendValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) - } - return newAppendValue(o.values, name, flatValue) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go deleted file mode 100644 index a9251521f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go +++ /dev/null @@ -1,115 +0,0 @@ -package query - -import ( - "math/big" - "net/url" - - "github.com/aws/smithy-go/encoding/httpbinding" -) - -// Value represents a Query Value type. -type Value struct { - // The query values to add the value to. - values url.Values - // The value's key, which will form the prefix for complex types. - key string - // Whether the value should be flattened or not if it's a flattenable type. - flat bool - queryValue httpbinding.QueryValue -} - -func newValue(values url.Values, key string, flat bool) Value { - return Value{ - values: values, - key: key, - flat: flat, - queryValue: httpbinding.NewQueryValue(values, key, false), - } -} - -func newAppendValue(values url.Values, key string, flat bool) Value { - return Value{ - values: values, - key: key, - flat: flat, - queryValue: httpbinding.NewQueryValue(values, key, true), - } -} - -func newBaseValue(values url.Values) Value { - return Value{ - values: values, - queryValue: httpbinding.NewQueryValue(nil, "", false), - } -} - -// Array returns a new Array encoder. -func (qv Value) Array(locationName string) *Array { - return newArray(qv.values, qv.key, qv.flat, locationName) -} - -// Object returns a new Object encoder. -func (qv Value) Object() *Object { - return newObject(qv.values, qv.key) -} - -// Map returns a new Map encoder. -func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { - return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) -} - -// Base64EncodeBytes encodes v as a base64 query string value. -// This is intended to enable compatibility with the JSON encoder. 
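Together, the query/ files being removed here (encoder.go, array.go, map.go, object.go, value.go) implement the AWS Query protocol body encoder: builders accumulate url.Values under dotted key prefixes, and Encode serializes them as a sorted, URL-escaped body. A hypothetical sketch of that flow (the action and member names are invented):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
)

func main() {
	var body bytes.Buffer
	enc := query.NewEncoder(&body)

	// The root object carries top-level members; nested builders extend the
	// key prefix ("Names.member.1", "Names.member.2", ...).
	obj := enc.Object()
	obj.Key("Action").String("DescribeWidgets")
	names := obj.Key("Names").Array("member")
	names.Value().String("alpha")
	names.Value().String("beta")

	if err := enc.Encode(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(body.String())
	// Keys are sorted, so this prints:
	// Action=DescribeWidgets&Names.member.1=alpha&Names.member.2=beta
}
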
-func (qv Value) Base64EncodeBytes(v []byte) { - qv.queryValue.Blob(v) -} - -// Boolean encodes v as a query string value -func (qv Value) Boolean(v bool) { - qv.queryValue.Boolean(v) -} - -// String encodes v as a query string value -func (qv Value) String(v string) { - qv.queryValue.String(v) -} - -// Byte encodes v as a query string value -func (qv Value) Byte(v int8) { - qv.queryValue.Byte(v) -} - -// Short encodes v as a query string value -func (qv Value) Short(v int16) { - qv.queryValue.Short(v) -} - -// Integer encodes v as a query string value -func (qv Value) Integer(v int32) { - qv.queryValue.Integer(v) -} - -// Long encodes v as a query string value -func (qv Value) Long(v int64) { - qv.queryValue.Long(v) -} - -// Float encodes v as a query string value -func (qv Value) Float(v float32) { - qv.queryValue.Float(v) -} - -// Double encodes v as a query string value -func (qv Value) Double(v float64) { - qv.queryValue.Double(v) -} - -// BigInteger encodes v as a query string value -func (qv Value) BigInteger(v *big.Int) { - qv.queryValue.BigInteger(v) -} - -// BigDecimal encodes v as a query string value -func (qv Value) BigDecimal(v *big.Float) { - qv.queryValue.BigDecimal(v) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go deleted file mode 100644 index 1bce78a4d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go +++ /dev/null @@ -1,85 +0,0 @@ -package restjson - -import ( - "encoding/json" - "io" - "strings" - - "github.com/aws/smithy-go" -) - -// GetErrorInfo util looks for code, __type, and message members in the -// json body. These members are optionally available, and the function -// returns the value of member if it is available. This function is useful to -// identify the error code, msg in a REST JSON error response. -func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) { - var errInfo struct { - Code string - Type string `json:"__type"` - Message string - } - - err = decoder.Decode(&errInfo) - if err != nil { - if err == io.EOF { - return errorType, message, nil - } - return errorType, message, err - } - - // assign error type - if len(errInfo.Code) != 0 { - errorType = errInfo.Code - } else if len(errInfo.Type) != 0 { - errorType = errInfo.Type - } - - // assign error message - if len(errInfo.Message) != 0 { - message = errInfo.Message - } - - // sanitize error - if len(errorType) != 0 { - errorType = SanitizeErrorCode(errorType) - } - - return errorType, message, nil -} - -// SanitizeErrorCode sanitizes the errorCode string . -// The rule for sanitizing is if a `:` character is present, then take only the -// contents before the first : character in the value. -// If a # character is present, then take only the contents after the -// first # character in the value. -func SanitizeErrorCode(errorCode string) string { - if strings.ContainsAny(errorCode, ":") { - errorCode = strings.SplitN(errorCode, ":", 2)[0] - } - - if strings.ContainsAny(errorCode, "#") { - errorCode = strings.SplitN(errorCode, "#", 2)[1] - } - - return errorCode -} - -// GetSmithyGenericAPIError returns smithy generic api error and an error interface. -// Takes in json decoder, and error Code string as args. The function retrieves error message -// and error code from the decoder body. If errorCode of length greater than 0 is passed in as -// an argument, it is used instead. 
-func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) { - errorType, message, err := GetErrorInfo(decoder) - if err != nil { - return nil, err - } - - if len(errorCode) == 0 { - errorCode = errorType - } - - return &smithy.GenericAPIError{ - Code: errorCode, - Message: message, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go deleted file mode 100644 index 6975ce652..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go +++ /dev/null @@ -1,48 +0,0 @@ -package xml - -import ( - "encoding/xml" - "fmt" - "io" -) - -// ErrorComponents represents the error response fields -// that will be deserialized from an xml error response body -type ErrorComponents struct { - Code string - Message string - RequestID string -} - -// GetErrorResponseComponents returns the error fields from an xml error response body -func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) { - if noErrorWrapping { - var errResponse noWrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents(errResponse), nil - } - - var errResponse wrappedErrorResponse - if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) - } - return ErrorComponents(errResponse), nil -} - -// noWrappedErrorResponse represents the error response body with -// no internal Error wrapping -type noWrappedErrorResponse struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - RequestID string `xml:"RequestId"` -} - -// wrappedErrorResponse represents the error response body -// wrapped within Error -type wrappedErrorResponse struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go deleted file mode 100644 index 8c7836410..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go +++ /dev/null @@ -1,20 +0,0 @@ -package ratelimit - -import "context" - -// None implements a no-op rate limiter which effectively disables client-side -// rate limiting (also known as "retry quotas"). -// -// GetToken does nothing and always returns a nil error. The returned -// token-release function does nothing, and always returns a nil error. -// -// AddTokens does nothing and always returns a nil error. -var None = &none{} - -type none struct{} - -func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) { - return func() error { return nil }, nil -} - -func (*none) AddTokens(v uint) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go deleted file mode 100644 index 974ef594f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go +++ /dev/null @@ -1,96 +0,0 @@ -package ratelimit - -import ( - "sync" -) - -// TokenBucket provides a concurrency safe utility for adding and removing -// tokens from the available token bucket. 
-type TokenBucket struct { - remainingTokens uint - maxCapacity uint - minCapacity uint - mu sync.Mutex -} - -// NewTokenBucket returns an initialized TokenBucket with the capacity -// specified. -func NewTokenBucket(i uint) *TokenBucket { - return &TokenBucket{ - remainingTokens: i, - maxCapacity: i, - minCapacity: 1, - } -} - -// Retrieve attempts to reduce the available tokens by the amount requested. If -// there are tokens available true will be returned along with the number of -// available tokens remaining. If amount requested is larger than the available -// capacity, false will be returned along with the available capacity. If the -// amount is less than the available capacity, the capacity will be reduced by -// that amount, and the remaining capacity and true will be returned. -func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) { - t.mu.Lock() - defer t.mu.Unlock() - - if amount > t.remainingTokens { - return t.remainingTokens, false - } - - t.remainingTokens -= amount - return t.remainingTokens, true -} - -// Refund returns the amount of tokens back to the available token bucket, up -// to the initial capacity. -func (t *TokenBucket) Refund(amount uint) { - t.mu.Lock() - defer t.mu.Unlock() - - // Capacity cannot exceed max capacity. - t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity) -} - -// Capacity returns the maximum capacity of tokens that the bucket could -// contain. -func (t *TokenBucket) Capacity() uint { - t.mu.Lock() - defer t.mu.Unlock() - - return t.maxCapacity -} - -// Remaining returns the number of tokens that remaining in the bucket. -func (t *TokenBucket) Remaining() uint { - t.mu.Lock() - defer t.mu.Unlock() - - return t.remainingTokens -} - -// Resize adjusts the size of the token bucket. Returns the capacity remaining. -func (t *TokenBucket) Resize(size uint) uint { - t.mu.Lock() - defer t.mu.Unlock() - - t.maxCapacity = uintMax(size, t.minCapacity) - - // Capacity needs to be capped at max capacity, if max size reduced. - t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity) - - return t.remainingTokens -} - -func uintMin(a, b uint) uint { - if a < b { - return a - } - return b -} - -func uintMax(a, b uint) uint { - if a > b { - return a - } - return b -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go deleted file mode 100644 index d89090ad3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go +++ /dev/null @@ -1,83 +0,0 @@ -package ratelimit - -import ( - "context" - "fmt" -) - -type rateToken struct { - tokenCost uint - bucket *TokenBucket -} - -func (t rateToken) release() error { - t.bucket.Refund(t.tokenCost) - return nil -} - -// TokenRateLimit provides a Token Bucket RateLimiter implementation -// that limits the overall number of retry attempts that can be made across -// operation invocations. -type TokenRateLimit struct { - bucket *TokenBucket -} - -// NewTokenRateLimit returns an TokenRateLimit with default values. -// Functional options can configure the retry rate limiter. 
-func NewTokenRateLimit(tokens uint) *TokenRateLimit { - return &TokenRateLimit{ - bucket: NewTokenBucket(tokens), - } -} - -type canceledError struct { - Err error -} - -func (c canceledError) CanceledError() bool { return true } -func (c canceledError) Unwrap() error { return c.Err } -func (c canceledError) Error() string { - return fmt.Sprintf("canceled, %v", c.Err) -} - -// GetToken may cause a available pool of retry quota to be -// decremented. Will return an error if the decremented value can not be -// reduced from the retry quota. -func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) { - select { - case <-ctx.Done(): - return nil, canceledError{Err: ctx.Err()} - default: - } - if avail, ok := l.bucket.Retrieve(cost); !ok { - return nil, QuotaExceededError{Available: avail, Requested: cost} - } - - return rateToken{ - tokenCost: cost, - bucket: l.bucket, - }.release, nil -} - -// AddTokens increments the token bucket by a fixed amount. -func (l *TokenRateLimit) AddTokens(v uint) error { - l.bucket.Refund(v) - return nil -} - -// Remaining returns the number of remaining tokens in the bucket. -func (l *TokenRateLimit) Remaining() uint { - return l.bucket.Remaining() -} - -// QuotaExceededError provides the SDK error when the retries for a given -// token bucket have been exhausted. -type QuotaExceededError struct { - Available uint - Requested uint -} - -func (e QuotaExceededError) Error() string { - return fmt.Sprintf("retry quota exceeded, %d available, %d requested", - e.Available, e.Requested) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go deleted file mode 100644 index d8d00e615..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go +++ /dev/null @@ -1,25 +0,0 @@ -package aws - -import ( - "fmt" -) - -// TODO remove replace with smithy.CanceledError - -// RequestCanceledError is the error that will be returned by an API request -// that was canceled. Requests given a Context may return this error when -// canceled. -type RequestCanceledError struct { - Err error -} - -// CanceledError returns true to satisfy interfaces checking for canceled errors. -func (*RequestCanceledError) CanceledError() bool { return true } - -// Unwrap returns the underlying error, if there was one. -func (e *RequestCanceledError) Unwrap() error { - return e.Err -} -func (e *RequestCanceledError) Error() string { - return fmt.Sprintf("request canceled, %v", e.Err) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go deleted file mode 100644 index 4dfde8573..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go +++ /dev/null @@ -1,156 +0,0 @@ -package retry - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -const ( - // DefaultRequestCost is the cost of a single request from the adaptive - // rate limited token bucket. - DefaultRequestCost uint = 1 -) - -// DefaultThrottles provides the set of errors considered throttle errors that -// are checked by default. -var DefaultThrottles = []IsErrorThrottle{ - ThrottleErrorCode{ - Codes: DefaultThrottleErrorCodes, - }, -} - -// AdaptiveModeOptions provides the functional options for configuring the -// adaptive retry mode, and delay behavior. 
-type AdaptiveModeOptions struct { - // If the adaptive token bucket is empty, when an attempt will be made - // AdaptiveMode will sleep until a token is available. This can occur when - // attempts fail with throttle errors. Use this option to disable the sleep - // until token is available, and return error immediately. - FailOnNoAttemptTokens bool - - // The cost of an attempt from the AdaptiveMode's adaptive token bucket. - RequestCost uint - - // Set of strategies to determine if the attempt failed due to a throttle - // error. - // - // It is safe to append to this list in NewAdaptiveMode's functional options. - Throttles []IsErrorThrottle - - // Set of options for standard retry mode that AdaptiveMode is built on top - // of. AdaptiveMode may apply its own defaults to Standard retry mode that - // are different than the defaults of NewStandard. Use these options to - // override the default options. - StandardOptions []func(*StandardOptions) -} - -// AdaptiveMode provides an experimental retry strategy that expands on the -// Standard retry strategy, adding client attempt rate limits. The attempt rate -// limit is initially unrestricted, but becomes restricted when the attempt -// fails with for a throttle error. When restricted AdaptiveMode may need to -// sleep before an attempt is made, if too many throttles have been received. -// AdaptiveMode's sleep can be canceled with context cancel. Set -// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep, -// to fail fast. -// -// Eventually unrestricted attempt rate limit will be restored once attempts no -// longer are failing due to throttle errors. -type AdaptiveMode struct { - options AdaptiveModeOptions - throttles IsErrorThrottles - - retryer aws.RetryerV2 - rateLimit *adaptiveRateLimit -} - -// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy. -func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode { - o := AdaptiveModeOptions{ - RequestCost: DefaultRequestCost, - Throttles: append([]IsErrorThrottle{}, DefaultThrottles...), - } - for _, fn := range optFns { - fn(&o) - } - - return &AdaptiveMode{ - options: o, - throttles: IsErrorThrottles(o.Throttles), - retryer: NewStandard(o.StandardOptions...), - rateLimit: newAdaptiveRateLimit(), - } -} - -// IsErrorRetryable returns if the failed attempt is retryable. This check -// should determine if the error can be retried, or if the error is -// terminal. -func (a *AdaptiveMode) IsErrorRetryable(err error) bool { - return a.retryer.IsErrorRetryable(err) -} - -// MaxAttempts returns the maximum number of attempts that can be made for -// an attempt before failing. A value of 0 implies that the attempt should -// be retried until it succeeds if the errors are retryable. -func (a *AdaptiveMode) MaxAttempts() int { - return a.retryer.MaxAttempts() -} - -// RetryDelay returns the delay that should be used before retrying the -// attempt. Will return error if the if the delay could not be determined. -func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) ( - time.Duration, error, -) { - return a.retryer.RetryDelay(attempt, opErr) -} - -// GetRetryToken attempts to deduct the retry cost from the retry token pool. -// Returning the token release function, or error. 
-func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) ( - releaseToken func(error) error, err error, -) { - return a.retryer.GetRetryToken(ctx, opErr) -} - -// GetInitialToken returns the initial attempt token that can increment the -// retry token pool if the attempt is successful. -// -// Deprecated: This method does not provide a way to block using Context, -// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only -// present to implement Retryer interface. -func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) { - return nopRelease -} - -// GetAttemptToken returns the attempt token that can be used to rate limit -// attempt calls. Will be used by the SDK's retry package's Attempt -// middleware to get an attempt token prior to calling the temp and releasing -// the attempt token after the attempt has been made. -func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) { - for { - acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost) - if acquiredToken { - break - } - if a.options.FailOnNoAttemptTokens { - return nil, fmt.Errorf( - "unable to get attempt token, and FailOnNoAttemptTokens enables") - } - - if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil { - return nil, fmt.Errorf("failed to wait for token to be available, %w", err) - } - } - - return a.handleResponse, nil -} - -func (a *AdaptiveMode) handleResponse(opErr error) error { - throttled := a.throttles.IsErrorThrottle(opErr).Bool() - - a.rateLimit.Update(throttled) - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go deleted file mode 100644 index ad96d9b8c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go +++ /dev/null @@ -1,158 +0,0 @@ -package retry - -import ( - "math" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -type adaptiveRateLimit struct { - tokenBucketEnabled bool - - smooth float64 - beta float64 - scaleConstant float64 - minFillRate float64 - - fillRate float64 - calculatedRate float64 - lastRefilled time.Time - measuredTxRate float64 - lastTxRateBucket float64 - requestCount int64 - lastMaxRate float64 - lastThrottleTime time.Time - timeWindow float64 - - tokenBucket *adaptiveTokenBucket - - mu sync.Mutex -} - -func newAdaptiveRateLimit() *adaptiveRateLimit { - now := sdk.NowTime() - return &adaptiveRateLimit{ - smooth: 0.8, - beta: 0.7, - scaleConstant: 0.4, - - minFillRate: 0.5, - - lastTxRateBucket: math.Floor(timeFloat64Seconds(now)), - lastThrottleTime: now, - - tokenBucket: newAdaptiveTokenBucket(0), - } -} - -func (a *adaptiveRateLimit) Enable(v bool) { - a.mu.Lock() - defer a.mu.Unlock() - - a.tokenBucketEnabled = v -} - -func (a *adaptiveRateLimit) AcquireToken(amount uint) ( - tokenAcquired bool, waitTryAgain time.Duration, -) { - a.mu.Lock() - defer a.mu.Unlock() - - if !a.tokenBucketEnabled { - return true, 0 - } - - a.tokenBucketRefill() - - available, ok := a.tokenBucket.Retrieve(float64(amount)) - if !ok { - waitDur := float64Seconds((float64(amount) - available) / a.fillRate) - return false, waitDur - } - - return true, 0 -} - -func (a *adaptiveRateLimit) Update(throttled bool) { - a.mu.Lock() - defer a.mu.Unlock() - - a.updateMeasuredRate() - - if throttled { - rateToUse := a.measuredTxRate - if a.tokenBucketEnabled { - rateToUse = math.Min(a.measuredTxRate, a.fillRate) - } - - a.lastMaxRate = 
rateToUse - a.calculateTimeWindow() - a.lastThrottleTime = sdk.NowTime() - a.calculatedRate = a.cubicThrottle(rateToUse) - a.tokenBucketEnabled = true - } else { - a.calculateTimeWindow() - a.calculatedRate = a.cubicSuccess(sdk.NowTime()) - } - - newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate) - a.tokenBucketUpdateRate(newRate) -} - -func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 { - dt := secondsFloat64(t.Sub(a.lastThrottleTime)) - return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate -} - -func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 { - return rateToUse * a.beta -} - -func (a *adaptiveRateLimit) calculateTimeWindow() { - a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.) -} - -func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) { - a.tokenBucketRefill() - a.fillRate = math.Max(newRPS, a.minFillRate) - a.tokenBucket.Resize(newRPS) -} - -func (a *adaptiveRateLimit) updateMeasuredRate() { - now := sdk.NowTime() - timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2. - a.requestCount++ - - if timeBucket > a.lastTxRateBucket { - currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket) - a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth)) - a.requestCount = 0 - a.lastTxRateBucket = timeBucket - } -} - -func (a *adaptiveRateLimit) tokenBucketRefill() { - now := sdk.NowTime() - if a.lastRefilled.IsZero() { - a.lastRefilled = now - return - } - - fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate - a.tokenBucket.Refund(fillAmount) - a.lastRefilled = now -} - -func float64Seconds(v float64) time.Duration { - return time.Duration(v * float64(time.Second)) -} - -func secondsFloat64(v time.Duration) float64 { - return float64(v) / float64(time.Second) -} - -func timeFloat64Seconds(v time.Time) float64 { - return float64(v.UnixNano()) / float64(time.Second) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go deleted file mode 100644 index 052723e8e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go +++ /dev/null @@ -1,83 +0,0 @@ -package retry - -import ( - "math" - "sync" -) - -// adaptiveTokenBucket provides a concurrency safe utility for adding and -// removing tokens from the available token bucket. -type adaptiveTokenBucket struct { - remainingTokens float64 - maxCapacity float64 - minCapacity float64 - mu sync.Mutex -} - -// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the -// capacity specified. -func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket { - return &adaptiveTokenBucket{ - remainingTokens: i, - maxCapacity: i, - minCapacity: 1, - } -} - -// Retrieve attempts to reduce the available tokens by the amount requested. If -// there are tokens available true will be returned along with the number of -// available tokens remaining. If amount requested is larger than the available -// capacity, false will be returned along with the available capacity. If the -// amount is less than the available capacity, the capacity will be reduced by -// that amount, and the remaining capacity and true will be returned. 
-func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) { - t.mu.Lock() - defer t.mu.Unlock() - - if amount > t.remainingTokens { - return t.remainingTokens, false - } - - t.remainingTokens -= amount - return t.remainingTokens, true -} - -// Refund returns the amount of tokens back to the available token bucket, up -// to the initial capacity. -func (t *adaptiveTokenBucket) Refund(amount float64) { - t.mu.Lock() - defer t.mu.Unlock() - - // Capacity cannot exceed max capacity. - t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity) -} - -// Capacity returns the maximum capacity of tokens that the bucket could -// contain. -func (t *adaptiveTokenBucket) Capacity() float64 { - t.mu.Lock() - defer t.mu.Unlock() - - return t.maxCapacity -} - -// Remaining returns the number of tokens that remaining in the bucket. -func (t *adaptiveTokenBucket) Remaining() float64 { - t.mu.Lock() - defer t.mu.Unlock() - - return t.remainingTokens -} - -// Resize adjusts the size of the token bucket. Returns the capacity remaining. -func (t *adaptiveTokenBucket) Resize(size float64) float64 { - t.mu.Lock() - defer t.mu.Unlock() - - t.maxCapacity = math.Max(size, t.minCapacity) - - // Capacity needs to be capped at max capacity, if max size reduced. - t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity) - - return t.remainingTokens -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go deleted file mode 100644 index bfa5bf7d1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go +++ /dev/null @@ -1,51 +0,0 @@ -package retry - -import ( - "context" - - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" -) - -type attemptMetrics struct { - Attempts metrics.Int64Counter - Errors metrics.Int64Counter - - AttemptDuration metrics.Float64Histogram -} - -func newAttemptMetrics(meter metrics.Meter) (*attemptMetrics, error) { - m := &attemptMetrics{} - var err error - - m.Attempts, err = meter.Int64Counter("client.call.attempts", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "{attempt}" - o.Description = "The number of attempts for an individual operation" - }) - if err != nil { - return nil, err - } - m.Errors, err = meter.Int64Counter("client.call.errors", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "{error}" - o.Description = "The number of errors for an operation" - }) - if err != nil { - return nil, err - } - m.AttemptDuration, err = meter.Float64Histogram("client.call.attempt_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "The time it takes to connect to the service, send the request, and get back HTTP status code and headers (including time queued waiting to be sent)" - }) - if err != nil { - return nil, err - } - - return m, nil -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go deleted file mode 100644 index 3a08ebe0a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go +++ /dev/null @@ -1,80 +0,0 @@ -// Package retry provides interfaces and implementations for SDK request retry behavior. 
-// -// # Retryer Interface and Implementations -// -// This package defines Retryer interface that is used to either implement custom retry behavior -// or to extend the existing retry implementations provided by the SDK. This package provides a single -// retry implementation: Standard. -// -// # Standard -// -// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited -// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs. -// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client, -// and uses an additional delay policy to limit the time between a requests subsequent attempts. -// -// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether -// a given error is retryable. By default this list of retryables includes the following: -// - Retrying errors that implement the RetryableError method, and return true. -// - Connection Errors -// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true. -// - Connection Reset Errors. -// - net.OpErr types that are dialing errors or are temporary. -// - HTTP Status Codes: 500, 502, 503, and 504. -// - API Error Codes -// - RequestTimeout, RequestTimeoutException -// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException, -// RequestThrottled, SlowDown, EC2ThrottledException -// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException -// - TransactionInProgressException, PriorRequestNotComplete -// -// The standard retryer will not retry a request in the event if the context associated with the request -// has been cancelled. Applications must handle this case explicitly if they wish to retry with a different context -// value. -// -// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer -// using the NewStandard function, and providing one more functional argument that mutate the StandardOptions -// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions, -// and the retry delay policy. -// -// For example to modify the default retry attempts for the standard retryer: -// -// // configure the custom retryer -// customRetry := retry.NewStandard(func(o *retry.StandardOptions) { -// o.MaxAttempts = 5 -// }) -// -// // create a service client with the retryer -// s3.NewFromConfig(cfg, func(o *s3.Options) { -// o.Retryer = customRetry -// }) -// -// # Utilities -// -// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic -// way. These are: -// -// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable -// in addition to those considered retryable by the provided retryer. -// -// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping -// a retryer implementation. -// -// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a -// request by wrapping a retryer implementation. 
-// -// The following package functions have been provided to easily satisfy different retry interfaces to further customize -// a given retryer's behavior: -// -// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example, -// you can use this method to easily create custom back off policies to be used with the -// standard retryer. -// -// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example, -// this can be used to extend the standard retryer to add additional logic to determine if an -// error should be retried. -// -// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example, -// this can be used to extend the standard retryer to add additional logic to determine if an -// error should be considered a timeout. -package retry diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go deleted file mode 100644 index 3e432eefe..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -package retry - -import "fmt" - -// MaxAttemptsError provides the error when the maximum number of attempts have -// been exceeded. -type MaxAttemptsError struct { - Attempt int - Err error -} - -func (e *MaxAttemptsError) Error() string { - return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err) -} - -// Unwrap returns the nested error causing the max attempts error. Provides the -// implementation for errors.Is and errors.As to unwrap nested errors. -func (e *MaxAttemptsError) Unwrap() error { - return e.Err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go deleted file mode 100644 index c266996de..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go +++ /dev/null @@ -1,49 +0,0 @@ -package retry - -import ( - "math" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/timeconv" -) - -// ExponentialJitterBackoff provides backoff delays with jitter based on the -// number of attempts. -type ExponentialJitterBackoff struct { - maxBackoff time.Duration - // precomputed number of attempts needed to reach max backoff. - maxBackoffAttempts float64 - - randFloat64 func() (float64, error) -} - -// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured -// for the max backoff. -func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff { - return &ExponentialJitterBackoff{ - maxBackoff: maxBackoff, - maxBackoffAttempts: math.Log2( - float64(maxBackoff) / float64(time.Second)), - randFloat64: rand.CryptoRandFloat64, - } -} - -// BackoffDelay returns the duration to wait before the next attempt should be -// made. Returns an error if unable get a duration. 
-func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) { - if attempt > int(j.maxBackoffAttempts) { - return j.maxBackoff, nil - } - - b, err := j.randFloat64() - if err != nil { - return 0, err - } - - // [0.0, 1.0) * 2 ^ attempts - ri := int64(1 << uint64(attempt)) - delaySeconds := b * float64(ri) - - return timeconv.FloatSecondsDur(delaySeconds), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go deleted file mode 100644 index 7a3f18301..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go +++ /dev/null @@ -1,52 +0,0 @@ -package retry - -import ( - awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" -) - -// attemptResultsKey is a metadata accessor key to retrieve metadata -// for all request attempts. -type attemptResultsKey struct { -} - -// GetAttemptResults retrieves attempts results from middleware metadata. -func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) { - m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults) - return m, ok -} - -// AttemptResults represents struct containing metadata returned by all request attempts. -type AttemptResults struct { - - // Results is a slice consisting attempt result from all request attempts. - // Results are stored in order request attempt is made. - Results []AttemptResult -} - -// AttemptResult represents attempt result returned by a single request attempt. -type AttemptResult struct { - - // Err is the error if received for the request attempt. - Err error - - // Retryable denotes if request may be retried. This states if an - // error is considered retryable. - Retryable bool - - // Retried indicates if this request was retried. - Retried bool - - // ResponseMetadata is any existing metadata passed via the response middlewares. - ResponseMetadata middleware.Metadata -} - -// addAttemptResults adds attempt results to middleware metadata -func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) { - metadata.Set(attemptResultsKey{}, v) -} - -// GetRawResponse returns raw response recorded for the attempt result -func (a AttemptResult) GetRawResponse() interface{} { - return awsmiddle.GetRawResponse(a.ResponseMetadata) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go deleted file mode 100644 index 52d59b04b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ /dev/null @@ -1,418 +0,0 @@ -package retry - -import ( - "context" - "errors" - "fmt" - "strconv" - "strings" - "time" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - smithymiddle "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - "github.com/aws/smithy-go/transport/http" -) - -// RequestCloner is a function that can take an input request type and clone -// the request for use in a subsequent retry attempt. 
-type RequestCloner func(interface{}) interface{} - -type retryMetadata struct { - AttemptNum int - AttemptTime time.Time - MaxAttempts int - AttemptClockSkew time.Duration -} - -// Attempt is a Smithy Finalize middleware that handles retry attempts using -// the provided Retryer implementation. -type Attempt struct { - // Enable the logging of retry attempts performed by the SDK. This will - // include logging retry attempts, unretryable errors, and when max - // attempts are reached. - LogAttempts bool - - // A Meter instance for recording retry-related metrics. - OperationMeter metrics.Meter - - retryer aws.RetryerV2 - requestCloner RequestCloner -} - -// define the threshold at which we will consider certain kind of errors to be probably -// caused by clock skew -const skewThreshold = 4 * time.Minute - -// NewAttemptMiddleware returns a new Attempt retry middleware. -func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt { - m := &Attempt{ - retryer: wrapAsRetryerV2(retryer), - requestCloner: requestCloner, - } - for _, fn := range optFns { - fn(m) - } - if m.OperationMeter == nil { - m.OperationMeter = metrics.NopMeterProvider{}.Meter("") - } - - return m -} - -// ID returns the middleware identifier -func (r *Attempt) ID() string { return "Retry" } - -func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) { - if !r.LogAttempts { - return - } - logger.Logf(classification, format, v...) -} - -// HandleFinalize utilizes the provider Retryer implementation to attempt -// retries over the next handler -func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) ( - out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error, -) { - var attemptNum int - var attemptClockSkew time.Duration - var attemptResults AttemptResults - - maxAttempts := r.retryer.MaxAttempts() - releaseRetryToken := nopRelease - - retryMetrics, err := newAttemptMetrics(r.OperationMeter) - if err != nil { - return out, metadata, err - } - - for { - attemptNum++ - attemptInput := in - attemptInput.Request = r.requestCloner(attemptInput.Request) - - // Record the metadata for the for attempt being started. - attemptCtx := setRetryMetadata(ctx, retryMetadata{ - AttemptNum: attemptNum, - AttemptTime: sdk.NowTime().UTC(), - MaxAttempts: maxAttempts, - AttemptClockSkew: attemptClockSkew, - }) - - // Setting clock skew to be used on other context (like signing) - ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) - - var attemptResult AttemptResult - - attemptCtx, span := tracing.StartSpan(attemptCtx, "Attempt", func(o *tracing.SpanOptions) { - o.Properties.Set("operation.attempt", attemptNum) - }) - retryMetrics.Attempts.Add(ctx, 1, withOperationMetadata(ctx)) - - start := sdk.NowTime() - out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) - elapsed := sdk.NowTime().Sub(start) - - retryMetrics.AttemptDuration.Record(ctx, float64(elapsed)/1e9, withOperationMetadata(ctx)) - if err != nil { - retryMetrics.Errors.Add(ctx, 1, withOperationMetadata(ctx), func(o *metrics.RecordMetricOptions) { - o.Properties.Set("exception.type", errorType(err)) - }) - } - - span.End() - - attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) - - // AttemptResult Retried states that the attempt was not successful, and - // should be retried. 
- shouldRetry := attemptResult.Retried - - // Add attempt metadata to list of all attempt metadata - attemptResults.Results = append(attemptResults.Results, attemptResult) - - if !shouldRetry { - // Ensure the last response's metadata is used as the bases for result - // metadata returned by the stack. The Slice of attempt results - // will be added to this cloned metadata. - metadata = attemptResult.ResponseMetadata.Clone() - - break - } - } - - addAttemptResults(&metadata, attemptResults) - return out, metadata, err -} - -// handleAttempt handles an individual request attempt. -func (r *Attempt) handleAttempt( - ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler, -) ( - out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error, -) { - defer func() { - attemptResult.Err = err - }() - - // Short circuit if this attempt never can succeed because the context is - // canceled. This reduces the chance of token pools being modified for - // attempts that will not be made - select { - case <-ctx.Done(): - return out, attemptResult, nopRelease, ctx.Err() - default: - } - - //------------------------------ - // Get Attempt Token - //------------------------------ - releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx) - if err != nil { - return out, attemptResult, nopRelease, fmt.Errorf( - "failed to get retry Send token, %w", err) - } - - //------------------------------ - // Send Attempt - //------------------------------ - logger := smithymiddle.GetLogger(ctx) - service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx) - retryMetadata, _ := getRetryMetadata(ctx) - attemptNum := retryMetadata.AttemptNum - maxAttempts := retryMetadata.MaxAttempts - - // Following attempts must ensure the request payload stream starts in a - // rewound state. - if attemptNum > 1 { - if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok { - if rewindErr := rewindable.RewindStream(); rewindErr != nil { - return out, attemptResult, nopRelease, fmt.Errorf( - "failed to rewind transport stream for retry, %w", rewindErr) - } - } - - r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d", - service, operation, attemptNum) - } - - var metadata smithymiddle.Metadata - out, metadata, err = next.HandleFinalize(ctx, in) - attemptResult.ResponseMetadata = metadata - - //------------------------------ - // Bookkeeping - //------------------------------ - // Release the retry token based on the state of the attempt's error (if any). - if releaseError := releaseRetryToken(err); releaseError != nil && err != nil { - return out, attemptResult, nopRelease, fmt.Errorf( - "failed to release retry token after request error, %w", err) - } - // Release the attempt token based on the state of the attempt's error (if any). - if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil { - return out, attemptResult, nopRelease, fmt.Errorf( - "failed to release initial token after request error, %w", err) - } - // If there was no error making the attempt, nothing further to do. There - // will be nothing to retry. - if err == nil { - return out, attemptResult, nopRelease, err - } - - err = wrapAsClockSkew(ctx, err) - - //------------------------------ - // Is Retryable and Should Retry - //------------------------------ - // If the attempt failed with an unretryable error, nothing further to do - // but return, and inform the caller about the terminal failure. 
- retryable := r.retryer.IsErrorRetryable(err) - if !retryable { - r.logf(logger, logging.Debug, "request failed with unretryable error %v", err) - return out, attemptResult, nopRelease, err - } - - // set retryable to true - attemptResult.Retryable = true - - // Once the maximum number of attempts have been exhausted there is nothing - // further to do other than inform the caller about the terminal failure. - if maxAttempts > 0 && attemptNum >= maxAttempts { - r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts) - err = &MaxAttemptsError{ - Attempt: attemptNum, - Err: err, - } - return out, attemptResult, nopRelease, err - } - - //------------------------------ - // Get Retry (aka Retry Quota) Token - //------------------------------ - // Get a retry token that will be released after the - releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err) - if retryTokenErr != nil { - return out, attemptResult, nopRelease, retryTokenErr - } - - //------------------------------ - // Retry Delay and Sleep - //------------------------------ - // Get the retry delay before another attempt can be made, and sleep for - // that time. Potentially early exist if the sleep is canceled via the - // context. - retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err) - if reqErr != nil { - return out, attemptResult, releaseRetryToken, reqErr - } - if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil { - err = &aws.RequestCanceledError{Err: reqErr} - return out, attemptResult, releaseRetryToken, err - } - - // The request should be re-attempted. - attemptResult.Retried = true - - return out, attemptResult, releaseRetryToken, err -} - -// errors that, if detected when we know there's a clock skew, -// can be retried and have a high chance of success -var possibleSkewCodes = map[string]struct{}{ - "InvalidSignatureException": {}, - "SignatureDoesNotMatch": {}, - "AuthFailure": {}, -} - -var definiteSkewCodes = map[string]struct{}{ - "RequestExpired": {}, - "RequestInTheFuture": {}, - "RequestTimeTooSkewed": {}, -} - -// wrapAsClockSkew checks if this error could be related to a clock skew -// error and if so, wrap the error. 
-func wrapAsClockSkew(ctx context.Context, err error) error { - var v interface{ ErrorCode() string } - if !errors.As(err, &v) { - return err - } - if _, ok := definiteSkewCodes[v.ErrorCode()]; ok { - return &retryableClockSkewError{Err: err} - } - _, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()] - if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode { - return &retryableClockSkewError{Err: err} - } - return err -} - -// MetricsHeader attaches SDK request metric header for retries to the transport -type MetricsHeader struct{} - -// ID returns the middleware identifier -func (r *MetricsHeader) ID() string { - return "RetryMetricsHeader" -} - -// HandleFinalize attaches the SDK request metric header to the transport layer -func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) ( - out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error, -) { - retryMetadata, _ := getRetryMetadata(ctx) - - const retryMetricHeader = "Amz-Sdk-Request" - var parts []string - - parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum)) - if retryMetadata.MaxAttempts != 0 { - parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts)) - } - - var ttl time.Time - if deadline, ok := ctx.Deadline(); ok { - ttl = deadline - } - - // Only append the TTL if it can be determined. - if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 { - const unixTimeFormat = "20060102T150405Z" - ttl = ttl.Add(retryMetadata.AttemptClockSkew) - parts = append(parts, "ttl="+ttl.Format(unixTimeFormat)) - } - - switch req := in.Request.(type) { - case *http.Request: - req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; ")) - default: - return out, metadata, fmt.Errorf("unknown transport type %T", req) - } - - return next.HandleFinalize(ctx, in) -} - -type retryMetadataKey struct{} - -// getRetryMetadata retrieves retryMetadata from the context and a bool -// indicating if it was set. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { - metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) - return metadata, ok -} - -// setRetryMetadata sets the retryMetadata on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { - return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata) -} - -// AddRetryMiddlewaresOptions is the set of options that can be passed to -// AddRetryMiddlewares for configuring retry associated middleware. -type AddRetryMiddlewaresOptions struct { - Retryer aws.Retryer - - // Enable the logging of retry attempts performed by the SDK. This will - // include logging retry attempts, unretryable errors, and when max - // attempts are reached. 
- LogRetryAttempts bool -} - -// AddRetryMiddlewares adds retry middleware to operation middleware stack -func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error { - attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) { - middleware.LogAttempts = options.LogRetryAttempts - }) - - // index retry to before signing, if signing exists - if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil { - return err - } - - if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil { - return err - } - return nil -} - -// Determines the value of exception.type for metrics purposes. We prefer an -// API-specific error code, otherwise it's just the Go type for the value. -func errorType(err error) string { - var terr smithy.APIError - if errors.As(err, &terr) { - return terr.ErrorCode() - } - return fmt.Sprintf("%T", err) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go deleted file mode 100644 index af81635b3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go +++ /dev/null @@ -1,90 +0,0 @@ -package retry - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// AddWithErrorCodes returns a Retryer with additional error codes considered -// for determining if the error should be retried. -func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer { - retryable := &RetryableErrorCode{ - Codes: map[string]struct{}{}, - } - for _, c := range codes { - retryable.Codes[c] = struct{}{} - } - - return &withIsErrorRetryable{ - RetryerV2: wrapAsRetryerV2(r), - Retryable: retryable, - } -} - -type withIsErrorRetryable struct { - aws.RetryerV2 - Retryable IsErrorRetryable -} - -func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool { - if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary { - return v.Bool() - } - return r.RetryerV2.IsErrorRetryable(err) -} - -// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value -// specified. -func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer { - return &withMaxAttempts{ - RetryerV2: wrapAsRetryerV2(r), - Max: max, - } -} - -type withMaxAttempts struct { - aws.RetryerV2 - Max int -} - -func (w *withMaxAttempts) MaxAttempts() int { - return w.Max -} - -// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer -// overriding the RetryDelay behavior for a alternate minimum initial backoff -// delay. 
-func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer { - return &withMaxBackoffDelay{ - RetryerV2: wrapAsRetryerV2(r), - backoff: NewExponentialJitterBackoff(delay), - } -} - -type withMaxBackoffDelay struct { - aws.RetryerV2 - backoff *ExponentialJitterBackoff -} - -func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) { - return r.backoff.BackoffDelay(attempt, err) -} - -type wrappedAsRetryerV2 struct { - aws.Retryer -} - -func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 { - v, ok := r.(aws.RetryerV2) - if !ok { - v = wrappedAsRetryerV2{Retryer: r} - } - - return v -} - -func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) { - return w.Retryer.GetInitialToken(), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go deleted file mode 100644 index 1b485f998..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ /dev/null @@ -1,228 +0,0 @@ -package retry - -import ( - "errors" - "fmt" - "net" - "net/url" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// IsErrorRetryable provides the interface of an implementation to determine if -// a error as the result of an operation is retryable. -type IsErrorRetryable interface { - IsErrorRetryable(error) aws.Ternary -} - -// IsErrorRetryables is a collection of checks to determine of the error is -// retryable. Iterates through the checks and returns the state of retryable -// if any check returns something other than unknown. -type IsErrorRetryables []IsErrorRetryable - -// IsErrorRetryable returns if the error is retryable if any of the checks in -// the list return a value other than unknown. -func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary { - for _, re := range r { - if v := re.IsErrorRetryable(err); v != aws.UnknownTernary { - return v - } - } - return aws.UnknownTernary -} - -// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface. -type IsErrorRetryableFunc func(error) aws.Ternary - -// IsErrorRetryable returns if the error is retryable. -func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary { - return fn(err) -} - -// RetryableError is an IsErrorRetryable implementation which uses the -// optional interface Retryable on the error value to determine if the error is -// retryable. -type RetryableError struct{} - -// IsErrorRetryable returns if the error is retryable if it satisfies the -// Retryable interface, and returns if the attempt should be retried. -func (RetryableError) IsErrorRetryable(err error) aws.Ternary { - var v interface{ RetryableError() bool } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - return aws.BoolTernary(v.RetryableError()) -} - -// NoRetryCanceledError detects if the error was an request canceled error and -// returns if so. -type NoRetryCanceledError struct{} - -// IsErrorRetryable returns the error is not retryable if the request was -// canceled. -func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary { - var v interface{ CanceledError() bool } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - if v.CanceledError() { - return aws.FalseTernary - } - return aws.UnknownTernary -} - -// RetryableConnectionError determines if the underlying error is an HTTP -// connection and returns if it should be retried. 
-// -// Includes errors such as connection reset, connection refused, net dial, -// temporary, and timeout errors. -type RetryableConnectionError struct{} - -// IsErrorRetryable returns if the error is caused by and HTTP connection -// error, and should be retried. -func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { - if err == nil { - return aws.UnknownTernary - } - var retryable bool - - var conErr interface{ ConnectionError() bool } - var tempErr interface{ Temporary() bool } - var timeoutErr interface{ Timeout() bool } - var urlErr *url.Error - var netOpErr *net.OpError - var dnsError *net.DNSError - - if errors.As(err, &dnsError) { - // NXDOMAIN errors should not be retried - if dnsError.IsNotFound { - return aws.BoolTernary(false) - } - - // if !dnsError.Temporary(), error may or may not be temporary, - // (i.e. !Temporary() =/=> !retryable) so we should fall through to - // remaining checks - if dnsError.Temporary() { - return aws.BoolTernary(true) - } - } - - switch { - case errors.As(err, &conErr) && conErr.ConnectionError(): - retryable = true - - case strings.Contains(err.Error(), "use of closed network connection"): - fallthrough - case strings.Contains(err.Error(), "connection reset"): - // The errors "connection reset" and "use of closed network connection" - // are effectively the same. It appears to be the difference between - // sync and async read of TCP RST in the stdlib's net.Conn read loop. - // see #2737 - retryable = true - - case errors.As(err, &urlErr): - // Refused connections should be retried as the service may not yet be - // running on the port. Go TCP dial considers refused connections as - // not temporary. - if strings.Contains(urlErr.Error(), "connection refused") { - retryable = true - } else { - return r.IsErrorRetryable(errors.Unwrap(urlErr)) - } - - case errors.As(err, &netOpErr): - // Network dial, or temporary network errors are always retryable. - if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() { - retryable = true - } else { - return r.IsErrorRetryable(errors.Unwrap(netOpErr)) - } - - case errors.As(err, &tempErr) && tempErr.Temporary(): - // Fallback to the generic temporary check, with temporary errors - // retryable. - retryable = true - - case errors.As(err, &timeoutErr) && timeoutErr.Timeout(): - // Fallback to the generic timeout check, with timeout errors - // retryable. - retryable = true - - default: - return aws.UnknownTernary - } - - return aws.BoolTernary(retryable) - -} - -// RetryableHTTPStatusCode provides a IsErrorRetryable based on HTTP status -// codes. -type RetryableHTTPStatusCode struct { - Codes map[int]struct{} -} - -// IsErrorRetryable return if the passed in error is retryable based on the -// HTTP status code. -func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary { - var v interface{ HTTPStatusCode() int } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - _, ok := r.Codes[v.HTTPStatusCode()] - if !ok { - return aws.UnknownTernary - } - - return aws.TrueTernary -} - -// RetryableErrorCode determines if an attempt should be retried based on the -// API error code. -type RetryableErrorCode struct { - Codes map[string]struct{} -} - -// IsErrorRetryable return if the error is retryable based on the error codes. -// Returns unknown if the error doesn't have a code or it is unknown. 
-func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary { - var v interface{ ErrorCode() string } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - _, ok := r.Codes[v.ErrorCode()] - if !ok { - return aws.UnknownTernary - } - - return aws.TrueTernary -} - -// retryableClockSkewError marks errors that can be caused by clock skew -// (difference between server time and client time). -// This is returned when there's certain confidence that adjusting the client time -// could allow a retry to succeed -type retryableClockSkewError struct{ Err error } - -func (e *retryableClockSkewError) Error() string { - return fmt.Sprintf("Probable clock skew error: %v", e.Err) -} - -// Unwrap returns the wrapped error. -func (e *retryableClockSkewError) Unwrap() error { - return e.Err -} - -// RetryableError allows the retryer to retry this request -func (e *retryableClockSkewError) RetryableError() bool { - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go deleted file mode 100644 index d5ea93222..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go +++ /dev/null @@ -1,269 +0,0 @@ -package retry - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws/ratelimit" -) - -// BackoffDelayer provides the interface for determining the delay to before -// another request attempt, that previously failed. -type BackoffDelayer interface { - BackoffDelay(attempt int, err error) (time.Duration, error) -} - -// BackoffDelayerFunc provides a wrapper around a function to determine the -// backoff delay of an attempt retry. -type BackoffDelayerFunc func(int, error) (time.Duration, error) - -// BackoffDelay returns the delay before attempt to retry a request. -func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) { - return fn(attempt, err) -} - -const ( - // DefaultMaxAttempts is the maximum of attempts for an API request - DefaultMaxAttempts int = 3 - - // DefaultMaxBackoff is the maximum back off delay between attempts - DefaultMaxBackoff time.Duration = 20 * time.Second -) - -// Default retry token quota values. -const ( - DefaultRetryRateTokens uint = 500 - DefaultRetryCost uint = 5 - DefaultRetryTimeoutCost uint = 10 - DefaultNoRetryIncrement uint = 1 -) - -// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK -// should consider as retryable errors. -var DefaultRetryableHTTPStatusCodes = map[int]struct{}{ - 500: {}, - 502: {}, - 503: {}, - 504: {}, -} - -// DefaultRetryableErrorCodes provides the set of API error codes that should -// be retried. -var DefaultRetryableErrorCodes = map[string]struct{}{ - "RequestTimeout": {}, - "RequestTimeoutException": {}, -} - -// DefaultThrottleErrorCodes provides the set of API error codes that are -// considered throttle errors. -var DefaultThrottleErrorCodes = map[string]struct{}{ - "Throttling": {}, - "ThrottlingException": {}, - "ThrottledException": {}, - "RequestThrottledException": {}, - "TooManyRequestsException": {}, - "ProvisionedThroughputExceededException": {}, - "TransactionInProgressException": {}, - "RequestLimitExceeded": {}, - "BandwidthLimitExceeded": {}, - "LimitExceededException": {}, - "RequestThrottled": {}, - "SlowDown": {}, - "PriorRequestNotComplete": {}, - "EC2ThrottledException": {}, -} - -// DefaultRetryables provides the set of retryable checks that are used by -// default. 
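// Not part of the removed vendor file: an illustrative sketch of how the
// retryable checks above compose. Each check returns an aws.Ternary, and
// IsErrorRetryables stops at the first non-unknown answer. errTooBusy is a
// made-up sentinel error used only for this sketch.
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

var errTooBusy = errors.New("storage temporarily busy")

func main() {
	checks := retry.IsErrorRetryables{
		// Custom check: treat the sentinel as retryable, stay neutral otherwise.
		retry.IsErrorRetryableFunc(func(err error) aws.Ternary {
			if errors.Is(err, errTooBusy) {
				return aws.TrueTernary
			}
			return aws.UnknownTernary
		}),
		// Reuse the SDK's status-code check with its default table.
		retry.RetryableHTTPStatusCode{Codes: retry.DefaultRetryableHTTPStatusCodes},
	}

	fmt.Println(checks.IsErrorRetryable(errTooBusy).Bool())          // true
	fmt.Println(checks.IsErrorRetryable(errors.New("other")).Bool()) // false: no check matched
}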
-var DefaultRetryables = []IsErrorRetryable{ - NoRetryCanceledError{}, - RetryableError{}, - RetryableConnectionError{}, - RetryableHTTPStatusCode{ - Codes: DefaultRetryableHTTPStatusCodes, - }, - RetryableErrorCode{ - Codes: DefaultRetryableErrorCodes, - }, - RetryableErrorCode{ - Codes: DefaultThrottleErrorCodes, - }, -} - -// DefaultTimeouts provides the set of timeout checks that are used by default. -var DefaultTimeouts = []IsErrorTimeout{ - TimeouterError{}, -} - -// StandardOptions provides the functional options for configuring the standard -// retryable, and delay behavior. -type StandardOptions struct { - // Maximum number of attempts that should be made. - MaxAttempts int - - // MaxBackoff duration between retried attempts. - MaxBackoff time.Duration - - // Provides the backoff strategy the retryer will use to determine the - // delay between retry attempts. - Backoff BackoffDelayer - - // Set of strategies to determine if the attempt should be retried based on - // the error response received. - // - // It is safe to append to this list in NewStandard's functional options. - Retryables []IsErrorRetryable - - // Set of strategies to determine if the attempt failed due to a timeout - // error. - // - // It is safe to append to this list in NewStandard's functional options. - Timeouts []IsErrorTimeout - - // Provides the rate limiting strategy for rate limiting attempt retries - // across all attempts the retryer is being used with. - // - // A RateLimiter operates as a token bucket with a set capacity, where - // attempt failures events consume tokens. A retry attempt that attempts to - // consume more tokens than what's available results in operation failure. - // The default implementation is parameterized as follows: - // - a capacity of 500 (DefaultRetryRateTokens) - // - a retry caused by a timeout costs 10 tokens (DefaultRetryCost) - // - a retry caused by other errors costs 5 tokens (DefaultRetryTimeoutCost) - // - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement) - // - // You can disable rate limiting by setting this field to ratelimit.None. - RateLimiter RateLimiter - - // The cost to deduct from the RateLimiter's token bucket per retry. - RetryCost uint - - // The cost to deduct from the RateLimiter's token bucket per retry caused - // by timeout error. - RetryTimeoutCost uint - - // The cost to payback to the RateLimiter's token bucket for successful - // attempts. - NoRetryIncrement uint -} - -// RateLimiter provides the interface for limiting the rate of attempt retries -// allowed by the retryer. -type RateLimiter interface { - GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error) - AddTokens(uint) error -} - -// Standard is the standard retry pattern for the SDK. It uses a set of -// retryable checks to determine of the failed attempt should be retried, and -// what retry delay should be used. -type Standard struct { - options StandardOptions - - timeout IsErrorTimeout - retryable IsErrorRetryable - backoff BackoffDelayer -} - -// NewStandard initializes a standard retry behavior with defaults that can be -// overridden via functional options. 
-func NewStandard(fnOpts ...func(*StandardOptions)) *Standard { - o := StandardOptions{ - MaxAttempts: DefaultMaxAttempts, - MaxBackoff: DefaultMaxBackoff, - Retryables: append([]IsErrorRetryable{}, DefaultRetryables...), - Timeouts: append([]IsErrorTimeout{}, DefaultTimeouts...), - - RateLimiter: ratelimit.NewTokenRateLimit(DefaultRetryRateTokens), - RetryCost: DefaultRetryCost, - RetryTimeoutCost: DefaultRetryTimeoutCost, - NoRetryIncrement: DefaultNoRetryIncrement, - } - for _, fn := range fnOpts { - fn(&o) - } - if o.MaxAttempts <= 0 { - o.MaxAttempts = DefaultMaxAttempts - } - - backoff := o.Backoff - if backoff == nil { - backoff = NewExponentialJitterBackoff(o.MaxBackoff) - } - - return &Standard{ - options: o, - backoff: backoff, - retryable: IsErrorRetryables(o.Retryables), - timeout: IsErrorTimeouts(o.Timeouts), - } -} - -// MaxAttempts returns the maximum number of attempts that can be made for a -// request before failing. -func (s *Standard) MaxAttempts() int { - return s.options.MaxAttempts -} - -// IsErrorRetryable returns if the error is can be retried or not. Should not -// consider the number of attempts made. -func (s *Standard) IsErrorRetryable(err error) bool { - return s.retryable.IsErrorRetryable(err).Bool() -} - -// RetryDelay returns the delay to use before another request attempt is made. -func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) { - return s.backoff.BackoffDelay(attempt, err) -} - -// GetAttemptToken returns the token to be released after then attempt completes. -// The release token will add NoRetryIncrement to the RateLimiter token pool if -// the attempt was successful. If the attempt failed, nothing will be done. -func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) { - return s.GetInitialToken(), nil -} - -// GetInitialToken returns a token for adding the NoRetryIncrement to the -// RateLimiter token if the attempt completed successfully without error. -// -// InitialToken applies to result of the each attempt, including the first. -// Whereas the RetryToken applies to the result of subsequent attempts. -// -// Deprecated: use GetAttemptToken instead. -func (s *Standard) GetInitialToken() func(error) error { - return releaseToken(s.noRetryIncrement).release -} - -func (s *Standard) noRetryIncrement() error { - return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement) -} - -// GetRetryToken attempts to deduct the retry cost from the retry token pool. -// Returning the token release function, or error. 
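// Not part of the removed vendor file: a sketch of building the Standard
// retryer above with a few overrides, roughly as a caller would now do against
// the module-managed copy of this package. The concrete values are arbitrary
// examples.
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	r := retry.NewStandard(func(o *retry.StandardOptions) {
		o.MaxAttempts = 5               // DefaultMaxAttempts is 3
		o.MaxBackoff = 30 * time.Second // DefaultMaxBackoff is 20s
		// Appending to o.Retryables or o.Timeouts here is safe, per the field docs.
	})

	// Cap the backoff of an existing retryer without rebuilding it.
	capped := retry.AddWithMaxBackoffDelay(r, 10*time.Second)

	fmt.Println(r.MaxAttempts()) // 5
	delay, _ := capped.RetryDelay(2, errors.New("transient failure"))
	fmt.Println(delay) // jittered, bounded by the 10s max backoff
}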
-func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) { - cost := s.options.RetryCost - - if s.timeout.IsErrorTimeout(opErr).Bool() { - cost = s.options.RetryTimeoutCost - } - - fn, err := s.options.RateLimiter.GetToken(ctx, cost) - if err != nil { - return nil, fmt.Errorf("failed to get rate limit token, %w", err) - } - - return releaseToken(fn).release, nil -} - -func nopRelease(error) error { return nil } - -type releaseToken func() error - -func (f releaseToken) release(err error) error { - if err != nil { - return nil - } - - return f() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go deleted file mode 100644 index c4b844d15..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go +++ /dev/null @@ -1,60 +0,0 @@ -package retry - -import ( - "errors" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// IsErrorThrottle provides the interface of an implementation to determine if -// a error response from an operation is a throttling error. -type IsErrorThrottle interface { - IsErrorThrottle(error) aws.Ternary -} - -// IsErrorThrottles is a collection of checks to determine of the error a -// throttle error. Iterates through the checks and returns the state of -// throttle if any check returns something other than unknown. -type IsErrorThrottles []IsErrorThrottle - -// IsErrorThrottle returns if the error is a throttle error if any of the -// checks in the list return a value other than unknown. -func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary { - for _, re := range r { - if v := re.IsErrorThrottle(err); v != aws.UnknownTernary { - return v - } - } - return aws.UnknownTernary -} - -// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface. -type IsErrorThrottleFunc func(error) aws.Ternary - -// IsErrorThrottle returns if the error is a throttle error. -func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary { - return fn(err) -} - -// ThrottleErrorCode determines if an attempt should be retried based on the -// API error code. -type ThrottleErrorCode struct { - Codes map[string]struct{} -} - -// IsErrorThrottle return if the error is a throttle error based on the error -// codes. Returns unknown if the error doesn't have a code or it is unknown. -func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary { - var v interface{ ErrorCode() string } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - _, ok := r.Codes[v.ErrorCode()] - if !ok { - return aws.UnknownTernary - } - - return aws.TrueTernary -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go deleted file mode 100644 index 3d47870d2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go +++ /dev/null @@ -1,52 +0,0 @@ -package retry - -import ( - "errors" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// IsErrorTimeout provides the interface of an implementation to determine if -// a error matches. -type IsErrorTimeout interface { - IsErrorTimeout(err error) aws.Ternary -} - -// IsErrorTimeouts is a collection of checks to determine of the error is -// retryable. Iterates through the checks and returns the state of retryable -// if any check returns something other than unknown. 
-type IsErrorTimeouts []IsErrorTimeout - -// IsErrorTimeout returns if the error is retryable if any of the checks in -// the list return a value other than unknown. -func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary { - for _, t := range ts { - if v := t.IsErrorTimeout(err); v != aws.UnknownTernary { - return v - } - } - return aws.UnknownTernary -} - -// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface. -type IsErrorTimeoutFunc func(error) aws.Ternary - -// IsErrorTimeout returns if the error is retryable. -func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary { - return fn(err) -} - -// TimeouterError provides the IsErrorTimeout implementation for determining if -// an error is a timeout based on type with the Timeout method. -type TimeouterError struct{} - -// IsErrorTimeout returns if the error is a timeout error. -func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary { - var v interface{ Timeout() bool } - - if !errors.As(err, &v) { - return aws.UnknownTernary - } - - return aws.BoolTernary(v.Timeout()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go deleted file mode 100644 index b0ba4cb2f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go +++ /dev/null @@ -1,127 +0,0 @@ -package aws - -import ( - "context" - "fmt" - "time" -) - -// RetryMode provides the mode the API client will use to create a retryer -// based on. -type RetryMode string - -const ( - // RetryModeStandard model provides rate limited retry attempts with - // exponential backoff delay. - RetryModeStandard RetryMode = "standard" - - // RetryModeAdaptive model provides attempt send rate limiting on throttle - // responses in addition to standard mode's retry rate limiting. - // - // Adaptive retry mode is experimental and is subject to change in the - // future. - RetryModeAdaptive RetryMode = "adaptive" -) - -// ParseRetryMode attempts to parse a RetryMode from the given string. -// Returning error if the value is not a known RetryMode. -func ParseRetryMode(v string) (mode RetryMode, err error) { - switch v { - case "standard": - return RetryModeStandard, nil - case "adaptive": - return RetryModeAdaptive, nil - default: - return mode, fmt.Errorf("unknown RetryMode, %v", v) - } -} - -func (m RetryMode) String() string { return string(m) } - -// Retryer is an interface to determine if a given error from a -// attempt should be retried, and if so what backoff delay to apply. The -// default implementation used by most services is the retry package's Standard -// type. Which contains basic retry logic using exponential backoff. -type Retryer interface { - // IsErrorRetryable returns if the failed attempt is retryable. This check - // should determine if the error can be retried, or if the error is - // terminal. - IsErrorRetryable(error) bool - - // MaxAttempts returns the maximum number of attempts that can be made for - // an attempt before failing. A value of 0 implies that the attempt should - // be retried until it succeeds if the errors are retryable. - MaxAttempts() int - - // RetryDelay returns the delay that should be used before retrying the - // attempt. Will return error if the delay could not be determined. - RetryDelay(attempt int, opErr error) (time.Duration, error) - - // GetRetryToken attempts to deduct the retry cost from the retry token pool. - // Returning the token release function, or error. 
- GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error) - - // GetInitialToken returns the initial attempt token that can increment the - // retry token pool if the attempt is successful. - GetInitialToken() (releaseToken func(error) error) -} - -// RetryerV2 is an interface to determine if a given error from an attempt -// should be retried, and if so what backoff delay to apply. The default -// implementation used by most services is the retry package's Standard type. -// Which contains basic retry logic using exponential backoff. -// -// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken -// method in favor of GetAttemptToken which takes a context, and can return an error. -// -// The SDK's retry package's Attempt middleware, and utilities will always -// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if -// GetAttemptToken is not implemented. -type RetryerV2 interface { - Retryer - - // GetInitialToken returns the initial attempt token that can increment the - // retry token pool if the attempt is successful. - // - // Deprecated: This method does not provide a way to block using Context, - // nor can it return an error. Use RetryerV2, and GetAttemptToken instead. - GetInitialToken() (releaseToken func(error) error) - - // GetAttemptToken returns the send token that can be used to rate limit - // attempt calls. Will be used by the SDK's retry package's Attempt - // middleware to get a send token prior to calling the temp and releasing - // the send token after the attempt has been made. - GetAttemptToken(context.Context) (func(error) error, error) -} - -// NopRetryer provides a RequestRetryDecider implementation that will flag -// all attempt errors as not retryable, with a max attempts of 1. -type NopRetryer struct{} - -// IsErrorRetryable returns false for all error values. -func (NopRetryer) IsErrorRetryable(error) bool { return false } - -// MaxAttempts always returns 1 for the original attempt. -func (NopRetryer) MaxAttempts() int { return 1 } - -// RetryDelay is not valid for the NopRetryer. Will always return error. -func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { - return 0, fmt.Errorf("not retrying any attempt errors") -} - -// GetRetryToken returns a stub function that does nothing. -func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) { - return nopReleaseToken, nil -} - -// GetInitialToken returns a stub function that does nothing. -func (NopRetryer) GetInitialToken() func(error) error { - return nopReleaseToken -} - -// GetAttemptToken returns a stub function that does nothing. -func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) { - return nopReleaseToken, nil -} - -func nopReleaseToken(error) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go deleted file mode 100644 index 3af9b2b33..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go +++ /dev/null @@ -1,14 +0,0 @@ -package aws - -// ExecutionEnvironmentID is the AWS execution environment runtime identifier. -type ExecutionEnvironmentID string - -// RuntimeEnvironment is a collection of values that are determined at runtime -// based on the environment that the SDK is executing in. Some of these values -// may or may not be present based on the executing environment and certain SDK -// configuration properties that drive whether these values are populated.. 
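// Not part of the removed vendor file: a sketch of choosing between the
// Retryer implementations defined above. The PBM_AWS_RETRY_MODE variable name
// and the disable flag are made-up examples, not existing configuration.
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

// chooseRetryer returns the single-attempt NopRetryer when retries are
// disabled, otherwise the standard rate-limited retryer.
func chooseRetryer(disabled bool) aws.Retryer {
	if disabled {
		return aws.NopRetryer{}
	}
	return retry.NewStandard()
}

func main() {
	mode, err := aws.ParseRetryMode(os.Getenv("PBM_AWS_RETRY_MODE"))
	if err != nil {
		mode = aws.RetryModeStandard // fall back to the default mode on unknown input
	}
	fmt.Println("mode:", mode, "max attempts:", chooseRetryer(false).MaxAttempts())
}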
-type RuntimeEnvironment struct { - EnvironmentIdentifier ExecutionEnvironmentID - Region string - EC2InstanceMetadataRegion string -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go deleted file mode 100644 index cbf22f1d0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go +++ /dev/null @@ -1,115 +0,0 @@ -package v4 - -import ( - "strings" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -func lookupKey(service, region string) string { - var s strings.Builder - s.Grow(len(region) + len(service) + 3) - s.WriteString(region) - s.WriteRune('/') - s.WriteString(service) - return s.String() -} - -type derivedKey struct { - AccessKey string - Date time.Time - Credential []byte -} - -type derivedKeyCache struct { - values map[string]derivedKey - mutex sync.RWMutex -} - -func newDerivedKeyCache() derivedKeyCache { - return derivedKeyCache{ - values: make(map[string]derivedKey), - } -} - -func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte { - key := lookupKey(service, region) - s.mutex.RLock() - if cred, ok := s.get(key, credentials, signingTime.Time); ok { - s.mutex.RUnlock() - return cred - } - s.mutex.RUnlock() - - s.mutex.Lock() - if cred, ok := s.get(key, credentials, signingTime.Time); ok { - s.mutex.Unlock() - return cred - } - cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime) - entry := derivedKey{ - AccessKey: credentials.AccessKeyID, - Date: signingTime.Time, - Credential: cred, - } - s.values[key] = entry - s.mutex.Unlock() - - return cred -} - -func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) { - cacheEntry, ok := s.retrieveFromCache(key) - if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) { - return cacheEntry.Credential, true - } - return nil, false -} - -func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) { - if v, ok := s.values[key]; ok { - return v, true - } - return derivedKey{}, false -} - -// SigningKeyDeriver derives a signing key from a set of credentials -type SigningKeyDeriver struct { - cache derivedKeyCache -} - -// NewSigningKeyDeriver returns a new SigningKeyDeriver -func NewSigningKeyDeriver() *SigningKeyDeriver { - return &SigningKeyDeriver{ - cache: newDerivedKeyCache(), - } -} - -// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing. 
-func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte { - return k.cache.Get(credential, service, region, signingTime) -} - -func deriveKey(secret, service, region string, t SigningTime) []byte { - hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat())) - hmacRegion := HMACSHA256(hmacDate, []byte(region)) - hmacService := HMACSHA256(hmacRegion, []byte(service)) - return HMACSHA256(hmacService, []byte("aws4_request")) -} - -func isSameDay(x, y time.Time) bool { - xYear, xMonth, xDay := x.Date() - yYear, yMonth, yDay := y.Date() - - if xYear != yYear { - return false - } - - if xMonth != yMonth { - return false - } - - return xDay == yDay -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go deleted file mode 100644 index a23cb003b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go +++ /dev/null @@ -1,40 +0,0 @@ -package v4 - -// Signature Version 4 (SigV4) Constants -const ( - // EmptyStringSHA256 is the hex encoded sha256 value of an empty string - EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` - - // UnsignedPayload indicates that the request payload body is unsigned - UnsignedPayload = "UNSIGNED-PAYLOAD" - - // AmzAlgorithmKey indicates the signing algorithm - AmzAlgorithmKey = "X-Amz-Algorithm" - - // AmzSecurityTokenKey indicates the security token to be used with temporary credentials - AmzSecurityTokenKey = "X-Amz-Security-Token" - - // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' - AmzDateKey = "X-Amz-Date" - - // AmzCredentialKey is the access key ID and credential scope - AmzCredentialKey = "X-Amz-Credential" - - // AmzSignedHeadersKey is the set of headers signed for the request - AmzSignedHeadersKey = "X-Amz-SignedHeaders" - - // AmzSignatureKey is the query parameter to store the SigV4 signature - AmzSignatureKey = "X-Amz-Signature" - - // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter - TimeFormat = "20060102T150405Z" - - // ShortTimeFormat is the shorten time format used in the credential scope - ShortTimeFormat = "20060102" - - // ContentSHAKey is the SHA256 of request body - ContentSHAKey = "X-Amz-Content-Sha256" - - // StreamingEventsPayload indicates that the request payload body is a signed event stream. 
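// Not part of the removed vendor file: deriveKey above chains four HMAC-SHA256
// operations, but it lives in an internal package and cannot be imported. The
// same chain can be reproduced with only the standard library; the secret,
// date, region, and service below are placeholder example values.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "EXAMPLESECRETACCESSKEY" // placeholder, never a real secret
	shortDate, region, service := "20150830", "us-east-1", "s3"

	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(shortDate))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	// This derived key is what the cache above reuses for same-day,
	// same-region/service signings.
	fmt.Println(hex.EncodeToString(kSigning))
}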
- StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go deleted file mode 100644 index c61955ad5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go +++ /dev/null @@ -1,82 +0,0 @@ -package v4 - -import ( - sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings" -) - -// Rules houses a set of Rule needed for validation of a -// string value -type Rules []Rule - -// Rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that Rule -type Rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r Rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// MapRule generic Rule for maps -type MapRule map[string]struct{} - -// IsValid for the map Rule satisfies whether it exists in the map -func (m MapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// AllowList is a generic Rule for include listing -type AllowList struct { - Rule -} - -// IsValid for AllowList checks if the value is within the AllowList -func (w AllowList) IsValid(value string) bool { - return w.Rule.IsValid(value) -} - -// ExcludeList is a generic Rule for exclude listing -type ExcludeList struct { - Rule -} - -// IsValid for AllowList checks if the value is within the AllowList -func (b ExcludeList) IsValid(value string) bool { - return !b.Rule.IsValid(value) -} - -// Patterns is a list of strings to match against -type Patterns []string - -// IsValid for Patterns checks each pattern and returns if a match has -// been found -func (p Patterns) IsValid(value string) bool { - for _, pattern := range p { - if sdkstrings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// InclusiveRules rules allow for rules to depend on one another -type InclusiveRules []Rule - -// IsValid will return true if all rules are true -func (r InclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go deleted file mode 100644 index 734e548bd..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ /dev/null @@ -1,69 +0,0 @@ -package v4 - -// IgnoredHeaders is a list of headers that are ignored during signing -var IgnoredHeaders = Rules{ - ExcludeList{ - MapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - "Expect": struct{}{}, - }, - }, -} - -// RequiredSignedHeaders is a allow list for Build canonical headers. 
-var RequiredSignedHeaders = Rules{ - AllowList{ - MapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Context": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - "X-Amz-Tagging": struct{}{}, - }, - }, - Patterns{"X-Amz-Object-Lock-"}, - Patterns{"X-Amz-Meta-"}, -} - -// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value -// represents whether or not it is a pattern. -var AllowedQueryHoisting = InclusiveRules{ - ExcludeList{RequiredSignedHeaders}, - Patterns{"X-Amz-"}, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go deleted file mode 100644 index e7fa7a1b1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go +++ /dev/null @@ -1,13 +0,0 @@ -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" -) - -// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. -func HMACSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go deleted file mode 100644 index bf93659a4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go +++ /dev/null @@ -1,75 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. 
IPv6 literals may include -// a zone identifier. -// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go deleted file mode 100644 index fc7887909..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go +++ /dev/null @@ -1,13 +0,0 @@ -package v4 - -import "strings" - -// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope -func BuildCredentialScope(signingTime SigningTime, region, service string) string { - return strings.Join([]string{ - signingTime.ShortTimeFormat(), - region, - service, - "aws4_request", - }, "/") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go deleted file mode 100644 index 1de06a765..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go +++ /dev/null @@ -1,36 +0,0 @@ -package v4 - -import "time" - -// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. -type SigningTime struct { - time.Time - timeFormat string - shortTimeFormat string -} - -// NewSigningTime creates a new SigningTime given a time.Time -func NewSigningTime(t time.Time) SigningTime { - return SigningTime{ - Time: t, - } -} - -// TimeFormat provides a time formatted in the X-Amz-Date format. -func (m *SigningTime) TimeFormat() string { - return m.format(&m.timeFormat, TimeFormat) -} - -// ShortTimeFormat provides a time formatted of 20060102. 
-func (m *SigningTime) ShortTimeFormat() string { - return m.format(&m.shortTimeFormat, ShortTimeFormat) -} - -func (m *SigningTime) format(target *string, format string) string { - if len(*target) > 0 { - return *target - } - v := m.Time.Format(format) - *target = v - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go deleted file mode 100644 index d025dbaa0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go +++ /dev/null @@ -1,80 +0,0 @@ -package v4 - -import ( - "net/url" - "strings" -) - -const doubleSpace = " " - -// StripExcessSpaces will rewrite the passed in slice's string values to not -// contain multiple side-by-side spaces. -func StripExcessSpaces(str string) string { - var j, k, l, m, spaces int - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - return str - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - return string(buf[:m]) -} - -// GetURIPath returns the escaped URI component from the provided URL. -func GetURIPath(u *url.URL) string { - var uriPath string - - if len(u.Opaque) > 0 { - const schemeSep, pathSep, queryStart = "//", "/", "?" - - opaque := u.Opaque - // Cut off the query string if present. - if idx := strings.Index(opaque, queryStart); idx >= 0 { - opaque = opaque[:idx] - } - - // Cutout the scheme separator if present. - if strings.Index(opaque, schemeSep) == 0 { - opaque = opaque[len(schemeSep):] - } - - // capture URI path starting with first path separator. 
- if idx := strings.Index(opaque, pathSep); idx >= 0 { - uriPath = opaque[idx:] - } - } else { - uriPath = u.EscapedPath() - } - - if len(uriPath) == 0 { - uriPath = "/" - } - - return uriPath -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go deleted file mode 100644 index 8a46220a3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ /dev/null @@ -1,420 +0,0 @@ -package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const computePayloadHashMiddlewareID = "ComputePayloadHash" - -// HashComputationError indicates an error occurred while computing the signing hash -type HashComputationError struct { - Err error -} - -// Error is the error message -func (e *HashComputationError) Error() string { - return fmt.Sprintf("failed to compute payload hash: %v", e.Err) -} - -// Unwrap returns the underlying error if one is set -func (e *HashComputationError) Unwrap() error { - return e.Err -} - -// SigningError indicates an error condition occurred while performing SigV4 signing -type SigningError struct { - Err error -} - -func (e *SigningError) Error() string { - return fmt.Sprintf("failed to sign request: %v", e.Err) -} - -// Unwrap returns the underlying error cause -func (e *SigningError) Unwrap() error { - return e.Err -} - -// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that -// switches between unsigned and signed payload based on TLS state for request. -// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth. -// By default, SDK uses this middleware for known AWS APIs that support such TLS based auth selection . -// -// Usage example - -// S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to -// dynamically switch between unsigned and signed payload based on TLS state for request. -func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) - return err -} - -// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and set payload sha256 middleware. -type dynamicPayloadSigningMiddleware struct { -} - -// ID returns the resolver identifier -func (m *dynamicPayloadSigningMiddleware) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize delegates SHA256 computation according to whether the request -// is TLS-enabled. 
-func (m *dynamicPayloadSigningMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if req.IsHTTPS() { - return (&UnsignedPayload{}).HandleFinalize(ctx, in, next) - } - return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next) -} - -// UnsignedPayload sets the SigV4 request payload hash to unsigned. -// -// Will not set the Unsigned Payload magic SHA value, if a SHA has already been -// stored in the context. (e.g. application pre-computed SHA256 before making -// API call). -// -// This middleware does not check the X-Amz-Content-Sha256 header, if that -// header is serialized a middleware must translate it into the context. -type UnsignedPayload struct{} - -// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation -// middleware stack -func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -// ID returns the unsignedPayload identifier -func (m *UnsignedPayload) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize sets the payload hash magic value to the unsigned sentinel. -func (m *UnsignedPayload) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if GetPayloadHash(ctx) == "" { - ctx = SetPayloadHash(ctx, v4Internal.UnsignedPayload) - } - return next.HandleFinalize(ctx, in) -} - -// ComputePayloadSHA256 computes SHA256 payload hash to sign. -// -// Will not set the Unsigned Payload magic SHA value, if a SHA has already been -// stored in the context. (e.g. application pre-computed SHA256 before making -// API call). -// -// This middleware does not check the X-Amz-Content-Sha256 header, if that -// header is serialized a middleware must translate it into the context. -type ComputePayloadSHA256 struct{} - -// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the -// operation middleware stack -func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the -// operation middleware stack -func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Remove(computePayloadHashMiddlewareID) - return err -} - -// ID is the middleware name -func (m *ComputePayloadSHA256) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize computes the payload hash for the request, storing it to the -// context. This is a no-op if a caller has previously set that value. 
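// Not part of the removed vendor file: the payload hash these middlewares
// store in the context is simply the hex-encoded SHA-256 of the request body,
// or the UNSIGNED-PAYLOAD sentinel when body signing is skipped. A caller
// signing requests manually computes it the same way.
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// payloadHash mirrors what ComputePayloadSHA256 does with the request stream.
func payloadHash(body io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, body); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, _ := payloadHash(bytes.NewReader(nil)) // empty body
	// Prints e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,
	// the EmptyStringSHA256 constant defined earlier in the internal v4 package.
	fmt.Println(sum)
}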
-func (m *ComputePayloadSHA256) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if GetPayloadHash(ctx) != "" { - return next.HandleFinalize(ctx, in) - } - - _, span := tracing.StartSpan(ctx, "ComputePayloadSHA256") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("unexpected request middleware type %T", in.Request), - } - } - - hash := sha256.New() - if stream := req.GetStream(); stream != nil { - _, err = io.Copy(hash, stream) - if err != nil { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("failed to compute payload hash, %w", err), - } - } - - if err := req.RewindStream(); err != nil { - return out, metadata, &HashComputationError{ - Err: fmt.Errorf("failed to seek body to start, %w", err), - } - } - } - - ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) - - span.End() - return next.HandleFinalize(ctx, in) -} - -// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the -// ComputePayloadSHA256 middleware with the UnsignedPayload middleware. -// -// Use this to disable computing the Payload SHA256 checksum and instead use -// UNSIGNED-PAYLOAD for the SHA256 value. -func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{}) - return err -} - -// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to -// the Payload hash stored in the context. -type ContentSHA256Header struct{} - -// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the -// operation middleware stack -func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) -} - -// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware -// from the operation middleware stack -func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID()) - return err -} - -// ID returns the ContentSHA256HeaderMiddleware identifier -func (m *ContentSHA256Header) ID() string { - return "SigV4ContentSHA256Header" -} - -// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash -// stored in the context. -func (m *ContentSHA256Header) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} - } - - req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) - return next.HandleFinalize(ctx, in) -} - -// SignHTTPRequestMiddlewareOptions is the configuration options for -// [SignHTTPRequestMiddleware]. -// -// Deprecated: [SignHTTPRequestMiddleware] is deprecated. -type SignHTTPRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - Signer HTTPSigner - LogSigning bool -} - -// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 -// HTTP Signing. -// -// Deprecated: AWS service clients no longer use this middleware. 
Signing as an -// SDK operation is now performed through an internal per-service middleware -// which opaquely selects and uses the signer from the resolved auth scheme. -type SignHTTPRequestMiddleware struct { - credentialsProvider aws.CredentialsProvider - signer HTTPSigner - logSigning bool -} - -// NewSignHTTPRequestMiddleware constructs a [SignHTTPRequestMiddleware] using -// the given [Signer] for signing requests. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { - return &SignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - signer: options.Signer, - logSigning: options.LogSigning, - } -} - -// ID is the SignHTTPRequestMiddleware identifier. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func (s *SignHTTPRequestMiddleware) ID() string { - return "Signing" -} - -// HandleFinalize will take the provided input and sign the request using the -// SigV4 authentication scheme. -// -// Deprecated: SignHTTPRequestMiddleware is deprecated. -func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if !haveCredentialProvider(s.credentialsProvider) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} - } - - signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) - payloadHash := GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} - } - - credentials, err := s.credentialsProvider.Retrieve(ctx) - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} - } - - signerOptions := []func(o *SignerOptions){ - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }, - } - - // existing DisableURIPathEscaping is equivalent in purpose - // to authentication scheme property DisableDoubleEncoding - disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) - if overridden { - signerOptions = append(signerOptions, func(o *SignerOptions) { - o.DisableURIPathEscaping = disableDoubleEncoding - }) - } - - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} - } - - ctx = awsmiddleware.SetSigningCredentials(ctx, credentials) - - return next.HandleFinalize(ctx, in) -} - -// StreamingEventsPayload signs input event stream messages. -type StreamingEventsPayload struct{} - -// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. -func AddStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before) -} - -// ID identifies the middleware. -func (s *StreamingEventsPayload) ID() string { - return computePayloadHashMiddlewareID -} - -// HandleFinalize marks the input stream to be signed with SigV4. 
-func (s *StreamingEventsPayload) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - contentSHA := GetPayloadHash(ctx) - if len(contentSHA) == 0 { - contentSHA = v4Internal.StreamingEventsPayload - } - - ctx = SetPayloadHash(ctx, contentSHA) - - return next.HandleFinalize(ctx, in) -} - -// GetSignedRequestSignature attempts to extract the signature of the request. -// Returning an error if the request is unsigned, or unable to extract the -// signature. -func GetSignedRequestSignature(r *http.Request) ([]byte, error) { - const authHeaderSignatureElem = "Signature=" - - if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ",") - for _, p := range ps { - p = strings.TrimSpace(p) - if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { - sig := p[len(authHeaderSignatureElem):] - if len(sig) == 0 { - return nil, fmt.Errorf("invalid request signature authorization header") - } - return hex.DecodeString(sig) - } - } - } - - if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { - return hex.DecodeString(sig) - } - - return nil, fmt.Errorf("request not signed") -} - -func haveCredentialProvider(p aws.CredentialsProvider) bool { - if p == nil { - return false - } - - return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil)) -} - -type payloadHashKey struct{} - -// GetPayloadHash retrieves the payload hash to use for signing -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetPayloadHash(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string) - return v -} - -// SetPayloadHash sets the payload hash to be used for signing the request -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetPayloadHash(ctx context.Context, hash string) context.Context { - return middleware.WithStackValue(ctx, payloadHashKey{}, hash) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go deleted file mode 100644 index e1a066512..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go +++ /dev/null @@ -1,127 +0,0 @@ -package v4 - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/middleware" - smithyHTTP "github.com/aws/smithy-go/transport/http" -) - -// HTTPPresigner is an interface to a SigV4 signer that can sign create a -// presigned URL for a HTTP requests. -type HTTPPresigner interface { - PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignedHTTPRequest provides the URL and signed headers that are included -// in the presigned URL. -type PresignedHTTPRequest struct { - URL string - Method string - SignedHeader http.Header -} - -// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. 
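// Not part of the removed vendor file: a sketch of presigning a request
// directly, assuming the concrete Signer defined later in this package (which
// provides PresignHTTP) is used as the HTTPPresigner. The URL, region, and
// static credentials are placeholders.
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	creds := aws.Credentials{AccessKeyID: "EXAMPLEKEYID", SecretAccessKey: "EXAMPLESECRET"}
	req, _ := http.NewRequest(http.MethodGet, "https://examplebucket.s3.amazonaws.com/backup.tar", nil)

	signer := v4.NewSigner()
	// Presigned S3-style GETs typically use the UNSIGNED-PAYLOAD sentinel as
	// the payload hash.
	url, signedHeaders, err := signer.PresignHTTP(context.Background(), creds, req,
		"UNSIGNED-PAYLOAD", "s3", "us-east-1", time.Now().UTC())
	if err != nil {
		panic(err)
	}
	fmt.Println(url)           // presigned URL with X-Amz-Signature in the query
	fmt.Println(signedHeaders) // headers that must accompany the request
}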
-type PresignHTTPRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - Presigner HTTPPresigner - LogSigning bool -} - -// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a -// presigned URL for an HTTP request. -// -// Will short circuit the middleware stack and not forward onto the next -// Finalize handler. -type PresignHTTPRequestMiddleware struct { - credentialsProvider aws.CredentialsProvider - presigner HTTPPresigner - logSigning bool -} - -// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware -// initialized with the presigner. -func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { - return &PresignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - presigner: options.Presigner, - logSigning: options.LogSigning, - } -} - -// ID provides the middleware ID. -func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 presign authentication scheme. -// -// Since the signed request is not a valid HTTP request -func (s *PresignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyHTTP.Request) - if !ok { - return out, metadata, &SigningError{ - Err: fmt.Errorf("unexpected request middleware type %T", in.Request), - } - } - - httpReq := req.Build(ctx) - if !haveCredentialProvider(s.credentialsProvider) { - out.Result = &PresignedHTTPRequest{ - URL: httpReq.URL.String(), - Method: httpReq.Method, - SignedHeader: http.Header{}, - } - - return out, metadata, nil - } - - signingName := awsmiddleware.GetSigningName(ctx) - signingRegion := awsmiddleware.GetSigningRegion(ctx) - payloadHash := GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{ - Err: fmt.Errorf("computed payload hash missing from context"), - } - } - - credentials, err := s.credentialsProvider.Retrieve(ctx) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to retrieve credentials: %w", err), - } - } - - u, h, err := s.presigner.PresignHTTP(ctx, credentials, - httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(), - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to sign http request, %w", err), - } - } - - out.Result = &PresignedHTTPRequest{ - URL: u, - Method: httpReq.Method, - SignedHeader: h, - } - - return out, metadata, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go deleted file mode 100644 index 66aa2bd6a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go +++ /dev/null @@ -1,86 +0,0 @@ -package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "github.com/aws/aws-sdk-go-v2/aws" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - "strings" - "time" -) - -// EventStreamSigner is an AWS EventStream protocol signer. 
-type EventStreamSigner interface { - GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) -} - -// StreamSignerOptions is the configuration options for StreamSigner. -type StreamSignerOptions struct{} - -// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads. -type StreamSigner struct { - options StreamSignerOptions - - credentials aws.Credentials - service string - region string - - prevSignature []byte - - signingKeyDeriver *v4Internal.SigningKeyDeriver -} - -// NewStreamSigner returns a new AWS EventStream protocol signer. -func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner { - o := StreamSignerOptions{} - - for _, fn := range optFns { - fn(&o) - } - - return &StreamSigner{ - options: o, - credentials: credentials, - service: service, - region: region, - signingKeyDeriver: v4Internal.NewSigningKeyDeriver(), - prevSignature: seedSignature, - } -} - -// GetSignature signs the provided header and payload bytes. -func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - prevSignature := s.prevSignature - - st := v4Internal.NewSigningTime(signingTime) - - sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) - - scope := v4Internal.BuildCredentialScope(st, s.region, s.service) - - stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st) - - signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign)) - s.prevSignature = signature - - return signature, nil -} - -func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string { - hash := sha256.New() - return strings.Join([]string{ - "AWS4-HMAC-SHA256-PAYLOAD", - signingTime.TimeFormat(), - credentialScope, - hex.EncodeToString(previousSignature), - hex.EncodeToString(makeHash(hash, headers)), - hex.EncodeToString(makeHash(hash, payload)), - }, "\n") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go deleted file mode 100644 index 7ed91d5ba..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ /dev/null @@ -1,564 +0,0 @@ -// Package v4 implements the AWS signature version 4 algorithm (commonly known -// as SigV4). -// -// For more information about SigV4, see [Signing AWS API requests] in the IAM -// user guide. -// -// While this implementation CAN work in an external context, it is developed -// primarily for SDK use and you may encounter fringe behaviors around header -// canonicalization. -// -// # Pre-escaping a request URI -// -// AWS v4 signature validation requires that the canonical string's URI path -// component must be the escaped form of the HTTP request's path. -// -// The Go HTTP client will perform escaping automatically on the HTTP request. -// This may cause signature validation errors because the request differs from -// the URI path or query from which the signature was generated. -// -// Because of this, we recommend that you explicitly escape the request when -// using this signer outside of the SDK to prevent possible signature mismatch. 
-// This can be done by setting URL.Opaque on the request. The signer will -// prefer that value, falling back to the return of URL.EscapedPath if unset. -// -// When setting URL.Opaque you must do so in the form of: -// -// "///" -// -// // e.g. -// "//example.com/some/path" -// -// The leading "//" and hostname are required or the escaping will not work -// correctly. -// -// The TestStandaloneSign unit test provides a complete example of using the -// signer outside of the SDK and pre-escaping the URI path. -// -// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html -package v4 - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "fmt" - "hash" - "net/http" - "net/textproto" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/logging" -) - -const ( - signingAlgorithm = "AWS4-HMAC-SHA256" - authorizationHeader = "Authorization" - - // Version of signing v4 - Version = "SigV4" -) - -// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests -type HTTPSigner interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error -} - -type keyDerivator interface { - DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte -} - -// SignerOptions is the SigV4 Signer options. -type SignerOptions struct { - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool - - // The logger to send log messages to. - Logger logging.Logger - - // Enable logging of signed requests. - // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent - // presigned URL. - LogSigning bool - - // Disables setting the session token on the request as part of signing - // through X-Amz-Security-Token. This is needed for variations of v4 that - // present the token elsewhere. - DisableSessionToken bool -} - -// Signer applies AWS v4 signing to given request. Use this to sign requests -// that need to be signed with AWS V4 Signatures. 
-type Signer struct { - options SignerOptions - keyDerivator keyDerivator -} - -// NewSigner returns a new SigV4 Signer -func NewSigner(optFns ...func(signer *SignerOptions)) *Signer { - options := SignerOptions{} - - for _, fn := range optFns { - fn(&options) - } - - return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()} -} - -type httpSigner struct { - Request *http.Request - ServiceName string - Region string - Time v4Internal.SigningTime - Credentials aws.Credentials - KeyDerivator keyDerivator - IsPreSign bool - - PayloadHash string - - DisableHeaderHoisting bool - DisableURIPathEscaping bool - DisableSessionToken bool -} - -func (s *httpSigner) Build() (signedRequest, error) { - req := s.Request - - query := req.URL.Query() - headers := req.Header - - s.setRequiredSigningFields(headers, query) - - // Sort Each Query Key's Values - for key := range query { - sort.Strings(query[key]) - } - - v4Internal.SanitizeHostForHeader(req) - - credentialScope := s.buildCredentialScope() - credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope - if s.IsPreSign { - query.Set(v4Internal.AmzCredentialKey, credentialStr) - } - - unsignedHeaders := headers - if s.IsPreSign && !s.DisableHeaderHoisting { - var urlValues url.Values - urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers) - for k := range urlValues { - query[k] = urlValues[k] - } - } - - host := req.URL.Host - if len(req.Host) > 0 { - host = req.Host - } - - signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) - - if s.IsPreSign { - query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr) - } - - var rawQuery strings.Builder - rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1)) - - canonicalURI := v4Internal.GetURIPath(req.URL) - if !s.DisableURIPathEscaping { - canonicalURI = httpbinding.EscapePath(canonicalURI, false) - } - - canonicalString := s.buildCanonicalString( - req.Method, - canonicalURI, - rawQuery.String(), - signedHeadersStr, - canonicalHeaderStr, - ) - - strToSign := s.buildStringToSign(credentialScope, canonicalString) - signingSignature, err := s.buildSignature(strToSign) - if err != nil { - return signedRequest{}, err - } - - if s.IsPreSign { - rawQuery.WriteString("&X-Amz-Signature=") - rawQuery.WriteString(signingSignature) - } else { - headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) - } - - req.URL.RawQuery = rawQuery.String() - - return signedRequest{ - Request: req, - SignedHeaders: signedHeaders, - CanonicalString: canonicalString, - StringToSign: strToSign, - PreSigned: s.IsPreSign, - }, nil -} - -func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { - const credential = "Credential=" - const signedHeaders = "SignedHeaders=" - const signature = "Signature=" - const commaSpace = ", " - - var parts strings.Builder - parts.Grow(len(signingAlgorithm) + 1 + - len(credential) + len(credentialStr) + 2 + - len(signedHeaders) + len(signedHeadersStr) + 2 + - len(signature) + len(signingSignature), - ) - parts.WriteString(signingAlgorithm) - parts.WriteRune(' ') - parts.WriteString(credential) - parts.WriteString(credentialStr) - parts.WriteString(commaSpace) - parts.WriteString(signedHeaders) - parts.WriteString(signedHeadersStr) - parts.WriteString(commaSpace) - parts.WriteString(signature) - 
parts.WriteString(signingSignature) - return parts.String() -} - -// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the -// request is made to, and time the request is signed at. The signTime allows -// you to specify that a request is signed for the future, and cannot be -// used until then. -// -// The payloadHash is the hex encoded SHA-256 hash of the request payload, and -// must be provided. Even if the request has no payload (aka body). If the -// request has no payload you should use the hex encoded SHA-256 of an empty -// string as the payloadHash value. -// -// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -// -// Some services such as Amazon S3 accept alternative values for the payload -// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be -// included in the request signature. -// -// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html -// -// Sign differs from Presign in that it will sign the request using HTTP -// header values. This type of signing is intended for http.Request values that -// will not be shared, or are shared in a way the header values on the request -// will not be lost. -// -// The passed in request will be modified in place. -func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r, - PayloadHash: payloadHash, - ServiceName: service, - Region: region, - Credentials: credentials, - Time: v4Internal.NewSigningTime(signingTime.UTC()), - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - DisableSessionToken: options.DisableSessionToken, - KeyDerivator: s.keyDerivator, - } - - signedRequest, err := signer.Build() - if err != nil { - return err - } - - logSigningInfo(ctx, options, &signedRequest, false) - - return nil -} - -// PresignHTTP signs AWS v4 requests with the payload hash, service name, region -// the request is made to, and time the request is signed at. The signTime -// allows you to specify that a request is signed for the future, and cannot -// be used until then. -// -// Returns the signed URL and the map of HTTP headers that were included in the -// signature or an error if signing the request failed. For presigned requests -// these headers and their values must be included on the HTTP request when it -// is made. This is helpful to know what header values need to be shared with -// the party the presigned request will be distributed to. -// -// The payloadHash is the hex encoded SHA-256 hash of the request payload, and -// must be provided. Even if the request has no payload (aka body). If the -// request has no payload you should use the hex encoded SHA-256 of an empty -// string as the payloadHash value. -// -// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -// -// Some services such as Amazon S3 accept alternative values for the payload -// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be -// included in the request signature. -// -// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html -// -// PresignHTTP differs from SignHTTP in that it will sign the request using -// query string instead of header values. 
This allows you to share the -// Presigned Request's URL with third parties, or distribute it throughout your -// system with minimal dependencies. -// -// PresignHTTP will not set the expires time of the presigned request -// automatically. To specify the expire duration for a request add the -// "X-Amz-Expires" query parameter on the request with the value as the -// duration in seconds the presigned URL should be considered valid for. This -// parameter is not used by all AWS services, and is most notable used by -// Amazon S3 APIs. -// -// expires := 20 * time.Minute -// query := req.URL.Query() -// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)) -// req.URL.RawQuery = query.Encode() -// -// This method does not modify the provided request. -func (s *Signer) PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*SignerOptions), -) (signedURI string, signedHeaders http.Header, err error) { - options := s.options - - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r.Clone(r.Context()), - PayloadHash: payloadHash, - ServiceName: service, - Region: region, - Credentials: credentials, - Time: v4Internal.NewSigningTime(signingTime.UTC()), - IsPreSign: true, - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - DisableSessionToken: options.DisableSessionToken, - KeyDerivator: s.keyDerivator, - } - - signedRequest, err := signer.Build() - if err != nil { - return "", nil, err - } - - logSigningInfo(ctx, options, &signedRequest, true) - - signedHeaders = make(http.Header) - - // For the signed headers we canonicalize the header keys in the returned map. - // This avoids situations where can standard library double headers like host header. For example the standard - // library will set the Host header, even if it is present in lower-case form. - for k, v := range signedRequest.SignedHeaders { - key := textproto.CanonicalMIMEHeaderKey(k) - signedHeaders[key] = append(signedHeaders[key], v...) 
- } - - return signedRequest.Request.URL.String(), signedHeaders, nil -} - -func (s *httpSigner) buildCredentialScope() string { - return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName) -} - -func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - - // A list of headers to be converted to lower case to mitigate a limitation from S3 - lowerCaseHeaders := map[string]string{ - "X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508 - "X-Amz-Request-Payer": "x-amz-request-payer", // see #2764 - } - - for k, h := range header { - if newKey, ok := lowerCaseHeaders[k]; ok { - k = newKey - } - - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} - -func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { - signed = make(http.Header) - - var headers []string - const hostHeader = "host" - headers = append(headers, hostHeader) - signed[hostHeader] = append(signed[hostHeader], host) - - const contentLengthHeader = "content-length" - if length > 0 { - headers = append(headers, contentLengthHeader) - signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) - } - - for k, v := range header { - if !rule.IsValid(k) { - continue // ignored header - } - if strings.EqualFold(k, contentLengthHeader) { - // prevent signing already handled content-length header. - continue - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := signed[lowerCaseKey]; ok { - // include additional values - signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - signed[lowerCaseKey] = v - } - sort.Strings(headers) - - signedHeaders = strings.Join(headers, ";") - - var canonicalHeaders strings.Builder - n := len(headers) - const colon = ':' - for i := 0; i < n; i++ { - if headers[i] == hostHeader { - canonicalHeaders.WriteString(hostHeader) - canonicalHeaders.WriteRune(colon) - canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) - } else { - canonicalHeaders.WriteString(headers[i]) - canonicalHeaders.WriteRune(colon) - // Trim out leading, trailing, and dedup inner spaces from signed header values. 
- values := signed[headers[i]] - for j, v := range values { - cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) - canonicalHeaders.WriteString(cleanedValue) - if j < len(values)-1 { - canonicalHeaders.WriteRune(',') - } - } - } - canonicalHeaders.WriteRune('\n') - } - canonicalHeadersStr = canonicalHeaders.String() - - return signed, signedHeaders, canonicalHeadersStr -} - -func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { - return strings.Join([]string{ - method, - uri, - query, - canonicalHeaders, - signedHeaders, - s.PayloadHash, - }, "\n") -} - -func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { - return strings.Join([]string{ - signingAlgorithm, - s.Time.TimeFormat(), - credentialScope, - hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), - }, "\n") -} - -func makeHash(hash hash.Hash, b []byte) []byte { - hash.Reset() - hash.Write(b) - return hash.Sum(nil) -} - -func (s *httpSigner) buildSignature(strToSign string) (string, error) { - key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time) - return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil -} - -func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { - amzDate := s.Time.TimeFormat() - - if s.IsPreSign { - query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) - sessionToken := s.Credentials.SessionToken - if !s.DisableSessionToken && len(sessionToken) > 0 { - query.Set("X-Amz-Security-Token", sessionToken) - } - - query.Set(v4Internal.AmzDateKey, amzDate) - return - } - - headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) - - if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 { - headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) - } -} - -func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) { - if !options.LogSigning { - return - } - signedURLMsg := "" - if isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String()) - } - logger := logging.WithContext(ctx, options.Logger) - logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg) -} - -type signedRequest struct { - Request *http.Request - SignedHeaders http.Header - CanonicalString string - StringToSign string - PreSigned bool -} - -const logSignInfoMsg = `Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go deleted file mode 100644 index f3fc4d610..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by aws/generate.go DO NOT EDIT. - -package aws - -import ( - "github.com/aws/smithy-go/ptr" - "time" -) - -// Bool returns a pointer value for the bool value passed in. -func Bool(v bool) *bool { - return ptr.Bool(v) -} - -// BoolSlice returns a slice of bool pointers from the values -// passed in. 
-func BoolSlice(vs []bool) []*bool { - return ptr.BoolSlice(vs) -} - -// BoolMap returns a map of bool pointers from the values -// passed in. -func BoolMap(vs map[string]bool) map[string]*bool { - return ptr.BoolMap(vs) -} - -// Byte returns a pointer value for the byte value passed in. -func Byte(v byte) *byte { - return ptr.Byte(v) -} - -// ByteSlice returns a slice of byte pointers from the values -// passed in. -func ByteSlice(vs []byte) []*byte { - return ptr.ByteSlice(vs) -} - -// ByteMap returns a map of byte pointers from the values -// passed in. -func ByteMap(vs map[string]byte) map[string]*byte { - return ptr.ByteMap(vs) -} - -// String returns a pointer value for the string value passed in. -func String(v string) *string { - return ptr.String(v) -} - -// StringSlice returns a slice of string pointers from the values -// passed in. -func StringSlice(vs []string) []*string { - return ptr.StringSlice(vs) -} - -// StringMap returns a map of string pointers from the values -// passed in. -func StringMap(vs map[string]string) map[string]*string { - return ptr.StringMap(vs) -} - -// Int returns a pointer value for the int value passed in. -func Int(v int) *int { - return ptr.Int(v) -} - -// IntSlice returns a slice of int pointers from the values -// passed in. -func IntSlice(vs []int) []*int { - return ptr.IntSlice(vs) -} - -// IntMap returns a map of int pointers from the values -// passed in. -func IntMap(vs map[string]int) map[string]*int { - return ptr.IntMap(vs) -} - -// Int8 returns a pointer value for the int8 value passed in. -func Int8(v int8) *int8 { - return ptr.Int8(v) -} - -// Int8Slice returns a slice of int8 pointers from the values -// passed in. -func Int8Slice(vs []int8) []*int8 { - return ptr.Int8Slice(vs) -} - -// Int8Map returns a map of int8 pointers from the values -// passed in. -func Int8Map(vs map[string]int8) map[string]*int8 { - return ptr.Int8Map(vs) -} - -// Int16 returns a pointer value for the int16 value passed in. -func Int16(v int16) *int16 { - return ptr.Int16(v) -} - -// Int16Slice returns a slice of int16 pointers from the values -// passed in. -func Int16Slice(vs []int16) []*int16 { - return ptr.Int16Slice(vs) -} - -// Int16Map returns a map of int16 pointers from the values -// passed in. -func Int16Map(vs map[string]int16) map[string]*int16 { - return ptr.Int16Map(vs) -} - -// Int32 returns a pointer value for the int32 value passed in. -func Int32(v int32) *int32 { - return ptr.Int32(v) -} - -// Int32Slice returns a slice of int32 pointers from the values -// passed in. -func Int32Slice(vs []int32) []*int32 { - return ptr.Int32Slice(vs) -} - -// Int32Map returns a map of int32 pointers from the values -// passed in. -func Int32Map(vs map[string]int32) map[string]*int32 { - return ptr.Int32Map(vs) -} - -// Int64 returns a pointer value for the int64 value passed in. -func Int64(v int64) *int64 { - return ptr.Int64(v) -} - -// Int64Slice returns a slice of int64 pointers from the values -// passed in. -func Int64Slice(vs []int64) []*int64 { - return ptr.Int64Slice(vs) -} - -// Int64Map returns a map of int64 pointers from the values -// passed in. -func Int64Map(vs map[string]int64) map[string]*int64 { - return ptr.Int64Map(vs) -} - -// Uint returns a pointer value for the uint value passed in. -func Uint(v uint) *uint { - return ptr.Uint(v) -} - -// UintSlice returns a slice of uint pointers from the values -// passed in. 
-func UintSlice(vs []uint) []*uint { - return ptr.UintSlice(vs) -} - -// UintMap returns a map of uint pointers from the values -// passed in. -func UintMap(vs map[string]uint) map[string]*uint { - return ptr.UintMap(vs) -} - -// Uint8 returns a pointer value for the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return ptr.Uint8(v) -} - -// Uint8Slice returns a slice of uint8 pointers from the values -// passed in. -func Uint8Slice(vs []uint8) []*uint8 { - return ptr.Uint8Slice(vs) -} - -// Uint8Map returns a map of uint8 pointers from the values -// passed in. -func Uint8Map(vs map[string]uint8) map[string]*uint8 { - return ptr.Uint8Map(vs) -} - -// Uint16 returns a pointer value for the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return ptr.Uint16(v) -} - -// Uint16Slice returns a slice of uint16 pointers from the values -// passed in. -func Uint16Slice(vs []uint16) []*uint16 { - return ptr.Uint16Slice(vs) -} - -// Uint16Map returns a map of uint16 pointers from the values -// passed in. -func Uint16Map(vs map[string]uint16) map[string]*uint16 { - return ptr.Uint16Map(vs) -} - -// Uint32 returns a pointer value for the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return ptr.Uint32(v) -} - -// Uint32Slice returns a slice of uint32 pointers from the values -// passed in. -func Uint32Slice(vs []uint32) []*uint32 { - return ptr.Uint32Slice(vs) -} - -// Uint32Map returns a map of uint32 pointers from the values -// passed in. -func Uint32Map(vs map[string]uint32) map[string]*uint32 { - return ptr.Uint32Map(vs) -} - -// Uint64 returns a pointer value for the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return ptr.Uint64(v) -} - -// Uint64Slice returns a slice of uint64 pointers from the values -// passed in. -func Uint64Slice(vs []uint64) []*uint64 { - return ptr.Uint64Slice(vs) -} - -// Uint64Map returns a map of uint64 pointers from the values -// passed in. -func Uint64Map(vs map[string]uint64) map[string]*uint64 { - return ptr.Uint64Map(vs) -} - -// Float32 returns a pointer value for the float32 value passed in. -func Float32(v float32) *float32 { - return ptr.Float32(v) -} - -// Float32Slice returns a slice of float32 pointers from the values -// passed in. -func Float32Slice(vs []float32) []*float32 { - return ptr.Float32Slice(vs) -} - -// Float32Map returns a map of float32 pointers from the values -// passed in. -func Float32Map(vs map[string]float32) map[string]*float32 { - return ptr.Float32Map(vs) -} - -// Float64 returns a pointer value for the float64 value passed in. -func Float64(v float64) *float64 { - return ptr.Float64(v) -} - -// Float64Slice returns a slice of float64 pointers from the values -// passed in. -func Float64Slice(vs []float64) []*float64 { - return ptr.Float64Slice(vs) -} - -// Float64Map returns a map of float64 pointers from the values -// passed in. -func Float64Map(vs map[string]float64) map[string]*float64 { - return ptr.Float64Map(vs) -} - -// Time returns a pointer value for the time.Time value passed in. -func Time(v time.Time) *time.Time { - return ptr.Time(v) -} - -// TimeSlice returns a slice of time.Time pointers from the values -// passed in. -func TimeSlice(vs []time.Time) []*time.Time { - return ptr.TimeSlice(vs) -} - -// TimeMap returns a map of time.Time pointers from the values -// passed in. -func TimeMap(vs map[string]time.Time) map[string]*time.Time { - return ptr.TimeMap(vs) -} - -// Duration returns a pointer value for the time.Duration value passed in. 
-func Duration(v time.Duration) *time.Duration { - return ptr.Duration(v) -} - -// DurationSlice returns a slice of time.Duration pointers from the values -// passed in. -func DurationSlice(vs []time.Duration) []*time.Duration { - return ptr.DurationSlice(vs) -} - -// DurationMap returns a map of time.Duration pointers from the values -// passed in. -func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { - return ptr.DurationMap(vs) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go deleted file mode 100644 index 8d7c35a9e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go +++ /dev/null @@ -1,342 +0,0 @@ -package http - -import ( - "context" - "crypto/tls" - "net" - "net/http" - "reflect" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go/tracing" -) - -// Defaults for the HTTPTransportBuilder. -var ( - // Default connection pool options - DefaultHTTPTransportMaxIdleConns = 100 - DefaultHTTPTransportMaxIdleConnsPerHost = 10 - - // Default connection timeouts - DefaultHTTPTransportIdleConnTimeout = 90 * time.Second - DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second - DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second - - // Default to TLS 1.2 for all HTTPS requests. - DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12 -) - -// Timeouts for net.Dialer's network connection. -var ( - DefaultDialConnectTimeout = 30 * time.Second - DefaultDialKeepAliveTimeout = 30 * time.Second -) - -// BuildableClient provides a HTTPClient implementation with options to -// create copies of the HTTPClient when additional configuration is provided. -// -// The client's methods will not share the http.Transport value between copies -// of the BuildableClient. Only exported member values of the Transport and -// optional Dialer will be copied between copies of BuildableClient. -type BuildableClient struct { - transport *http.Transport - dialer *net.Dialer - - initOnce sync.Once - - clientTimeout time.Duration - client *http.Client -} - -// NewBuildableClient returns an initialized client for invoking HTTP -// requests. -func NewBuildableClient() *BuildableClient { - return &BuildableClient{} -} - -// Do implements the HTTPClient interface's Do method to invoke a HTTP request, -// and receive the response. Uses the BuildableClient's current -// configuration to invoke the http.Request. -// -// If connection pooling is enabled (aka HTTP KeepAlive) the client will only -// share pooled connections with its own instance. Copies of the -// BuildableClient will have their own connection pools. -// -// Redirect (3xx) responses will not be followed, the HTTP response received -// will returned instead. -func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) { - b.initOnce.Do(b.build) - - return b.client.Do(req) -} - -// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient. -// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client. 
-func (b *BuildableClient) Freeze() aws.HTTPClient { - cpy := b.clone() - cpy.build() - return cpy.client -} - -func (b *BuildableClient) build() { - b.client = wrapWithLimitedRedirect(&http.Client{ - Timeout: b.clientTimeout, - Transport: b.GetTransport(), - }) -} - -func (b *BuildableClient) clone() *BuildableClient { - cpy := NewBuildableClient() - cpy.transport = b.GetTransport() - cpy.dialer = b.GetDialer() - cpy.clientTimeout = b.clientTimeout - - return cpy -} - -// WithTransportOptions copies the BuildableClient and returns it with the -// http.Transport options applied. -// -// If a non (*http.Transport) was set as the round tripper, the round tripper -// will be replaced with a default Transport value before invoking the option -// functions. -func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient { - cpy := b.clone() - - tr := cpy.GetTransport() - for _, opt := range opts { - opt(tr) - } - cpy.transport = tr - - return cpy -} - -// WithDialerOptions copies the BuildableClient and returns it with the -// net.Dialer options applied. Will set the client's http.Transport DialContext -// member. -func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient { - cpy := b.clone() - - dialer := cpy.GetDialer() - for _, opt := range opts { - opt(dialer) - } - cpy.dialer = dialer - - tr := cpy.GetTransport() - tr.DialContext = cpy.dialer.DialContext - cpy.transport = tr - - return cpy -} - -// WithTimeout Sets the timeout used by the client for all requests. -func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient { - cpy := b.clone() - cpy.clientTimeout = timeout - return cpy -} - -// GetTransport returns a copy of the client's HTTP Transport. -func (b *BuildableClient) GetTransport() *http.Transport { - var tr *http.Transport - if b.transport != nil { - tr = b.transport.Clone() - } else { - tr = defaultHTTPTransport() - } - - return tr -} - -// GetDialer returns a copy of the client's network dialer. -func (b *BuildableClient) GetDialer() *net.Dialer { - var dialer *net.Dialer - if b.dialer != nil { - dialer = shallowCopyStruct(b.dialer).(*net.Dialer) - } else { - dialer = defaultDialer() - } - - return dialer -} - -// GetTimeout returns a copy of the client's timeout to cancel requests with. 
-func (b *BuildableClient) GetTimeout() time.Duration { - return b.clientTimeout -} - -func defaultDialer() *net.Dialer { - return &net.Dialer{ - Timeout: DefaultDialConnectTimeout, - KeepAlive: DefaultDialKeepAliveTimeout, - DualStack: true, - } -} - -func defaultHTTPTransport() *http.Transport { - dialer := defaultDialer() - - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: traceDialContext(dialer.DialContext), - TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout, - MaxIdleConns: DefaultHTTPTransportMaxIdleConns, - MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost, - IdleConnTimeout: DefaultHTTPTransportIdleConnTimeout, - ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout, - ForceAttemptHTTP2: true, - TLSClientConfig: &tls.Config{ - MinVersion: DefaultHTTPTransportTLSMinVersion, - }, - } - - return tr -} - -type dialContext func(ctx context.Context, network, addr string) (net.Conn, error) - -func traceDialContext(dc dialContext) dialContext { - return func(ctx context.Context, network, addr string) (net.Conn, error) { - span, _ := tracing.GetSpan(ctx) - span.SetProperty("net.peer.name", addr) - - conn, err := dc(ctx, network, addr) - if err != nil { - return conn, err - } - - raddr := conn.RemoteAddr() - if raddr == nil { - return conn, err - } - - host, port, err := net.SplitHostPort(raddr.String()) - if err != nil { // don't blow up just because we couldn't parse - span.SetProperty("net.peer.addr", raddr.String()) - } else { - span.SetProperty("net.peer.host", host) - span.SetProperty("net.peer.port", port) - } - - return conn, err - } -} - -// shallowCopyStruct creates a shallow copy of the passed in source struct, and -// returns that copy of the same struct type. -func shallowCopyStruct(src interface{}) interface{} { - srcVal := reflect.ValueOf(src) - srcValType := srcVal.Type() - - var returnAsPtr bool - if srcValType.Kind() == reflect.Ptr { - srcVal = srcVal.Elem() - srcValType = srcValType.Elem() - returnAsPtr = true - } - dstVal := reflect.New(srcValType).Elem() - - for i := 0; i < srcValType.NumField(); i++ { - ft := srcValType.Field(i) - if len(ft.PkgPath) != 0 { - // unexported fields have a PkgPath - continue - } - - dstVal.Field(i).Set(srcVal.Field(i)) - } - - if returnAsPtr { - dstVal = dstVal.Addr() - } - - return dstVal.Interface() -} - -// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to -// not follow any redirect other than 307 and 308. No other redirect will be -// followed. -// -// If the client does not have a Transport defined will use a new SDK default -// http.Transport configuration. -func wrapWithLimitedRedirect(c *http.Client) *http.Client { - tr := c.Transport - if tr == nil { - tr = defaultHTTPTransport() - } - - cc := *c - cc.CheckRedirect = limitedRedirect - cc.Transport = suppressBadHTTPRedirectTransport{ - tr: tr, - } - - return &cc -} - -// limitedRedirect is a CheckRedirect that prevents the client from following -// any non 307/308 HTTP status code redirects. -// -// The 307 and 308 redirects are allowed because the client must use the -// original HTTP method for the redirected to location. Whereas 301 and 302 -// allow the client to switch to GET for the redirect. -// -// Suppresses all redirect requests with a URL of badHTTPRedirectLocation. -func limitedRedirect(r *http.Request, via []*http.Request) error { - // Request.Response, in CheckRedirect is the response that is triggering - // the redirect. 
- resp := r.Response - if r.URL.String() == badHTTPRedirectLocation { - resp.Header.Del(badHTTPRedirectLocation) - return http.ErrUseLastResponse - } - - switch resp.StatusCode { - case 307, 308: - // Only allow 307 and 308 redirects as they preserve the method. - return nil - } - - return http.ErrUseLastResponse -} - -// suppressBadHTTPRedirectTransport provides an http.RoundTripper -// implementation that wraps another http.RoundTripper to prevent HTTP client -// receiving 301 and 302 HTTP responses redirects without the required location -// header. -// -// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect, -// that check for responses with having a URL of baseHTTPRedirectLocation, and -// suppress the redirect. -type suppressBadHTTPRedirectTransport struct { - tr http.RoundTripper -} - -const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation` - -// RoundTrip backfills a stub location when a 301/302 response is received -// without a location. This stub location is used by limitedRedirect to prevent -// the HTTP client from failing attempting to use follow a redirect without a -// location value. -func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) { - resp, err := t.tr.RoundTrip(r) - if err != nil { - return resp, err - } - - // S3 is the only known service to return 301 without location header. - // The Go standard library HTTP client will return an opaque error if it - // tries to follow a 301/302 response missing the location header. - switch resp.StatusCode { - case 301, 302: - if v := resp.Header.Get("Location"); len(v) == 0 { - resp.Header.Set("Location", badHTTPRedirectLocation) - } - } - - return resp, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go deleted file mode 100644 index 556f54a7f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go +++ /dev/null @@ -1,42 +0,0 @@ -package http - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// removeContentTypeHeader is a build middleware that removes -// content type header if content-length header is unset or -// is set to zero, -type removeContentTypeHeader struct { -} - -// ID the name of the middleware. -func (m *removeContentTypeHeader) ID() string { - return "RemoveContentTypeHeader" -} - -// HandleBuild adds or appends the constructed user agent to the request. -func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - // remove contentTypeHeader when content-length is zero - if req.ContentLength == 0 { - req.Header.Del("content-type") - } - - return next.HandleBuild(ctx, in) -} - -// RemoveContentTypeHeader removes content-type header if -// content length is unset or equal to zero. 
-func RemoveContentTypeHeader(stack *middleware.Stack) error { - return stack.Build.Add(&removeContentTypeHeader{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go deleted file mode 100644 index 44651c990..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go +++ /dev/null @@ -1,33 +0,0 @@ -package http - -import ( - "errors" - "fmt" - - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ResponseError provides the HTTP centric error type wrapping the underlying error -// with the HTTP response value and the deserialized RequestID. -type ResponseError struct { - *smithyhttp.ResponseError - - // RequestID associated with response error - RequestID string -} - -// ServiceRequestID returns the request id associated with Response Error -func (e *ResponseError) ServiceRequestID() string { return e.RequestID } - -// Error returns the formatted error -func (e *ResponseError) Error() string { - return fmt.Sprintf( - "https response error StatusCode: %d, RequestID: %s, %v", - e.Response.StatusCode, e.RequestID, e.Err) -} - -// As populates target and returns true if the type of target is a error type that -// the ResponseError embeds, (e.g.AWS HTTP ResponseError) -func (e *ResponseError) As(target interface{}) bool { - return errors.As(e.ResponseError, target) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go deleted file mode 100644 index a1ad20fe3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go +++ /dev/null @@ -1,56 +0,0 @@ -package http - -import ( - "context" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddResponseErrorMiddleware adds response error wrapper middleware -func AddResponseErrorMiddleware(stack *middleware.Stack) error { - // add error wrapper middleware before request id retriever middleware so that it can wrap the error response - // returned by operation deserializers - return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) -} - -// ResponseErrorWrapper wraps operation errors with ResponseError. -type ResponseErrorWrapper struct { -} - -// ID returns the middleware identifier -func (m *ResponseErrorWrapper) ID() string { - return "ResponseErrorWrapper" -} - -// HandleDeserialize wraps the stack error with smithyhttp.ResponseError. -func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err == nil { - // Nothing to do when there is no error. - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. 
- return out, metadata, err - } - - // look for request id in metadata - reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) - - // Wrap the returned smithy error with the request id retrieved from the metadata - err = &ResponseError{ - ResponseError: &smithyhttp.ResponseError{ - Response: resp, - Err: err, - }, - RequestID: reqID, - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go deleted file mode 100644 index 993929bd9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go +++ /dev/null @@ -1,104 +0,0 @@ -package http - -import ( - "context" - "fmt" - "io" - "time" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type readResult struct { - n int - err error -} - -// ResponseTimeoutError is an error when the reads from the response are -// delayed longer than the timeout the read was configured for. -type ResponseTimeoutError struct { - TimeoutDur time.Duration -} - -// Timeout returns that the error is was caused by a timeout, and can be -// retried. -func (*ResponseTimeoutError) Timeout() bool { return true } - -func (e *ResponseTimeoutError) Error() string { - return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur) -} - -// timeoutReadCloser will handle body reads that take too long. -// We will return a ErrReadTimeout error if a timeout occurs. -type timeoutReadCloser struct { - reader io.ReadCloser - duration time.Duration -} - -// Read will spin off a goroutine to call the reader's Read method. We will -// select on the timer's channel or the read's channel. Whoever completes first -// will be returned. -func (r *timeoutReadCloser) Read(b []byte) (int, error) { - timer := time.NewTimer(r.duration) - c := make(chan readResult, 1) - - go func() { - n, err := r.reader.Read(b) - timer.Stop() - c <- readResult{n: n, err: err} - }() - - select { - case data := <-c: - return data.n, data.err - case <-timer.C: - return 0, &ResponseTimeoutError{TimeoutDur: r.duration} - } -} - -func (r *timeoutReadCloser) Close() error { - return r.reader.Close() -} - -// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the -// response body so that a read that takes too long will return an error. 
-func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error { - return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After) -} - -// readTimeout wraps the response body with a timeoutReadCloser -type readTimeout struct { - duration time.Duration -} - -// ID returns the id of the middleware -func (*readTimeout) ID() string { - return "ReadResponseTimeout" -} - -// HandleDeserialize implements the DeserializeMiddleware interface -func (m *readTimeout) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - response.Body = &timeoutReadCloser{ - reader: response.Body, - duration: m.duration, - } - out.RawResponse = response - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go deleted file mode 100644 index cc3ae8114..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go +++ /dev/null @@ -1,42 +0,0 @@ -package aws - -import ( - "fmt" -) - -// Ternary is an enum allowing an unknown or none state in addition to a bool's -// true and false. -type Ternary int - -func (t Ternary) String() string { - switch t { - case UnknownTernary: - return "unknown" - case FalseTernary: - return "false" - case TrueTernary: - return "true" - default: - return fmt.Sprintf("unknown value, %d", int(t)) - } -} - -// Bool returns true if the value is TrueTernary, false otherwise. -func (t Ternary) Bool() bool { - return t == TrueTernary -} - -// Enumerations for the values of the Ternary type. -const ( - UnknownTernary Ternary = iota - FalseTernary - TrueTernary -) - -// BoolTernary returns a true or false Ternary value for the bool provided. -func BoolTernary(v bool) Ternary { - if v { - return TrueTernary - } - return FalseTernary -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go deleted file mode 100644 index 5f729d45e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package aws provides core functionality for making requests to AWS services. -package aws - -// SDKName is the name of this AWS SDK -const SDKName = "aws-sdk-go-v2" - -// SDKVersion is the version of this SDK -const SDKVersion = goModuleVersion diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md deleted file mode 100644 index 996103e5d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ /dev/null @@ -1,798 +0,0 @@ -# v1.29.0 (2025-01-15) - -* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. 
The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.11 (2025-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.10 (2025-01-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.9 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.8 (2025-01-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.7 (2024-12-19) - -* **Bug Fix**: Fix improper use of printf-style functions. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.4 (2024-11-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.3 (2024-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.2 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.1 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2024-10-16) - -* **Feature**: Adds the LoadOptions hook `WithBaseEndpoint` for setting global endpoint override in-code. - -# v1.27.43 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.42 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.41 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.40 (2024-10-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.39 (2024-09-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.38 (2024-09-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.37 (2024-09-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.36 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.35 (2024-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.34 (2024-09-16) - -* **Bug Fix**: Read `AWS_CONTAINER_CREDENTIALS_FULL_URI` env variable if set when reading a profile with `credential_source`. Also ensure `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` is always read before it - -# v1.27.33 (2024-09-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.32 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.31 (2024-08-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.30 (2024-08-23) - -* **Bug Fix**: Don't fail credentials unit tests if credentials are found on a file - -# v1.27.29 (2024-08-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.28 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.27 (2024-07-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.26 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.25 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.24 (2024-07-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.23 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.22 (2024-06-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.21 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.20 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.19 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.18 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.17 (2024-06-03) - -* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.16 (2024-05-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.15 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.14 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.13 (2024-05-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.12 (2024-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.11 (2024-04-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.10 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.9 (2024-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.8 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.7 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.6 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.5 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.4 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.3 (2024-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2024-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.6 (2024-01-22) - -* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.5 (2024-01-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.4 (2024-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.2 (2023-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2023-12-08) - -* **Bug Fix**: Correct loading of [services *] sections into shared config. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2023-12-07) - -* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.12 (2023-12-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.11 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.10 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.9 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.8 (2023-11-28.3) - -* **Bug Fix**: Correct resolution of S3Express auth disable toggle. - -# v1.25.7 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.6 (2023-11-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.5 (2023-11-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.3 (2023-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.2 (2023-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.1 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2023-11-14) - -* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2023-11-13) - -* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2023-11-09.2) - -* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.3 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.2 (2023-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2023-11-06) - -* No change notes available for this release. - -# v1.22.0 (2023-11-02) - -* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2023-10-24) - -* No change notes available for this release. - -# v1.19.0 (2023-10-16) - -* **Feature**: Modify logic of retrieving user agent appID from env config - -# v1.18.45 (2023-10-12) - -* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.44 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.43 (2023-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.42 (2023-09-22) - -* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. -* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.41 (2023-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.40 (2023-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.39 (2023-09-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.38 (2023-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.37 (2023-08-23) - -* No change notes available for this release. 
- -# v1.18.36 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.35 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.34 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.33 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.32 (2023-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.29 (2023-07-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.28 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.27 (2023-06-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.26 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.25 (2023-05-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.24 (2023-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.23 (2023-05-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.22 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.21 (2023-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.20 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.19 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.18 (2023-03-16) - -* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
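
The v1.18.18 fix directly above concerns `RoleARN` supplied through the STS web-identity functional options. A hedged sketch of the call pattern, assuming the web identity token file is already configured (for example via `AWS_WEB_IDENTITY_TOKEN_FILE`) and using a placeholder role ARN:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
)

func main() {
	// RoleARN set through WebIdentityRoleOptions is honored by the
	// credential resolution chain after the v1.18.18 fix.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithWebIdentityRoleCredentialOptions(func(o *stscreds.WebIdentityRoleOptions) {
			o.RoleARN = "arn:aws:iam::123456789012:role/example" // placeholder ARN
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // pass cfg to service clients as usual
}
```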
- -# v1.18.17 (2023-03-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.16 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.15 (2023-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.14 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.13 (2023-02-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.12 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.11 (2023-02-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.10 (2023-01-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.9 (2023-01-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.8 (2023-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2022-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2022-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2022-12-15) - -* **Bug Fix**: Unify logic between shared config and in finding home directory -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.4 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2022-11-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.2 (2022-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2022-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2022-11-11) - -* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 -* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.11 (2022-11-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.10 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.8 (2022-09-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2022-08-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2022-08-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2022-08-14) - -* **Feature**: Add alternative mechanism for determning the users `$HOME` or `%USERPROFILE%` location when the environment variables are not present. 
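
The v1.18.0 entry above notes that SSO token refresh (via SSOTokenProvider) is wired into the SSO credential provider resolved from shared config. A sketch of how an application exercises it, assuming a hypothetical SSO-backed profile named `sso-dev` already exists in the shared config file; no refresh-specific code is needed on the caller's side:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithSharedConfigProfile("sso-dev"), // hypothetical SSO profile
	)
	if err != nil {
		log.Fatal(err)
	}

	// Retrieving credentials exercises the resolved SSO provider, including
	// any token refresh handled by the underlying token provider.
	creds, err := cfg.Credentials.Retrieve(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("credential source:", creds.Source)
}
```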
- -# v1.16.1 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2022-08-10) - -* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`. - -# v1.15.17 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.16 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.15 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.14 (2022-07-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.13 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.12 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.11 (2022-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.10 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.9 (2022-05-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.8 (2022-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.7 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.6 (2022-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.5 (2022-05-09) - -* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682) - -# v1.15.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2022-02-24) - -* **Feature**: Adds support for loading RetryMaxAttempts and RetryMod from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if custome retryer has not been specified. See [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about and how to use these new options. -* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589) -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-01-28) - -* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. 
[#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR. -* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. -* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-01-07) - -* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2021-12-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2021-12-02) - -* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.3 (2021-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.2 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.1 (2021-11-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.3 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-09-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-09-02) - -* **Feature**: Add support for S3 Multi-Region Access Point ARNs. - -# v1.7.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-07-15) - -* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. 
-* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-07-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-06-25) - -* **Feature**: Adds configuration setting for enabling endpoint discovery. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-05-20) - -* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. -* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go deleted file mode 100644 index 0577c6186..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ /dev/null @@ -1,228 +0,0 @@ -package config - -import ( - "context" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// defaultAWSConfigResolvers are a slice of functions that will resolve external -// configuration values into AWS configuration values. -// -// This will setup the AWS configuration's Region, -var defaultAWSConfigResolvers = []awsConfigResolver{ - // Resolves the default configuration the SDK's aws.Config will be - // initialized with. - resolveDefaultAWSConfig, - - // Sets the logger to be used. Could be user provided logger, and client - // logging mode. - resolveLogger, - resolveClientLogMode, - - // Sets the HTTP client and configuration to use for making requests using - // the HTTP transport. 
- resolveHTTPClient, - resolveCustomCABundle, - - // Sets the endpoint resolving behavior the API Clients will use for making - // requests to. Clients default to their own clients this allows overrides - // to be specified. The resolveEndpointResolver option is deprecated, but - // we still need to set it for backwards compatibility on config - // construction. - resolveEndpointResolver, - resolveEndpointResolverWithOptions, - - // Sets the retry behavior API clients will use within their retry attempt - // middleware. Defaults to unset, allowing API clients to define their own - // retry behavior. - resolveRetryer, - - // Sets the region the API Clients should use for making requests to. - resolveRegion, - resolveEC2IMDSRegion, - resolveDefaultRegion, - - // Sets the additional set of middleware stack mutators that will custom - // API client request pipeline middleware. - resolveAPIOptions, - - // Resolves the DefaultsMode that should be used by SDK clients. If this - // mode is set to DefaultsModeAuto. - // - // Comes after HTTPClient and CustomCABundle to ensure the HTTP client is - // configured if provided before invoking IMDS if mode is auto. Comes - // before resolving credentials so that those subsequent clients use the - // configured auto mode. - resolveDefaultsModeOptions, - - // Sets the resolved credentials the API clients will use for - // authentication. Provides the SDK's default credential chain. - // - // Should probably be the last step in the resolve chain to ensure that all - // other configurations are resolved first in case downstream credentials - // implementations depend on or can be configured with earlier resolved - // configuration options. - resolveCredentials, - - // Sets the resolved bearer authentication token API clients will use for - // httpBearerAuth authentication scheme. - resolveBearerAuthToken, - - // Sets the sdk app ID if present in env var or shared config profile - resolveAppID, - - resolveBaseEndpoint, - - // Sets the DisableRequestCompression if present in env var or shared config profile - resolveDisableRequestCompression, - - // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile - resolveRequestMinCompressSizeBytes, - - // Sets the AccountIDEndpointMode if present in env var or shared config profile - resolveAccountIDEndpointMode, - - // Sets the RequestChecksumCalculation if present in env var or shared config profile - resolveRequestChecksumCalculation, - - // Sets the ResponseChecksumValidation if present in env var or shared config profile - resolveResponseChecksumValidation, -} - -// A Config represents a generic configuration value or set of values. This type -// will be used by the AWSConfigResolvers to extract -// -// General the Config type will use type assertion against the Provider interfaces -// to extract specific data from the Config. -type Config interface{} - -// A loader is used to load external configuration data and returns it as -// a generic Config type. -// -// The loader should return an error if it fails to load the external configuration -// or the configuration data is malformed, or required components missing. -type loader func(context.Context, configs) (Config, error) - -// An awsConfigResolver will extract configuration data from the configs slice -// using the provider interfaces to extract specific functionality. The extracted -// configuration values will be written to the AWS Config value. 
-// -// The resolver should return an error if it it fails to extract the data, the -// data is malformed, or incomplete. -type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error - -// configs is a slice of Config values. These values will be used by the -// AWSConfigResolvers to extract external configuration values to populate the -// AWS Config type. -// -// Use AppendFromLoaders to add additional external Config values that are -// loaded from external sources. -// -// Use ResolveAWSConfig after external Config values have been added or loaded -// to extract the loaded configuration values into the AWS Config. -type configs []Config - -// AppendFromLoaders iterates over the slice of loaders passed in calling each -// loader function in order. The external config value returned by the loader -// will be added to the returned configs slice. -// -// If a loader returns an error this method will stop iterating and return -// that error. -func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) { - for _, fn := range loaders { - cfg, err := fn(ctx, cs) - if err != nil { - return nil, err - } - - cs = append(cs, cfg) - } - - return cs, nil -} - -// ResolveAWSConfig returns a AWS configuration populated with values by calling -// the resolvers slice passed in. Each resolver is called in order. Any resolver -// may overwrite the AWS Configuration value of a previous resolver. -// -// If an resolver returns an error this method will return that error, and stop -// iterating over the resolvers. -func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) { - var cfg aws.Config - - for _, fn := range resolvers { - if err := fn(ctx, &cfg, cs); err != nil { - return aws.Config{}, err - } - } - - return cfg, nil -} - -// ResolveConfig calls the provide function passing slice of configuration sources. -// This implements the aws.ConfigResolver interface. -func (cs configs) ResolveConfig(f func(configs []interface{}) error) error { - var cfgs []interface{} - for i := range cs { - cfgs = append(cfgs, cs[i]) - } - return f(cfgs) -} - -// LoadDefaultConfig reads the SDK's default external configurations, and -// populates an AWS Config with the values from the external configurations. -// -// An optional variadic set of additional Config values can be provided as input -// that will be prepended to the configs slice. Use this to add custom configuration. -// The custom configurations must satisfy the respective providers for their data -// or the custom data will be ignored by the resolvers and config loaders. -// -// cfg, err := config.LoadDefaultConfig( context.TODO(), -// config.WithSharedConfigProfile("test-profile"), -// ) -// if err != nil { -// panic(fmt.Sprintf("failed loading config, %v", err)) -// } -// -// The default configuration sources are: -// * Environment Variables -// * Shared Configuration and Shared Credentials files. 
-func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) { - var options LoadOptions - for _, optFn := range optFns { - if err := optFn(&options); err != nil { - return aws.Config{}, err - } - } - - // assign Load Options to configs - var cfgCpy = configs{options} - - cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options)) - if err != nil { - return aws.Config{}, err - } - - cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers) - if err != nil { - return aws.Config{}, err - } - - return cfg, nil -} - -func resolveConfigLoaders(options *LoadOptions) []loader { - loaders := make([]loader, 2) - loaders[0] = loadEnvConfig - - // specification of a profile should cause a load failure if it doesn't exist - if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" { - loaders[1] = loadSharedConfig - } else { - loaders[1] = loadSharedConfigIgnoreNotExist - } - - return loaders -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go deleted file mode 100644 index 20b66367f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go +++ /dev/null @@ -1,47 +0,0 @@ -package config - -import ( - "context" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" -) - -const execEnvVar = "AWS_EXECUTION_ENV" - -// DefaultsModeOptions is the set of options that are used to configure -type DefaultsModeOptions struct { - // The SDK configuration defaults mode. Defaults to legacy if not specified. - // - // Supported modes are: auto, cross-region, in-region, legacy, mobile, standard - Mode aws.DefaultsMode - - // The EC2 Instance Metadata Client that should be used when performing environment - // discovery when aws.DefaultsModeAuto is set. - // - // If not specified the SDK will construct a client if the instance metadata service has not been disabled by - // the AWS_EC2_METADATA_DISABLED environment variable. - IMDSClient *imds.Client -} - -func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) { - getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{}) - // honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection. - select { - case <-ctx.Done(): - return aws.RuntimeEnvironment{}, err - default: - } - - var imdsRegion string - if err == nil { - imdsRegion = getRegionOutput.Region - } - - return aws.RuntimeEnvironment{ - EnvironmentIdentifier: aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)), - Region: envConfig.Region, - EC2InstanceMetadataRegion: imdsRegion, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go deleted file mode 100644 index aab7164e2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package config provides utilities for loading configuration from multiple -// sources that can be used to configure the SDK's API clients, and utilities. -// -// The config package will load configuration from environment variables, AWS -// shared configuration file (~/.aws/config), and AWS shared credentials file -// (~/.aws/credentials). -// -// Use the LoadDefaultConfig to load configuration from all the SDK's supported -// sources, and resolve credentials using the SDK's default credential chain. 
-// -// LoadDefaultConfig allows for a variadic list of additional Config sources that can -// provide one or more configuration values which can be used to programmatically control the resolution -// of a specific value, or allow for broader range of additional configuration sources not supported by the SDK. -// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will -// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources -// implement the same provider interface, priority will be handled by the order in which the sources were passed in. -// -// A number of helpers (prefixed by “With“) are provided in this package that implement their respective provider -// interface. These helpers should be used for overriding configuration programmatically at runtime. -package config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go deleted file mode 100644 index a672850f5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ /dev/null @@ -1,919 +0,0 @@ -package config - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "strconv" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" -) - -// CredentialsSourceName provides a name of the provider when config is -// loaded from environment. -const CredentialsSourceName = "EnvConfigCredentials" - -// Environment variables that will be read for configuration values. -const ( - awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" - awsAccessKeyEnvVar = "AWS_ACCESS_KEY" - - awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" - awsSecretKeyEnvVar = "AWS_SECRET_KEY" - - awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" - - awsContainerCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" - awsContainerPProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - - awsRegionEnvVar = "AWS_REGION" - awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" - - awsProfileEnvVar = "AWS_PROFILE" - awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE" - - awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" - - awsConfigFileEnvVar = "AWS_CONFIG_FILE" - - awsCustomCABundleEnvVar = "AWS_CA_BUNDLE" - - awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE" - - awsRoleARNEnvVar = "AWS_ROLE_ARN" - awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME" - - awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY" - - awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION" - - awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE" - - awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" - - awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" - awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED" - - awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" - - awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT" - - awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT" - - awsDefaultMode = "AWS_DEFAULTS_MODE" - - awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS" - awsRetryMode = "AWS_RETRY_MODE" - awsSdkAppID = "AWS_SDK_UA_APP_ID" - - awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" - awsEndpointURL = "AWS_ENDPOINT_URL" - - 
awsDisableRequestCompression = "AWS_DISABLE_REQUEST_COMPRESSION" - awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" - - awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" - - awsAccountIDEnv = "AWS_ACCOUNT_ID" - awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE" - - awsRequestChecksumCalculation = "AWS_REQUEST_CHECKSUM_CALCULATION" - awsResponseChecksumValidation = "AWS_RESPONSE_CHECKSUM_VALIDATION" -) - -var ( - credAccessEnvKeys = []string{ - awsAccessKeyIDEnvVar, - awsAccessKeyEnvVar, - } - credSecretEnvKeys = []string{ - awsSecretAccessKeyEnvVar, - awsSecretKeyEnvVar, - } - regionEnvKeys = []string{ - awsRegionEnvVar, - awsDefaultRegionEnvVar, - } - profileEnvKeys = []string{ - awsProfileEnvVar, - awsDefaultProfileEnvVar, - } -) - -// EnvConfig is a collection of environment values the SDK will read -// setup config from. All environment values are optional. But some values -// such as credentials require multiple values to be complete or the values -// will be ignored. -type EnvConfig struct { - // Environment configuration values. If set both Access Key ID and Secret Access - // Key must be provided. Session Token and optionally also be provided, but is - // not required. - // - // # Access Key ID - // AWS_ACCESS_KEY_ID=AKID - // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. - // - // # Secret Access Key - // AWS_SECRET_ACCESS_KEY=SECRET - // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. - // - // # Session Token - // AWS_SESSION_TOKEN=TOKEN - Credentials aws.Credentials - - // ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials - // using the endpointcreds.Provider - ContainerCredentialsEndpoint string - - // ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve - // credentials from the container endpoint. - ContainerCredentialsRelativePath string - - // ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization - // header when attempting to retrieve credentials from the container credentials endpoint. - ContainerAuthorizationToken string - - // Region value will instruct the SDK where to make service API requests to. If is - // not provided in the environment the region must be provided before a service - // client request is made. - // - // AWS_REGION=us-west-2 - // AWS_DEFAULT_REGION=us-west-2 - Region string - - // Profile name the SDK should load use when loading shared configuration from the - // shared configuration files. If not provided "default" will be used as the - // profile name. - // - // AWS_PROFILE=my_profile - // AWS_DEFAULT_PROFILE=my_profile - SharedConfigProfile string - - // Shared credentials file path can be set to instruct the SDK to use an alternate - // file for the shared credentials. If not set the file will be loaded from - // $HOME/.aws/credentials on Linux/Unix based systems, and - // %USERPROFILE%\.aws\credentials on Windows. - // - // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials - SharedCredentialsFile string - - // Shared config file path can be set to instruct the SDK to use an alternate - // file for the shared config. If not set the file will be loaded from - // $HOME/.aws/config on Linux/Unix based systems, and - // %USERPROFILE%\.aws\config on Windows. 
- // - // AWS_CONFIG_FILE=$HOME/my_shared_config - SharedConfigFile string - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. - // Only use this if you want to configure the SDK to use a custom set - // of CAs. - // - // Enabling this option will attempt to merge the Transport - // into the SDK's HTTP client. If the client's Transport is - // not a http.Transport an error will be returned. If the - // Transport's TLS config is set this option will cause the - // SDK to overwrite the Transport's TLS config's RootCAs value. - // - // Setting a custom HTTPClient in the aws.Config options will override this setting. - // To use this option and custom HTTP client, the HTTP client needs to be provided - // when creating the config. Not the service client. - // - // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle - CustomCABundle string - - // Enables endpoint discovery via environment variables. - // - // AWS_ENABLE_ENDPOINT_DISCOVERY=true - EnableEndpointDiscovery aws.EndpointDiscoveryEnableState - - // Specifies the WebIdentity token the SDK should use to assume a role - // with. - // - // AWS_WEB_IDENTITY_TOKEN_FILE=file_path - WebIdentityTokenFilePath string - - // Specifies the IAM role arn to use when assuming an role. - // - // AWS_ROLE_ARN=role_arn - RoleARN string - - // Specifies the IAM role session name to use when assuming a role. - // - // AWS_ROLE_SESSION_NAME=session_name - RoleSessionName string - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // AWS_S3_USE_ARN_REGION=true - S3UseARNRegion *bool - - // Specifies if the EC2 IMDS service client is enabled. - // - // AWS_EC2_METADATA_DISABLED=true - EC2IMDSClientEnableState imds.ClientEnableState - - // Specifies if EC2 IMDSv1 fallback is disabled. - // - // AWS_EC2_METADATA_V1_DISABLED=true - EC2IMDSv1Disabled *bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 - EC2IMDSEndpointMode imds.EndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254 - EC2IMDSEndpoint string - - // Specifies if the S3 service should disable multi-region access points - // support. - // - // AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true - S3DisableMultiRegionAccessPoints *bool - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // AWS_USE_DUALSTACK_ENDPOINT=true - UseDualStackEndpoint aws.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // AWS_USE_FIPS_ENDPOINT=true - UseFIPSEndpoint aws.FIPSEndpointState - - // Specifies the SDK Defaults Mode used by services. - // - // AWS_DEFAULTS_MODE=standard - DefaultsMode aws.DefaultsMode - - // Specifies the maximum number attempts an API client will call an - // operation that fails with a retryable error. - // - // AWS_MAX_ATTEMPTS=3 - RetryMaxAttempts int - - // Specifies the retry model the API client will be created with. - // - // aws_retry_mode=standard - RetryMode aws.RetryMode - - // aws sdk app ID that can be added to user agent header string - AppID string - - // Flag used to disable configured endpoints. 
- IgnoreConfiguredEndpoints *bool - - // Value to contain configured endpoints to be propagated to - // corresponding endpoint resolution field. - BaseEndpoint string - - // determine if request compression is allowed, default to false - // retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION - DisableRequestCompression *bool - - // inclusive threshold request body size to trigger compression, - // default to 10240 and must be within 0 and 10485760 bytes inclusive - // retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES - RequestMinCompressSizeBytes *int64 - - // Whether S3Express auth is disabled. - // - // This will NOT prevent requests from being made to S3Express buckets, it - // will only bypass the modified endpoint routing and signing behaviors - // associated with the feature. - S3DisableExpressAuth *bool - - // Indicates whether account ID will be required/ignored in endpoint2.0 routing - AccountIDEndpointMode aws.AccountIDEndpointMode - - // Indicates whether request checksum should be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation - - // Indicates whether response checksum should be validated - ResponseChecksumValidation aws.ResponseChecksumValidation -} - -// loadEnvConfig reads configuration values from the OS's environment variables. -// Returning the a Config typed EnvConfig to satisfy the ConfigLoader func type. -func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) { - return NewEnvConfig() -} - -// NewEnvConfig retrieves the SDK's environment configuration. -// See `EnvConfig` for the values that will be retrieved. -func NewEnvConfig() (EnvConfig, error) { - var cfg EnvConfig - - creds := aws.Credentials{ - Source: CredentialsSourceName, - } - setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) - setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) - if creds.HasKeys() { - creds.AccountID = os.Getenv(awsAccountIDEnv) - creds.SessionToken = os.Getenv(awsSessionTokenEnvVar) - cfg.Credentials = creds - } - - cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar) - cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar) - cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar) - - setStringFromEnvVal(&cfg.Region, regionEnvKeys) - setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys) - - cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar) - cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar) - - cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar) - - cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar) - - cfg.RoleARN = os.Getenv(awsRoleARNEnvVar) - cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar) - - cfg.AppID = os.Getenv(awsSdkAppID) - - if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil { - return cfg, err - } - if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil { - return cfg, err - } - - if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil { - return cfg, err - } - - setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled}) 
- if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil { - return cfg, err - } - cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar) - if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil { - return cfg, err - } - - if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil { - return cfg, err - } - - if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil { - return cfg, err - } - - if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil { - return cfg, err - } - - if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil { - return cfg, err - } - if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil { - return cfg, err - } - - setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL}) - - if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil { - return cfg, err - } - - if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil { - return cfg, err - } - - if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil { - return cfg, err - } - - if err := setRequestChecksumCalculationFromEnvVal(&cfg.RequestChecksumCalculation, []string{awsRequestChecksumCalculation}); err != nil { - return cfg, err - } - if err := setResponseChecksumValidationFromEnvVal(&cfg.ResponseChecksumValidation, []string{awsResponseChecksumValidation}); err != nil { - return cfg, err - } - - return cfg, nil -} - -func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { - if len(c.DefaultsMode) == 0 { - return "", false, nil - } - return c.DefaultsMode, true, nil -} - -func (c EnvConfig) getAppID(context.Context) (string, bool, error) { - return c.AppID, len(c.AppID) > 0, nil -} - -func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) { - if c.DisableRequestCompression == nil { - return false, false, nil - } - return *c.DisableRequestCompression, true, nil -} - -func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) { - if c.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *c.RequestMinCompressSizeBytes, true, nil -} - -func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) { - return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil -} - -func (c EnvConfig) getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) { - return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil -} - -func (c EnvConfig) getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) { - return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil -} - -// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, -// and not 0. 
-func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { - if c.RetryMaxAttempts == 0 { - return 0, false, nil - } - return c.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the RetryMode of AWS_RETRY_MODE if was specified, and a -// valid value. -func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { - if len(c.RetryMode) == 0 { - return "", false, nil - } - return c.RetryMode, true, nil -} - -func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - switch { - case strings.EqualFold(value, "true"): - *state = imds.ClientDisabled - case strings.EqualFold(value, "false"): - *state = imds.ClientEnabled - default: - continue - } - break - } -} - -func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error { - for _, k := range keys { - if value := os.Getenv(k); len(value) > 0 { - if ok := mode.SetFromString(value); !ok { - return fmt.Errorf("invalid %s value: %s", k, value) - } - break - } - } - return nil -} - -func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) { - for _, k := range keys { - if value := os.Getenv(k); len(value) > 0 { - *mode, err = aws.ParseRetryMode(value) - if err != nil { - return fmt.Errorf("invalid %s value, %w", k, err) - } - break - } - } - return nil -} - -func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - if err := mode.SetFromString(value); err != nil { - return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) - } - } - return nil -} - -func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - switch value { - case "preferred": - *m = aws.AccountIDEndpointModePreferred - case "required": - *m = aws.AccountIDEndpointModeRequired - case "disabled": - *m = aws.AccountIDEndpointModeDisabled - default: - return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value) - } - break - } - return nil -} - -func setRequestChecksumCalculationFromEnvVal(m *aws.RequestChecksumCalculation, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - switch strings.ToLower(value) { - case checksumWhenSupported: - *m = aws.RequestChecksumCalculationWhenSupported - case checksumWhenRequired: - *m = aws.RequestChecksumCalculationWhenRequired - default: - return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value) - } - } - return nil -} - -func setResponseChecksumValidationFromEnvVal(m *aws.ResponseChecksumValidation, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - switch strings.ToLower(value) { - case checksumWhenSupported: - *m = aws.ResponseChecksumValidationWhenSupported - case checksumWhenRequired: - *m = aws.ResponseChecksumValidationWhenRequired - default: - return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value) - } - - } - return nil -} - -// GetRegion returns the AWS Region if set in the environment. Returns an empty -// string if not set. 
-func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { - if len(c.Region) == 0 { - return "", false, nil - } - return c.Region, true, nil -} - -// GetSharedConfigProfile returns the shared config profile if set in the -// environment. Returns an empty string if not set. -func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) { - if len(c.SharedConfigProfile) == 0 { - return "", false, nil - } - - return c.SharedConfigProfile, true, nil -} - -// getSharedConfigFiles returns a slice of filenames set in the environment. -// -// Will return the filenames in the order of: -// * Shared Config -func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) { - var files []string - if v := c.SharedConfigFile; len(v) > 0 { - files = append(files, v) - } - - if len(files) == 0 { - return nil, false, nil - } - return files, true, nil -} - -// getSharedCredentialsFiles returns a slice of filenames set in the environment. -// -// Will return the filenames in the order of: -// * Shared Credentials -func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) { - var files []string - if v := c.SharedCredentialsFile; len(v) > 0 { - files = append(files, v) - } - if len(files) == 0 { - return nil, false, nil - } - return files, true, nil -} - -// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was -func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { - if len(c.CustomCABundle) == 0 { - return nil, false, nil - } - - b, err := ioutil.ReadFile(c.CustomCABundle) - if err != nil { - return nil, false, err - } - return bytes.NewReader(b), true, nil -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. -func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { - if c.IgnoreConfiguredEndpoints == nil { - return false, false, nil - } - - return *c.IgnoreConfiguredEndpoints, true, nil -} - -func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) { - return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil -} - -// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use -// with configured endpoints. -func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { - if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" { - return endpt, true, nil - } - return "", false, nil -} - -func normalizeEnv(sdkID string) string { - upper := strings.ToUpper(sdkID) - return strings.ReplaceAll(upper, " ", "_") -} - -// GetS3UseARNRegion returns whether to allow ARNs to direct the region -// the S3 client's requests are sent to. -func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { - if c.S3UseARNRegion == nil { - return false, false, nil - } - - return *c.S3UseARNRegion, true, nil -} - -// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point -// support for the S3 client. -func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { - if c.S3DisableMultiRegionAccessPoints == nil { - return false, false, nil - } - - return *c.S3DisableMultiRegionAccessPoints, true, nil -} - -// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be -// used for requests. 
-func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { - if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - return aws.DualStackEndpointStateUnset, false, nil - } - - return c.UseDualStackEndpoint, true, nil -} - -// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be -// used for requests. -func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - - return c.UseFIPSEndpoint, true, nil -} - -func setStringFromEnvVal(dst *string, keys []string) { - for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { - *dst = v - break - } - } -} - -func setIntFromEnvVal(dst *int, keys []string) error { - for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return fmt.Errorf("invalid value %s=%s, %w", k, v, err) - } - *dst = int(i) - break - } - } - - return nil -} - -func setBoolPtrFromEnvVal(dst **bool, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - if *dst == nil { - *dst = new(bool) - } - - switch { - case strings.EqualFold(value, "false"): - **dst = false - case strings.EqualFold(value, "true"): - **dst = true - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true or false", - k, value) - } - break - } - - return nil -} - -func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue - } - - v, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value) - } else if v < 0 || v > max { - return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v) - } - if *dst == nil { - *dst = new(int64) - } - - **dst = v - break - } - - return nil -} - -func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, endpointDiscoveryDisabled): - *dst = aws.EndpointDiscoveryDisabled - case strings.EqualFold(value, endpointDiscoveryEnabled): - *dst = aws.EndpointDiscoveryEnabled - case strings.EqualFold(value, endpointDiscoveryAuto): - *dst = aws.EndpointDiscoveryAuto - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false or auto", - k, value) - } - } - return nil -} - -func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case strings.EqualFold(value, "true"): - *dst = aws.DualStackEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = aws.DualStackEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} - -func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error { - for _, k := range keys { - value := os.Getenv(k) - if len(value) == 0 { - continue // skip if empty - } - - switch { - case 
strings.EqualFold(value, "true"): - *dst = aws.FIPSEndpointStateEnabled - case strings.EqualFold(value, "false"): - *dst = aws.FIPSEndpointStateDisabled - default: - return fmt.Errorf( - "invalid value for environment variable, %s=%s, need true, false", - k, value) - } - } - return nil -} - -// GetEnableEndpointDiscovery returns resolved value for EnableEndpointDiscovery env variable setting. -func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) { - if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - - return c.EnableEndpointDiscovery, true, nil -} - -// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. -func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { - if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { - return imds.ClientDefaultEnableState, false, nil - } - - return c.EC2IMDSClientEnableState, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return c.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { - if len(c.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return c.EC2IMDSEndpoint, true, nil -} - -// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option -// resolver interface. -func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { - if c.EC2IMDSv1Disabled == nil { - return false, false - } - - return *c.EC2IMDSv1Disabled, true -} - -// GetS3DisableExpressAuth returns the configured value for -// [EnvConfig.S3DisableExpressAuth]. -func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) { - if c.S3DisableExpressAuth == nil { - return false, false - } - - return *c.S3DisableExpressAuth, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go deleted file mode 100644 index 654a7a77f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package config - -//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go -//go:generate gofmt -s -w ./ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go deleted file mode 100644 index 480a719da..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
- -package config - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.29.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go deleted file mode 100644 index 0810ecf16..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ /dev/null @@ -1,1209 +0,0 @@ -package config - -import ( - "context" - "io" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// LoadOptionsFunc is a type alias for LoadOptions functional option -type LoadOptionsFunc func(*LoadOptions) error - -// LoadOptions are discrete set of options that are valid for loading the -// configuration -type LoadOptions struct { - - // Region is the region to send requests to. - Region string - - // Credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // Token provider for authentication operations with bearer authentication. - BearerAuthTokenProvider smithybearer.TokenProvider - - // HTTPClient the SDK's API clients will use to invoke HTTP requests. - HTTPClient HTTPClient - - // EndpointResolver that can be used to provide or override an endpoint for - // the given service and region. - // - // See the `aws.EndpointResolver` documentation on usage. - // - // Deprecated: See EndpointResolverWithOptions - EndpointResolver aws.EndpointResolver - - // EndpointResolverWithOptions that can be used to provide or override an - // endpoint for the given service and region. - // - // See the `aws.EndpointResolverWithOptions` documentation on usage. - EndpointResolverWithOptions aws.EndpointResolverWithOptions - - // RetryMaxAttempts specifies the maximum number attempts an API client - // will call an operation that fails with a retryable error. - // - // This value will only be used if Retryer option is nil. - RetryMaxAttempts int - - // RetryMode specifies the retry model the API client will be created with. - // - // This value will only be used if Retryer option is nil. - RetryMode aws.RetryMode - - // Retryer is a function that provides a Retryer implementation. A Retryer - // guides how HTTP requests should be retried in case of recoverable - // failures. - // - // If not nil, RetryMaxAttempts, and RetryMode will be ignored. - Retryer func() aws.Retryer - - // APIOptions provides the set of middleware mutations modify how the API - // client requests will be handled. This is useful for adding additional - // tracing data to a request, or changing behavior of the SDK's client. - APIOptions []func(*middleware.Stack) error - - // Logger writer interface to write logging messages to. - Logger logging.Logger - - // ClientLogMode is used to configure the events that will be sent to the - // configured logger. This can be used to configure the logging of signing, - // retries, request, and responses of the SDK clients. - // - // See the ClientLogMode type documentation for the complete set of logging - // modes and available configuration. 
- ClientLogMode *aws.ClientLogMode - - // SharedConfigProfile is the profile to be used when loading the SharedConfig - SharedConfigProfile string - - // SharedConfigFiles is the slice of custom shared config files to use when - // loading the SharedConfig. A non-default profile used within config file - // must have name defined with prefix 'profile '. eg [profile xyz] - // indicates a profile with name 'xyz'. To read more on the format of the - // config file, please refer the documentation at - // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config - // - // If duplicate profiles are provided within the same, or across multiple - // shared config files, the next parsed profile will override only the - // properties that conflict with the previously defined profile. Note that - // if duplicate profiles are provided within the SharedCredentialsFiles and - // SharedConfigFiles, the properties defined in shared credentials file - // take precedence. - SharedConfigFiles []string - - // SharedCredentialsFile is the slice of custom shared credentials files to - // use when loading the SharedConfig. The profile name used within - // credentials file must not prefix 'profile '. eg [xyz] indicates a - // profile with name 'xyz'. Profile declared as [profile xyz] will be - // ignored. To read more on the format of the credentials file, please - // refer the documentation at - // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds - // - // If duplicate profiles are provided with a same, or across multiple - // shared credentials files, the next parsed profile will override only - // properties that conflict with the previously defined profile. Note that - // if duplicate profiles are provided within the SharedCredentialsFiles and - // SharedConfigFiles, the properties defined in shared credentials file - // take precedence. 
- SharedCredentialsFiles []string - - // CustomCABundle is CA bundle PEM bytes reader - CustomCABundle io.Reader - - // DefaultRegion is the fall back region, used if a region was not resolved - // from other sources - DefaultRegion string - - // UseEC2IMDSRegion indicates if SDK should retrieve the region - // from the EC2 Metadata service - UseEC2IMDSRegion *UseEC2IMDSRegion - - // CredentialsCacheOptions is a function for setting the - // aws.CredentialsCacheOptions - CredentialsCacheOptions func(*aws.CredentialsCacheOptions) - - // BearerAuthTokenCacheOptions is a function for setting the smithy-go - // auth/bearer#TokenCacheOptions - BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions) - - // SSOTokenProviderOptions is a function for setting the - // credentials/ssocreds.SSOTokenProviderOptions - SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions) - - // ProcessCredentialOptions is a function for setting - // the processcreds.Options - ProcessCredentialOptions func(*processcreds.Options) - - // EC2RoleCredentialOptions is a function for setting - // the ec2rolecreds.Options - EC2RoleCredentialOptions func(*ec2rolecreds.Options) - - // EndpointCredentialOptions is a function for setting - // the endpointcreds.Options - EndpointCredentialOptions func(*endpointcreds.Options) - - // WebIdentityRoleCredentialOptions is a function for setting - // the stscreds.WebIdentityRoleOptions - WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions) - - // AssumeRoleCredentialOptions is a function for setting the - // stscreds.AssumeRoleOptions - AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions) - - // SSOProviderOptions is a function for setting - // the ssocreds.Options - SSOProviderOptions func(options *ssocreds.Options) - - // LogConfigurationWarnings when set to true, enables logging - // configuration warnings - LogConfigurationWarnings *bool - - // S3UseARNRegion specifies if the S3 service should allow ARNs to direct - // the region, the client's requests are sent to. - S3UseARNRegion *bool - - // S3DisableMultiRegionAccessPoints specifies if the S3 service should disable - // the S3 Multi-Region access points feature. - S3DisableMultiRegionAccessPoints *bool - - // EnableEndpointDiscovery specifies if endpoint discovery is enable for - // the client. - EnableEndpointDiscovery aws.EndpointDiscoveryEnableState - - // Specifies if the EC2 IMDS service client is enabled. - // - // AWS_EC2_METADATA_DISABLED=true - EC2IMDSClientEnableState imds.ClientEnableState - - // Specifies the EC2 Instance Metadata Service default endpoint selection - // mode (IPv4 or IPv6) - EC2IMDSEndpointMode imds.EndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If - // specified it overrides EC2IMDSEndpointMode. - EC2IMDSEndpoint string - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - UseDualStackEndpoint aws.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - UseFIPSEndpoint aws.FIPSEndpointState - - // Specifies the SDK configuration mode for defaults. 
- DefaultsModeOptions DefaultsModeOptions - - // The sdk app ID retrieved from env var or shared config to be added to request user agent header - AppID string - - // Specifies whether an operation request could be compressed - DisableRequestCompression *bool - - // The inclusive min bytes of a request body that could be compressed - RequestMinCompressSizeBytes *int64 - - // Whether S3 Express auth is disabled. - S3DisableExpressAuth *bool - - // Whether account id should be built into endpoint resolution - AccountIDEndpointMode aws.AccountIDEndpointMode - - // Specify if request checksum should be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation - - // Specifies if response checksum should be validated - ResponseChecksumValidation aws.ResponseChecksumValidation - - // Service endpoint override. This value is not necessarily final and is - // passed to the service's EndpointResolverV2 for further delegation. - BaseEndpoint string -} - -func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { - if len(o.DefaultsModeOptions.Mode) == 0 { - return "", false, nil - } - return o.DefaultsModeOptions.Mode, true, nil -} - -// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the -// LoadOptions and not 0. -func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { - if o.RetryMaxAttempts == 0 { - return 0, false, nil - } - return o.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the RetryMode specified in the LoadOptions. -func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { - if len(o.RetryMode) == 0 { - return "", false, nil - } - return o.RetryMode, true, nil -} - -func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) { - if o.DefaultsModeOptions.IMDSClient == nil { - return nil, false, nil - } - return o.DefaultsModeOptions.IMDSClient, true, nil -} - -// getRegion returns Region from config's LoadOptions -func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) { - if len(o.Region) == 0 { - return "", false, nil - } - - return o.Region, true, nil -} - -// getAppID returns AppID from config's LoadOptions -func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) { - return o.AppID, len(o.AppID) > 0, nil -} - -// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions -func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { - if o.DisableRequestCompression == nil { - return false, false, nil - } - return *o.DisableRequestCompression, true, nil -} - -// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions -func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { - if o.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *o.RequestMinCompressSizeBytes, true, nil -} - -func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { - return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil -} - -func (o LoadOptions) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { - return o.RequestChecksumCalculation, o.RequestChecksumCalculation > 0, nil -} - -func (o LoadOptions) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) { - return o.ResponseChecksumValidation, 
o.ResponseChecksumValidation > 0, nil -} - -func (o LoadOptions) getBaseEndpoint(context.Context) (string, bool, error) { - return o.BaseEndpoint, o.BaseEndpoint != "", nil -} - -// GetServiceBaseEndpoint satisfies (internal/configsources).ServiceBaseEndpointProvider. -// -// The sdkID value is unused because LoadOptions only supports setting a GLOBAL -// endpoint override. In-code, per-service endpoint overrides are performed via -// functional options in service client space. -func (o LoadOptions) GetServiceBaseEndpoint(context.Context, string) (string, bool, error) { - return o.BaseEndpoint, o.BaseEndpoint != "", nil -} - -// WithRegion is a helper function to construct functional options -// that sets Region on config's LoadOptions. Setting the region to -// an empty string, will result in the region value being ignored. -// If multiple WithRegion calls are made, the last call overrides -// the previous call values. -func WithRegion(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Region = v - return nil - } -} - -// WithAppID is a helper function to construct functional options -// that sets AppID on config's LoadOptions. -func WithAppID(ID string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.AppID = ID - return nil - } -} - -// WithDisableRequestCompression is a helper function to construct functional options -// that sets DisableRequestCompression on config's LoadOptions. -func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - if DisableRequestCompression == nil { - return nil - } - o.DisableRequestCompression = DisableRequestCompression - return nil - } -} - -// WithRequestMinCompressSizeBytes is a helper function to construct functional options -// that sets RequestMinCompressSizeBytes on config's LoadOptions. -func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc { - return func(o *LoadOptions) error { - if RequestMinCompressSizeBytes == nil { - return nil - } - o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes - return nil - } -} - -// WithAccountIDEndpointMode is a helper function to construct functional options -// that sets AccountIDEndpointMode on config's LoadOptions -func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc { - return func(o *LoadOptions) error { - if m != "" { - o.AccountIDEndpointMode = m - } - return nil - } -} - -// WithRequestChecksumCalculation is a helper function to construct functional options -// that sets RequestChecksumCalculation on config's LoadOptions -func WithRequestChecksumCalculation(c aws.RequestChecksumCalculation) LoadOptionsFunc { - return func(o *LoadOptions) error { - if c > 0 { - o.RequestChecksumCalculation = c - } - return nil - } -} - -// WithResponseChecksumValidation is a helper function to construct functional options -// that sets ResponseChecksumValidation on config's LoadOptions -func WithResponseChecksumValidation(v aws.ResponseChecksumValidation) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.ResponseChecksumValidation = v - return nil - } -} - -// getDefaultRegion returns DefaultRegion from config's LoadOptions -func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { - if len(o.DefaultRegion) == 0 { - return "", false, nil - } - - return o.DefaultRegion, true, nil -} - -// WithDefaultRegion is a helper function to construct functional options -// that sets a DefaultRegion on config's LoadOptions. 
Setting the default -// region to an empty string, will result in the default region value -// being ignored. If multiple WithDefaultRegion calls are made, the last -// call overrides the previous call values. Note that both WithRegion and -// WithEC2IMDSRegion call takes precedence over WithDefaultRegion call -// when resolving region. -func WithDefaultRegion(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.DefaultRegion = v - return nil - } -} - -// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions -func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) { - if len(o.SharedConfigProfile) == 0 { - return "", false, nil - } - - return o.SharedConfigProfile, true, nil -} - -// WithSharedConfigProfile is a helper function to construct functional options -// that sets SharedConfigProfile on config's LoadOptions. Setting the shared -// config profile to an empty string, will result in the shared config profile -// value being ignored. -// If multiple WithSharedConfigProfile calls are made, the last call overrides -// the previous call values. -func WithSharedConfigProfile(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedConfigProfile = v - return nil - } -} - -// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions -func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) { - if o.SharedConfigFiles == nil { - return nil, false, nil - } - - return o.SharedConfigFiles, true, nil -} - -// WithSharedConfigFiles is a helper function to construct functional options -// that sets slice of SharedConfigFiles on config's LoadOptions. -// Setting the shared config files to an nil string slice, will result in the -// shared config files value being ignored. -// If multiple WithSharedConfigFiles calls are made, the last call overrides -// the previous call values. -func WithSharedConfigFiles(v []string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedConfigFiles = v - return nil - } -} - -// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions -func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) { - if o.SharedCredentialsFiles == nil { - return nil, false, nil - } - - return o.SharedCredentialsFiles, true, nil -} - -// WithSharedCredentialsFiles is a helper function to construct functional options -// that sets slice of SharedCredentialsFiles on config's LoadOptions. -// Setting the shared credentials files to an nil string slice, will result in the -// shared credentials files value being ignored. -// If multiple WithSharedCredentialsFiles calls are made, the last call overrides -// the previous call values. -func WithSharedCredentialsFiles(v []string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SharedCredentialsFiles = v - return nil - } -} - -// getCustomCABundle returns CustomCABundle from LoadOptions -func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) { - if o.CustomCABundle == nil { - return nil, false, nil - } - - return o.CustomCABundle, true, nil -} - -// WithCustomCABundle is a helper function to construct functional options -// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle -// to nil will result in custom CA Bundle value being ignored. -// If multiple WithCustomCABundle calls are made, the last call overrides the -// previous call values. 
-func WithCustomCABundle(v io.Reader) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.CustomCABundle = v - return nil - } -} - -// UseEC2IMDSRegion provides a regionProvider that retrieves the region -// from the EC2 Metadata service. -type UseEC2IMDSRegion struct { - // If unset will default to generic EC2 IMDS client. - Client *imds.Client -} - -// getRegion attempts to retrieve the region from EC2 Metadata service. -func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) { - if ctx == nil { - ctx = context.Background() - } - - client := p.Client - if client == nil { - client = imds.New(imds.Options{}) - } - - result, err := client.GetRegion(ctx, nil) - if err != nil { - return "", false, err - } - if len(result.Region) != 0 { - return result.Region, true, nil - } - return "", false, nil -} - -// getEC2IMDSRegion returns the value of EC2 IMDS region. -func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) { - if o.UseEC2IMDSRegion == nil { - return "", false, nil - } - - return o.UseEC2IMDSRegion.getRegion(ctx) -} - -// WithEC2IMDSRegion is a helper function to construct functional options -// that enables resolving EC2IMDS region. The function takes -// in a UseEC2IMDSRegion functional option, and can be used to set the -// EC2IMDS client which will be used to resolve EC2IMDSRegion. -// If no functional option is provided, an EC2IMDS client is built and used -// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last -// call overrides the previous call values. Note that the WithRegion calls takes -// precedence over WithEC2IMDSRegion when resolving region. -func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.UseEC2IMDSRegion = &UseEC2IMDSRegion{} - - for _, fn := range fnOpts { - fn(o.UseEC2IMDSRegion) - } - return nil - } -} - -// getCredentialsProvider returns the credentials value -func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) { - if o.Credentials == nil { - return nil, false, nil - } - - return o.Credentials, true, nil -} - -// WithCredentialsProvider is a helper function to construct functional options -// that sets Credential provider value on config's LoadOptions. If credentials -// provider is set to nil, the credentials provider value will be ignored. -// If multiple WithCredentialsProvider calls are made, the last call overrides -// the previous call values. -func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Credentials = v - return nil - } -} - -// getCredentialsCacheOptionsProvider returns the wrapped function to set aws.CredentialsCacheOptions -func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) { - if o.CredentialsCacheOptions == nil { - return nil, false, nil - } - - return o.CredentialsCacheOptions, true, nil -} - -// WithCredentialsCacheOptions is a helper function to construct functional -// options that sets a function to modify the aws.CredentialsCacheOptions the -// aws.CredentialsCache will be configured with, if the CredentialsCache is used -// by the configuration loader. -// -// If multiple WithCredentialsCacheOptions calls are made, the last call -// overrides the previous call values. 
-func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.CredentialsCacheOptions = v - return nil - } -} - -// getBearerAuthTokenProvider returns the credentials value -func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) { - if o.BearerAuthTokenProvider == nil { - return nil, false, nil - } - - return o.BearerAuthTokenProvider, true, nil -} - -// WithBearerAuthTokenProvider is a helper function to construct functional options -// that sets Credential provider value on config's LoadOptions. If credentials -// provider is set to nil, the credentials provider value will be ignored. -// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides -// the previous call values. -func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.BearerAuthTokenProvider = v - return nil - } -} - -// getBearerAuthTokenCacheOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions -func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) { - if o.BearerAuthTokenCacheOptions == nil { - return nil, false, nil - } - - return o.BearerAuthTokenCacheOptions, true, nil -} - -// WithBearerAuthTokenCacheOptions is a helper function to construct functional options -// that sets a function to modify the TokenCacheOptions the smithy-go -// auth/bearer#TokenCache will be configured with, if the TokenCache is used by -// the configuration loader. -// -// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides -// the previous call values. -func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.BearerAuthTokenCacheOptions = v - return nil - } -} - -// getSSOTokenProviderOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions -func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) { - if o.SSOTokenProviderOptions == nil { - return nil, false, nil - } - - return o.SSOTokenProviderOptions, true, nil -} - -// WithSSOTokenProviderOptions is a helper function to construct functional -// options that sets a function to modify the SSOtokenProviderOptions the SDK's -// credentials/ssocreds#SSOProvider will be configured with, if the -// SSOTokenProvider is used by the configuration loader. -// -// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides -// the previous call values. -func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SSOTokenProviderOptions = v - return nil - } -} - -// getProcessCredentialOptions returns the wrapped function to set processcreds.Options -func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) { - if o.ProcessCredentialOptions == nil { - return nil, false, nil - } - - return o.ProcessCredentialOptions, true, nil -} - -// WithProcessCredentialOptions is a helper function to construct functional options -// that sets a function to use processcreds.Options on config's LoadOptions. -// If process credential options is set to nil, the process credential value will -// be ignored. 
If multiple WithProcessCredentialOptions calls are made, the last call -// overrides the previous call values. -func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.ProcessCredentialOptions = v - return nil - } -} - -// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options -func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) { - if o.EC2RoleCredentialOptions == nil { - return nil, false, nil - } - - return o.EC2RoleCredentialOptions, true, nil -} - -// WithEC2RoleCredentialOptions is a helper function to construct functional options -// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If -// EC2 role credential options is set to nil, the EC2 role credential options value -// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made, -// the last call overrides the previous call values. -func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2RoleCredentialOptions = v - return nil - } -} - -// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options -func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) { - if o.EndpointCredentialOptions == nil { - return nil, false, nil - } - - return o.EndpointCredentialOptions, true, nil -} - -// WithEndpointCredentialOptions is a helper function to construct functional options -// that sets a function to use endpointcreds.Options on config's LoadOptions. If -// endpoint credential options is set to nil, the endpoint credential options -// value will be ignored. If multiple WithEndpointCredentialOptions calls are made, -// the last call overrides the previous call values. -func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EndpointCredentialOptions = v - return nil - } -} - -// getWebIdentityRoleCredentialOptions returns the wrapped function -func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) { - if o.WebIdentityRoleCredentialOptions == nil { - return nil, false, nil - } - - return o.WebIdentityRoleCredentialOptions, true, nil -} - -// WithWebIdentityRoleCredentialOptions is a helper function to construct -// functional options that sets a function to use stscreds.WebIdentityRoleOptions -// on config's LoadOptions. If web identity role credentials options is set to nil, -// the web identity role credentials value will be ignored. If multiple -// WithWebIdentityRoleCredentialOptions calls are made, the last call -// overrides the previous call values. 
-func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.WebIdentityRoleCredentialOptions = v - return nil - } -} - -// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions -func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) { - if o.AssumeRoleCredentialOptions == nil { - return nil, false, nil - } - - return o.AssumeRoleCredentialOptions, true, nil -} - -// WithAssumeRoleCredentialOptions is a helper function to construct -// functional options that sets a function to use stscreds.AssumeRoleOptions -// on config's LoadOptions. If assume role credentials options is set to nil, -// the assume role credentials value will be ignored. If multiple -// WithAssumeRoleCredentialOptions calls are made, the last call overrides -// the previous call values. -func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.AssumeRoleCredentialOptions = v - return nil - } -} - -func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) { - if o.HTTPClient == nil { - return nil, false, nil - } - - return o.HTTPClient, true, nil -} - -// WithHTTPClient is a helper function to construct functional options -// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil, -// the HTTPClient value will be ignored. -// If multiple WithHTTPClient calls are made, the last call overrides -// the previous call values. -func WithHTTPClient(v HTTPClient) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.HTTPClient = v - return nil - } -} - -func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) { - if o.APIOptions == nil { - return nil, false, nil - } - - return o.APIOptions, true, nil -} - -// WithAPIOptions is a helper function to construct functional options -// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the -// APIOptions value is ignored. If multiple WithAPIOptions calls are -// made, the last call overrides the previous call values. -func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc { - return func(o *LoadOptions) error { - if v == nil { - return nil - } - - o.APIOptions = append(o.APIOptions, v...) - return nil - } -} - -func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) { - if o.RetryMaxAttempts == 0 { - return 0, false, nil - } - - return o.RetryMaxAttempts, true, nil -} - -// WithRetryMaxAttempts is a helper function to construct functional options that sets -// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is -// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides -// the previous call values. -// -// Will be ignored of LoadOptions.Retryer or WithRetryer are used. -func WithRetryMaxAttempts(v int) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.RetryMaxAttempts = v - return nil - } -} - -func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { - if o.RetryMode == "" { - return "", false, nil - } - - return o.RetryMode, true, nil -} - -// WithRetryMode is a helper function to construct functional options that sets -// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is -// ignored. 
If multiple WithRetryMode calls are made, the last call overrides -// the previous call values. -// -// Will be ignored of LoadOptions.Retryer or WithRetryer are used. -func WithRetryMode(v aws.RetryMode) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.RetryMode = v - return nil - } -} - -func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) { - if o.Retryer == nil { - return nil, false, nil - } - - return o.Retryer, true, nil -} - -// WithRetryer is a helper function to construct functional options -// that sets Retryer on LoadOptions. If Retryer is set to nil, the -// Retryer value is ignored. If multiple WithRetryer calls are -// made, the last call overrides the previous call values. -func WithRetryer(v func() aws.Retryer) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Retryer = v - return nil - } -} - -func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) { - if o.EndpointResolver == nil { - return nil, false, nil - } - - return o.EndpointResolver, true, nil -} - -// WithEndpointResolver is a helper function to construct functional options -// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil, -// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls -// are made, the last call overrides the previous call values. -// -// Deprecated: The global endpoint resolution interface is deprecated. The API -// for endpoint resolution is now unique to each service and is set via the -// EndpointResolverV2 field on service client options. Use of -// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you -// from using any endpoint-related service features released after the -// introduction of EndpointResolverV2. You may also encounter broken or -// unexpected behavior when using the old global interface with services that -// use many endpoint-related customizations such as S3. -func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EndpointResolver = v - return nil - } -} - -func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) { - if o.EndpointResolverWithOptions == nil { - return nil, false, nil - } - - return o.EndpointResolverWithOptions, true, nil -} - -// WithEndpointResolverWithOptions is a helper function to construct functional options -// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil, -// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls -// are made, the last call overrides the previous call values. -// -// Deprecated: The global endpoint resolution interface is deprecated. See -// deprecation docs on [WithEndpointResolver]. -func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EndpointResolverWithOptions = v - return nil - } -} - -func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) { - if o.Logger == nil { - return nil, false, nil - } - - return o.Logger, true, nil -} - -// WithLogger is a helper function to construct functional options -// that sets Logger on LoadOptions. If Logger is set to nil, the -// Logger value will be ignored. If multiple WithLogger calls are made, -// the last call overrides the previous call values. 
-func WithLogger(v logging.Logger) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.Logger = v - return nil - } -} - -func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) { - if o.ClientLogMode == nil { - return 0, false, nil - } - - return *o.ClientLogMode, true, nil -} - -// WithClientLogMode is a helper function to construct functional options -// that sets client log mode on LoadOptions. If client log mode is set to nil, -// the client log mode value will be ignored. If multiple WithClientLogMode calls are made, -// the last call overrides the previous call values. -func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.ClientLogMode = &v - return nil - } -} - -func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) { - if o.LogConfigurationWarnings == nil { - return false, false, nil - } - return *o.LogConfigurationWarnings, true, nil -} - -// WithLogConfigurationWarnings is a helper function to construct -// functional options that can be used to set LogConfigurationWarnings -// on LoadOptions. -// -// If multiple WithLogConfigurationWarnings calls are made, the last call -// overrides the previous call values. -func WithLogConfigurationWarnings(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.LogConfigurationWarnings = &v - return nil - } -} - -// GetS3UseARNRegion returns whether to allow ARNs to direct the region -// the S3 client's requests are sent to. -func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) { - if o.S3UseARNRegion == nil { - return false, false, nil - } - return *o.S3UseARNRegion, true, nil -} - -// WithS3UseARNRegion is a helper function to construct functional options -// that can be used to set S3UseARNRegion on LoadOptions. -// If multiple WithS3UseARNRegion calls are made, the last call overrides -// the previous call values. -func WithS3UseARNRegion(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3UseARNRegion = &v - return nil - } -} - -// GetS3DisableMultiRegionAccessPoints returns whether to disable -// the S3 multi-region access points feature. -func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) { - if o.S3DisableMultiRegionAccessPoints == nil { - return false, false, nil - } - return *o.S3DisableMultiRegionAccessPoints, true, nil -} - -// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options -// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions. -// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides -// the previous call values. -func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3DisableMultiRegionAccessPoints = &v - return nil - } -} - -// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set. -func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { - if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - return o.EnableEndpointDiscovery, true, nil -} - -// WithEndpointDiscovery is a helper function to construct functional options -// that can be used to enable endpoint discovery on LoadOptions for supported clients. 
-// If multiple WithEndpointDiscovery calls are made, the last call overrides -// the previous call values. -func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EnableEndpointDiscovery = v - return nil - } -} - -// getSSOProviderOptions returns AssumeRoleCredentialOptions from LoadOptions -func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) { - if o.SSOProviderOptions == nil { - return nil, false, nil - } - - return o.SSOProviderOptions, true, nil -} - -// WithSSOProviderOptions is a helper function to construct -// functional options that sets a function to use ssocreds.Options -// on config's LoadOptions. If the SSO credential provider options is set to nil, -// the sso provider options value will be ignored. If multiple -// WithSSOProviderOptions calls are made, the last call overrides -// the previous call values. -func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.SSOProviderOptions = v - return nil - } -} - -// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. -func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { - if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { - return imds.ClientDefaultEnableState, false, nil - } - - return o.EC2IMDSClientEnableState, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return o.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) { - if len(o.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return o.EC2IMDSEndpoint, true, nil -} - -// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState. -func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSClientEnableState = v - return nil - } -} - -// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode. -func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSEndpointMode = v - return nil - } -} - -// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint. -func WithEC2IMDSEndpoint(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.EC2IMDSEndpoint = v - return nil - } -} - -// WithUseDualStackEndpoint is a helper function to construct -// functional options that can be used to set UseDualStackEndpoint on LoadOptions. -func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.UseDualStackEndpoint = v - return nil - } -} - -// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be -// used for requests. 
-func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { - if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - return aws.DualStackEndpointStateUnset, false, nil - } - return o.UseDualStackEndpoint, true, nil -} - -// WithUseFIPSEndpoint is a helper function to construct -// functional options that can be used to set UseFIPSEndpoint on LoadOptions. -func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.UseFIPSEndpoint = v - return nil - } -} - -// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be -// used for requests. -func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - return o.UseFIPSEndpoint, true, nil -} - -// WithDefaultsMode sets the SDK defaults configuration mode to the value provided. -// -// Zero or more functional options can be provided to provide configuration options for performing -// environment discovery when using aws.DefaultsModeAuto. -func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc { - do := DefaultsModeOptions{ - Mode: mode, - } - for _, fn := range optFns { - fn(&do) - } - return func(options *LoadOptions) error { - options.DefaultsModeOptions = do - return nil - } -} - -// GetS3DisableExpressAuth returns the configured value for -// [EnvConfig.S3DisableExpressAuth]. -func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) { - if o.S3DisableExpressAuth == nil { - return false, false - } - - return *o.S3DisableExpressAuth, true -} - -// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth] -// to the value provided. -func WithS3DisableExpressAuth(v bool) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.S3DisableExpressAuth = &v - return nil - } -} - -// WithBaseEndpoint is a helper function to construct functional options that -// sets BaseEndpoint on config's LoadOptions. Empty values have no effect, and -// subsequent calls to this API override previous ones. -// -// This is an in-code setting, therefore, any value set using this hook takes -// precedence over and will override ALL environment and shared config -// directives that set endpoint URLs. Functional options on service clients -// have higher specificity, and functional options that modify the value of -// BaseEndpoint on a client will take precedence over this setting. 
-func WithBaseEndpoint(v string) LoadOptionsFunc { - return func(o *LoadOptions) error { - o.BaseEndpoint = v - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go deleted file mode 100644 index b629137c8..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go +++ /dev/null @@ -1,51 +0,0 @@ -package config - -import ( - "fmt" - "net" - "net/url" -) - -var lookupHostFn = net.LookupHost - -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil - } - - // Host is not an ip, perform lookup - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - if len(addrs) == 0 { - return false, fmt.Errorf("no addrs found for host, %s", host) - } - - for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { - return false, nil - } - } - - return true, nil -} - -func validateLocalURL(v string) error { - u, err := url.Parse(v) - if err != nil { - return err - } - - host := u.Hostname() - if len(host) == 0 { - return fmt.Errorf("unable to parse host from local HTTP cred provider URL") - } else if isLoopback, err := isLoopbackHost(host); err != nil { - return fmt.Errorf("failed to resolve host %q, %v", host, err) - } else if !isLoopback { - return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go deleted file mode 100644 index a8ff40d84..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ /dev/null @@ -1,755 +0,0 @@ -package config - -import ( - "context" - "io" - "net/http" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - smithybearer "github.com/aws/smithy-go/auth/bearer" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// sharedConfigProfileProvider provides access to the shared config profile -// name external configuration value. -type sharedConfigProfileProvider interface { - getSharedConfigProfile(ctx context.Context) (string, bool, error) -} - -// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedConfigProfileProvider); ok { - value, found, err = p.getSharedConfigProfile(ctx) - if err != nil || found { - break - } - } - } - return -} - -// sharedConfigFilesProvider provides access to the shared config filesnames -// external configuration value. -type sharedConfigFilesProvider interface { - getSharedConfigFiles(ctx context.Context) ([]string, bool, error) -} - -// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. 
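Context for reviewers (illustrative only): the load_options.go file deleted above defines the With* functional options that callers compose when loading configuration; later options override earlier ones, and in-code options take precedence over environment and shared-config values. A minimal usage sketch under the assumption that an S3 client is built from the result — the profile name, CA path, and timeout are hypothetical:

// Sketch only; not code from this repository.
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"time"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	caBundle, err := os.Open("/etc/ssl/private-ca.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer caBundle.Close()

	// Each With* helper returns a LoadOptionsFunc applied in order.
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithRegion("us-east-1"),
		config.WithSharedConfigProfile("backup"), // hypothetical profile
		config.WithRetryMaxAttempts(5),
		config.WithCustomCABundle(caBundle),
		config.WithHTTPClient(&http.Client{Timeout: 30 * time.Second}),
	)
	if err != nil {
		log.Fatal(err)
	}

	// The client inherits region, retry policy, and TLS trust from cfg.
	_ = s3.NewFromConfig(cfg)
}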
-func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedConfigFilesProvider); ok { - value, found, err = p.getSharedConfigFiles(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// sharedCredentialsFilesProvider provides access to the shared credentials filesnames -// external configuration value. -type sharedCredentialsFilesProvider interface { - getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) -} - -// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(sharedCredentialsFilesProvider); ok { - value, found, err = p.getSharedCredentialsFiles(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// customCABundleProvider provides access to the custom CA bundle PEM bytes. -type customCABundleProvider interface { - getCustomCABundle(ctx context.Context) (io.Reader, bool, error) -} - -// getCustomCABundle searches the configs for a customCABundleProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(customCABundleProvider); ok { - value, found, err = p.getCustomCABundle(ctx) - if err != nil || found { - break - } - } - } - - return -} - -// regionProvider provides access to the region external configuration value. -type regionProvider interface { - getRegion(ctx context.Context) (string, bool, error) -} - -// getRegion searches the configs for a regionProvider and returns the value -// if found. Returns an error if a provider fails before a value is found. -func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(regionProvider); ok { - value, found, err = p.getRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// IgnoreConfiguredEndpointsProvider is needed to search for all providers -// that provide a flag to disable configured endpoints. -type IgnoreConfiguredEndpointsProvider interface { - GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. 
-func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { - value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) - if err != nil || found { - break - } - } - } - return -} - -type baseEndpointProvider interface { - getBaseEndpoint(ctx context.Context) (string, bool, error) -} - -func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(baseEndpointProvider); ok { - value, found, err = p.getBaseEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} - -type servicesObjectProvider interface { - getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error) -} - -func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(servicesObjectProvider); ok { - value, found, err = p.getServicesObject(ctx) - if err != nil || found { - break - } - } - } - return -} - -// appIDProvider provides access to the sdk app ID value -type appIDProvider interface { - getAppID(ctx context.Context) (string, bool, error) -} - -func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(appIDProvider); ok { - value, found, err = p.getAppID(ctx) - if err != nil || found { - break - } - } - } - return -} - -// disableRequestCompressionProvider provides access to the DisableRequestCompression -type disableRequestCompressionProvider interface { - getDisableRequestCompression(context.Context) (bool, bool, error) -} - -func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(disableRequestCompressionProvider); ok { - value, found, err = p.getDisableRequestCompression(ctx) - if err != nil || found { - break - } - } - } - return -} - -// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes -type requestMinCompressSizeBytesProvider interface { - getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) -} - -func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok { - value, found, err = p.getRequestMinCompressSizeBytes(ctx) - if err != nil || found { - break - } - } - } - return -} - -// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode -type accountIDEndpointModeProvider interface { - getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) -} - -func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(accountIDEndpointModeProvider); ok { - value, found, err = p.getAccountIDEndpointMode(ctx) - if err != nil || found { - break - } - } - } - return -} - -// requestChecksumCalculationProvider provides access to the RequestChecksumCalculation -type requestChecksumCalculationProvider interface { - getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) -} - -func getRequestChecksumCalculation(ctx context.Context, configs configs) (value aws.RequestChecksumCalculation, 
found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(requestChecksumCalculationProvider); ok { - value, found, err = p.getRequestChecksumCalculation(ctx) - if err != nil || found { - break - } - } - } - return -} - -// responseChecksumValidationProvider provides access to the ResponseChecksumValidation -type responseChecksumValidationProvider interface { - getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) -} - -func getResponseChecksumValidation(ctx context.Context, configs configs) (value aws.ResponseChecksumValidation, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(responseChecksumValidationProvider); ok { - value, found, err = p.getResponseChecksumValidation(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ec2IMDSRegionProvider provides access to the ec2 imds region -// configuration value -type ec2IMDSRegionProvider interface { - getEC2IMDSRegion(ctx context.Context) (string, bool, error) -} - -// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and -// returns the value if found. Returns an error if a provider fails before -// a value is found. -func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(ec2IMDSRegionProvider); ok { - region, found, err = provider.getEC2IMDSRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// credentialsProviderProvider provides access to the credentials external -// configuration value. -type credentialsProviderProvider interface { - getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) -} - -// getCredentialsProvider searches the configs for a credentialsProviderProvider -// and returns the value if found. Returns an error if a provider fails before a -// value is found. -func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(credentialsProviderProvider); ok { - p, found, err = provider.getCredentialsProvider(ctx) - if err != nil || found { - break - } - } - } - return -} - -// credentialsCacheOptionsProvider is an interface for retrieving a function for setting -// the aws.CredentialsCacheOptions. -type credentialsCacheOptionsProvider interface { - getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) -} - -// getCredentialsCacheOptionsProvider is an interface for retrieving a function for setting -// the aws.CredentialsCacheOptions. -func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) ( - f func(*aws.CredentialsCacheOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(credentialsCacheOptionsProvider); ok { - f, found, err = p.getCredentialsCacheOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// bearerAuthTokenProviderProvider provides access to the bearer authentication -// token external configuration value. -type bearerAuthTokenProviderProvider interface { - getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error) -} - -// getBearerAuthTokenProvider searches the config sources for a -// bearerAuthTokenProviderProvider and returns the value if found. Returns an -// error if a provider fails before a value is found. 
-func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) { - for _, cfg := range configs { - if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok { - p, found, err = provider.getBearerAuthTokenProvider(ctx) - if err != nil || found { - break - } - } - } - return -} - -// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for -// setting the smithy-go auth/bearer#TokenCacheOptions. -type bearerAuthTokenCacheOptionsProvider interface { - getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) -} - -// getBearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for -// setting the smithy-go auth/bearer#TokenCacheOptions. -func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) ( - f func(*smithybearer.TokenCacheOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok { - f, found, err = p.getBearerAuthTokenCacheOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoTokenProviderOptionsProvider is an interface for retrieving a function for -// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. -type ssoTokenProviderOptionsProvider interface { - getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) -} - -// getSSOTokenProviderOptions is an interface for retrieving a function for -// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. -func getSSOTokenProviderOptions(ctx context.Context, configs configs) ( - f func(*ssocreds.SSOTokenProviderOptions), found bool, err error, -) { - for _, config := range configs { - if p, ok := config.(ssoTokenProviderOptionsProvider); ok { - f, found, err = p.getSSOTokenProviderOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoTokenProviderOptionsProvider - -// processCredentialOptions is an interface for retrieving a function for setting -// the processcreds.Options. -type processCredentialOptions interface { - getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) -} - -// getProcessCredentialOptions searches the slice of configs and returns the first function found -func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(processCredentialOptions); ok { - f, found, err = p.getProcessCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ec2RoleCredentialOptionsProvider is an interface for retrieving a function -// for setting the ec2rolecreds.Provider options. 
-type ec2RoleCredentialOptionsProvider interface { - getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) -} - -// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found -func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(ec2RoleCredentialOptionsProvider); ok { - f, found, err = p.getEC2RoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources -type defaultRegionProvider interface { - getDefaultRegion(ctx context.Context) (string, bool, error) -} - -// getDefaultRegion searches the slice of configs and returns the first fallback region found -func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) { - for _, config := range configs { - if p, ok := config.(defaultRegionProvider); ok { - value, found, err = p.getDefaultRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointCredentialOptionsProvider is an interface for retrieving a function for setting -// the endpointcreds.ProviderOptions. -type endpointCredentialOptionsProvider interface { - getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error) -} - -// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found -func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) { - for _, config := range configs { - if p, ok := config.(endpointCredentialOptionsProvider); ok { - f, found, err = p.getEndpointCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting -// the stscreds.WebIdentityRoleProvider. -type webIdentityRoleCredentialOptionsProvider interface { - getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) -} - -// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found -func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) { - for _, config := range configs { - if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok { - f, found, err = p.getWebIdentityRoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting -// the stscreds.AssumeRoleOptions. 
-type assumeRoleCredentialOptionsProvider interface { - getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error) -} - -// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found -func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) { - for _, config := range configs { - if p, ok := config.(assumeRoleCredentialOptionsProvider); ok { - f, found, err = p.getAssumeRoleCredentialOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// HTTPClient is an HTTP client implementation -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// httpClientProvider is an interface for retrieving HTTPClient -type httpClientProvider interface { - getHTTPClient(ctx context.Context) (HTTPClient, bool, error) -} - -// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs -func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) { - for _, config := range configs { - if p, ok := config.(httpClientProvider); ok { - client, found, err = p.getHTTPClient(ctx) - if err != nil || found { - break - } - } - } - return -} - -// apiOptionsProvider is an interface for retrieving APIOptions -type apiOptionsProvider interface { - getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) -} - -// getAPIOptions searches the slice of configs and returns the APIOptions set on configs -func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) { - for _, config := range configs { - if p, ok := config.(apiOptionsProvider); ok { - // retrieve APIOptions from configs and set it on cfg - apiOptions, found, err = p.getAPIOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source -type endpointResolverProvider interface { - getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) -} - -// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used -// to configure the aws.Config.EndpointResolver value. -func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) { - for _, c := range configs { - if p, ok := c.(endpointResolverProvider); ok { - f, found, err = p.getEndpointResolver(ctx) - if err != nil || found { - break - } - } - } - return -} - -// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source -type endpointResolverWithOptionsProvider interface { - getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) -} - -// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used -// to configure the aws.Config.EndpointResolver value. 
-func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) { - for _, c := range configs { - if p, ok := c.(endpointResolverWithOptionsProvider); ok { - f, found, err = p.getEndpointResolverWithOptions(ctx) - if err != nil || found { - break - } - } - } - return -} - -// loggerProvider is an interface for retrieving a logging.Logger from a configuration source. -type loggerProvider interface { - getLogger(ctx context.Context) (logging.Logger, bool, error) -} - -// getLogger searches the provided config sources for a logging.Logger that can be used -// to configure the aws.Config.Logger value. -func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) { - for _, c := range configs { - if p, ok := c.(loggerProvider); ok { - l, found, err = p.getLogger(ctx) - if err != nil || found { - break - } - } - } - return -} - -// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source. -type clientLogModeProvider interface { - getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) -} - -func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(clientLogModeProvider); ok { - m, found, err = p.getClientLogMode(ctx) - if err != nil || found { - break - } - } - } - return -} - -// retryProvider is an configuration provider for custom Retryer. -type retryProvider interface { - getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) -} - -func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryProvider); ok { - v, found, err = p.getRetryer(ctx) - if err != nil || found { - break - } - } - } - return -} - -// logConfigurationWarningsProvider is an configuration provider for -// retrieving a boolean indicating whether configuration issues should -// be logged when loading from config sources -type logConfigurationWarningsProvider interface { - getLogConfigurationWarnings(ctx context.Context) (bool, bool, error) -} - -func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) { - for _, c := range configs { - if p, ok := c.(logConfigurationWarningsProvider); ok { - v, found, err = p.getLogConfigurationWarnings(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ssoCredentialOptionsProvider is an interface for retrieving a function for setting -// the ssocreds.Options. 
-type ssoCredentialOptionsProvider interface { - getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error) -} - -func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) { - for _, c := range configs { - if p, ok := c.(ssoCredentialOptionsProvider); ok { - v, found, err = p.getSSOProviderOptions(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type defaultsModeIMDSClientProvider interface { - getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error) -} - -func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) { - for _, c := range configs { - if p, ok := c.(defaultsModeIMDSClientProvider); ok { - v, found, err = p.getDefaultsModeIMDSClient(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type defaultsModeProvider interface { - getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error) -} - -func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(defaultsModeProvider); ok { - v, found, err = p.getDefaultsMode(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type retryMaxAttemptsProvider interface { - GetRetryMaxAttempts(context.Context) (int, bool, error) -} - -func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryMaxAttemptsProvider); ok { - v, found, err = p.GetRetryMaxAttempts(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} - -type retryModeProvider interface { - GetRetryMode(context.Context) (aws.RetryMode, bool, error) -} - -func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) { - for _, c := range configs { - if p, ok := c.(retryModeProvider); ok { - v, found, err = p.GetRetryMode(ctx) - if err != nil || found { - break - } - } - } - return v, found, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go deleted file mode 100644 index a68bd0993..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ /dev/null @@ -1,413 +0,0 @@ -package config - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "os" - - "github.com/aws/aws-sdk-go-v2/aws" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/smithy-go/logging" -) - -// resolveDefaultAWSConfig will write default configuration values into the cfg -// value. It will write the default values, overwriting any previous value. -// -// This should be used as the first resolver in the slice of resolvers when -// resolving external configuration. -func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { - var sources []interface{} - for _, s := range cfgs { - sources = append(sources, s) - } - - *cfg = aws.Config{ - Logger: logging.NewStandardLogger(os.Stderr), - ConfigSources: sources, - } - return nil -} - -// resolveCustomCABundle extracts the first instance of a custom CA bundle filename -// from the external configurations. It will update the HTTP Client's builder -// to be configured with the custom CA bundle. 
-// -// Config provider used: -// * customCABundleProvider -func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error { - pemCerts, found, err := getCustomCABundle(ctx, cfgs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - if cfg.HTTPClient == nil { - cfg.HTTPClient = awshttp.NewBuildableClient() - } - - trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+ - "has no WithTransportOptions, %T", cfg.HTTPClient) - } - - var appendErr error - client := trOpts.WithTransportOptions(func(tr *http.Transport) { - if tr.TLSClientConfig == nil { - tr.TLSClientConfig = &tls.Config{} - } - if tr.TLSClientConfig.RootCAs == nil { - tr.TLSClientConfig.RootCAs = x509.NewCertPool() - } - - b, err := ioutil.ReadAll(pemCerts) - if err != nil { - appendErr = fmt.Errorf("failed to read custom CA bundle PEM file") - } - - if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) { - appendErr = fmt.Errorf("failed to load custom CA bundle PEM file") - } - }) - if appendErr != nil { - return appendErr - } - - cfg.HTTPClient = client - return err -} - -// resolveRegion extracts the first instance of a Region from the configs slice. -// -// Config providers used: -// * regionProvider -func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - v, found, err := getRegion(ctx, configs) - if err != nil { - // TODO error handling, What is the best way to handle this? - // capture previous errors continue. error out if all errors - return err - } - if !found { - return nil - } - - cfg.Region = v - return nil -} - -func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error { - var downcastCfgSources []interface{} - for _, cs := range configs { - downcastCfgSources = append(downcastCfgSources, interface{}(cs)) - } - - if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil { - cfg.BaseEndpoint = nil - return nil - } - - v, found, err := getBaseEndpoint(ctx, configs) - if err != nil { - return err - } - - if !found { - return nil - } - cfg.BaseEndpoint = aws.String(v) - return nil -} - -// resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var -func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { - ID, _, err := getAppID(ctx, configs) - if err != nil { - return err - } - - cfg.AppID = ID - return nil -} - -// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's -// SharedConfig or EnvConfig -func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error { - disable, _, err := getDisableRequestCompression(ctx, configs) - if err != nil { - return err - } - - cfg.DisableRequestCompression = disable - return nil -} - -// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's -// SharedConfig or EnvConfig -func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error { - minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs) - if err != nil { - return err - } - // must set a default min size 10240 if not configured - if !found { - minBytes = 10240 - } - cfg.RequestMinCompressSizeBytes = minBytes - return nil -} - -// resolveAccountIDEndpointMode extracts 
the AccountIDEndpointMode from the configs slice's -// SharedConfig or EnvConfig -func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error { - m, found, err := getAccountIDEndpointMode(ctx, configs) - if err != nil { - return err - } - - if !found { - m = aws.AccountIDEndpointModePreferred - } - - cfg.AccountIDEndpointMode = m - return nil -} - -// resolveRequestChecksumCalculation extracts the RequestChecksumCalculation from the configs slice's -// SharedConfig or EnvConfig -func resolveRequestChecksumCalculation(ctx context.Context, cfg *aws.Config, configs configs) error { - c, found, err := getRequestChecksumCalculation(ctx, configs) - if err != nil { - return err - } - - if !found { - c = aws.RequestChecksumCalculationWhenSupported - } - cfg.RequestChecksumCalculation = c - return nil -} - -// resolveResponseValidation extracts the ResponseChecksumValidation from the configs slice's -// SharedConfig or EnvConfig -func resolveResponseChecksumValidation(ctx context.Context, cfg *aws.Config, configs configs) error { - c, found, err := getResponseChecksumValidation(ctx, configs) - if err != nil { - return err - } - - if !found { - c = aws.ResponseChecksumValidationWhenSupported - } - cfg.ResponseChecksumValidation = c - return nil -} - -// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default -// region if region had not been resolved from other sources. -func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - if len(cfg.Region) > 0 { - return nil - } - - v, found, err := getDefaultRegion(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Region = v - - return nil -} - -// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance -// if one has not been resolved from other sources. -func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error { - c, found, err := getHTTPClient(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.HTTPClient = c - return nil -} - -// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options -// if one has not been resolved from other sources. 
-func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - o, found, err := getAPIOptions(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.APIOptions = o - - return nil -} - -// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice -// and sets the functions result on the aws.Config.EndpointResolver -func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error { - endpointResolver, found, err := getEndpointResolver(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.EndpointResolver = endpointResolver - - return nil -} - -// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice -// and sets the functions result on the aws.Config.EndpointResolver -func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.EndpointResolverWithOptions = endpointResolver - - return nil -} - -func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error { - logger, found, err := getLogger(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Logger = logger - - return nil -} - -func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error { - mode, found, err := getClientLogMode(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.ClientLogMode = mode - - return nil -} - -func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error { - retryer, found, err := getRetryer(ctx, configs) - if err != nil { - return err - } - - if found { - cfg.Retryer = retryer - return nil - } - - // Only load the retry options if a custom retryer has not be specified. 
- if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil { - return err - } - return resolveRetryMode(ctx, cfg, configs) -} - -func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error { - if len(cfg.Region) > 0 { - return nil - } - - region, found, err := getEC2IMDSRegion(ctx, configs) - if err != nil { - return err - } - if !found { - return nil - } - - cfg.Region = region - - return nil -} - -func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error { - defaultsMode, found, err := getDefaultsMode(ctx, configs) - if err != nil { - return err - } - if !found { - defaultsMode = aws.DefaultsModeLegacy - } - - var environment aws.RuntimeEnvironment - if defaultsMode == aws.DefaultsModeAuto { - envConfig, _, _ := getAWSConfigSources(configs) - - client, found, err := getDefaultsModeIMDSClient(ctx, configs) - if err != nil { - return err - } - if !found { - client = imds.NewFromConfig(*cfg) - } - - environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client) - if err != nil { - return err - } - } - - cfg.DefaultsMode = defaultsMode - cfg.RuntimeEnvironment = environment - - return nil -} - -func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error { - maxAttempts, found, err := getRetryMaxAttempts(ctx, configs) - if err != nil || !found { - return err - } - cfg.RetryMaxAttempts = maxAttempts - - return nil -} - -func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error { - retryMode, found, err := getRetryMode(ctx, configs) - if err != nil || !found { - return err - } - cfg.RetryMode = retryMode - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go deleted file mode 100644 index a8ebb3c0a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go +++ /dev/null @@ -1,122 +0,0 @@ -package config - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - smithybearer "github.com/aws/smithy-go/auth/bearer" -) - -// resolveBearerAuthToken extracts a token provider from the config sources. -// -// If an explicit bearer authentication token provider is not found the -// resolver will fallback to resolving token provider via other config sources -// such as SharedConfig. -func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error { - found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs) - if found || err != nil { - return err - } - - return resolveBearerAuthTokenProviderChain(ctx, cfg, configs) -} - -// resolveBearerAuthTokenProvider extracts the first instance of -// BearerAuthTokenProvider from the config sources. -// -// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure -// the Token is only refreshed when needed. This also protects the -// TokenProvider so it can be used concurrently. 
-// -// Config providers used: -// * bearerAuthTokenProviderProvider -func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { - tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs) - if !found || err != nil { - return false, err - } - - cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( - ctx, configs, tokenProvider) - if err != nil { - return false, err - } - - return true, nil -} - -func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { - _, sharedConfig, _ := getAWSConfigSources(configs) - - var provider smithybearer.TokenProvider - - if sharedConfig.SSOSession != nil { - provider, err = resolveBearerAuthSSOTokenProvider( - ctx, cfg, sharedConfig.SSOSession, configs) - } - - if err == nil && provider != nil { - cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( - ctx, configs, provider) - } - - return err -} - -func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) { - ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) - if err != nil { - return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) - } - - var optFns []func(*ssocreds.SSOTokenProviderOptions) - if found { - optFns = append(optFns, ssoTokenProviderOptionsFn) - } - - cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name) - if err != nil { - return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err) - } - - client := ssooidc.NewFromConfig(*cfg) - provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...) - - return provider, nil -} - -// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go -// bearer/auth#TokenCache with the provided options if the provider is not -// already a TokenCache. -func wrapWithBearerAuthTokenCache( - ctx context.Context, - cfgs configs, - provider smithybearer.TokenProvider, - optFns ...func(*smithybearer.TokenCacheOptions), -) (smithybearer.TokenProvider, error) { - _, ok := provider.(*smithybearer.TokenCache) - if ok { - return provider, nil - } - - tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs) - if err != nil { - return nil, err - } - - opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns)) - opts = append(opts, func(o *smithybearer.TokenCacheOptions) { - o.RefreshBeforeExpires = 5 * time.Minute - o.RetrieveBearerTokenTimeout = 30 * time.Second - }) - opts = append(opts, optFns...) 
- if optionsFound { - opts = append(opts, tokenCacheConfigOptions) - } - - return smithybearer.NewTokenCache(provider, opts...), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go deleted file mode 100644 index 7ae252e2e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ /dev/null @@ -1,569 +0,0 @@ -package config - -import ( - "context" - "fmt" - "io/ioutil" - "net" - "net/url" - "os" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" - "github.com/aws/aws-sdk-go-v2/credentials/processcreds" - "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/aws-sdk-go-v2/service/sso" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - "github.com/aws/aws-sdk-go-v2/service/sts" -) - -const ( - // valid credential source values - credSourceEc2Metadata = "Ec2InstanceMetadata" - credSourceEnvironment = "Environment" - credSourceECSContainer = "EcsContainer" - httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" -) - -// direct representation of the IPv4 address for the ECS container -// "169.254.170.2" -var ecsContainerIPv4 net.IP = []byte{ - 169, 254, 170, 2, -} - -// direct representation of the IPv4 address for the EKS container -// "169.254.170.23" -var eksContainerIPv4 net.IP = []byte{ - 169, 254, 170, 23, -} - -// direct representation of the IPv6 address for the EKS container -// "fd00:ec2::23" -var eksContainerIPv6 net.IP = []byte{ - 0xFD, 0, 0xE, 0xC2, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0x23, -} - -var ( - ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing -) - -// resolveCredentials extracts a credential provider from slice of config -// sources. -// -// If an explicit credential provider is not found the resolver will fallback -// to resolving credentials by extracting a credential provider from EnvConfig -// and SharedConfig. -func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { - found, err := resolveCredentialProvider(ctx, cfg, configs) - if found || err != nil { - return err - } - - return resolveCredentialChain(ctx, cfg, configs) -} - -// resolveCredentialProvider extracts the first instance of Credentials from the -// config slices. -// -// The resolved CredentialProvider will be wrapped in a cache to ensure the -// credentials are only refreshed when needed. This also protects the -// credential provider to be used concurrently. -// -// Config providers used: -// * credentialsProviderProvider -func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { - credProvider, found, err := getCredentialsProvider(ctx, configs) - if !found || err != nil { - return false, err - } - - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider) - if err != nil { - return false, err - } - - return true, nil -} - -// resolveCredentialChain resolves a credential provider chain using EnvConfig -// and SharedConfig if present in the slice of provided configs. -// -// The resolved CredentialProvider will be wrapped in a cache to ensure the -// credentials are only refreshed when needed. 
This also protects the -// credential provider to be used concurrently. -func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { - envConfig, sharedConfig, other := getAWSConfigSources(configs) - - // When checking if a profile was specified programmatically we should only consider the "other" - // configuration sources that have been provided. This ensures we correctly honor the expected credential - // hierarchy. - _, sharedProfileSet, err := getSharedConfigProfile(ctx, other) - if err != nil { - return err - } - - switch { - case sharedProfileSet: - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) - case envConfig.Credentials.HasKeys(): - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} - case len(envConfig.WebIdentityTokenFilePath) > 0: - err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs) - default: - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) - } - if err != nil { - return err - } - - // Wrap the resolved provider in a cache so the SDK will cache credentials. - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials) - if err != nil { - return err - } - - return nil -} - -func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) { - - switch { - case sharedConfig.Source != nil: - // Assume IAM role with credentials source from a different profile. - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs) - - case sharedConfig.Credentials.HasKeys(): - // Static Credentials from Shared Config/Credentials file. - cfg.Credentials = credentials.StaticCredentialsProvider{ - Value: sharedConfig.Credentials, - } - - case len(sharedConfig.CredentialSource) != 0: - err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs) - - case len(sharedConfig.WebIdentityTokenFile) != 0: - // Credentials from Assume Web Identity token require an IAM Role, and - // that roll will be assumed. May be wrapped with another assume role - // via SourceProfile. 
- return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs) - - case sharedConfig.hasSSOConfiguration(): - err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs) - - case len(sharedConfig.CredentialProcess) != 0: - // Get credentials from CredentialProcess - err = processCredentials(ctx, cfg, sharedConfig, configs) - - case len(envConfig.ContainerCredentialsRelativePath) != 0: - err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) - - case len(envConfig.ContainerCredentialsEndpoint) != 0: - err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) - - default: - err = resolveEC2RoleCredentials(ctx, cfg, configs) - } - if err != nil { - return err - } - - if len(sharedConfig.RoleARN) > 0 { - return credsFromAssumeRole(ctx, cfg, sharedConfig, configs) - } - - return nil -} - -func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { - if err := sharedConfig.validateSSOConfiguration(); err != nil { - return err - } - - var options []func(*ssocreds.Options) - v, found, err := getSSOProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - options = append(options, v) - } - - cfgCopy := cfg.Copy() - - if sharedConfig.SSOSession != nil { - ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) - if err != nil { - return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) - } - var optFns []func(*ssocreds.SSOTokenProviderOptions) - if found { - optFns = append(optFns, ssoTokenProviderOptionsFn) - } - cfgCopy.Region = sharedConfig.SSOSession.SSORegion - cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name) - if err != nil { - return err - } - oidcClient := ssooidc.NewFromConfig(cfgCopy) - tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...) - options = append(options, func(o *ssocreds.Options) { - o.SSOTokenProvider = tokenProvider - o.CachedTokenFilepath = cachedPath - }) - } else { - cfgCopy.Region = sharedConfig.SSORegion - } - - cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...) - - return nil -} - -func ecsContainerURI(path string) string { - return fmt.Sprintf("%s%s", ecsContainerEndpoint, path) -} - -func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { - var opts []func(*processcreds.Options) - - options, found, err := getProcessCredentialOptions(ctx, configs) - if err != nil { - return err - } - if found { - opts = append(opts, options) - } - - cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...) 
- - return nil -} - -// isAllowedHost allows host to be loopback or known ECS/EKS container IPs -// -// host can either be an IP address OR an unresolved hostname - resolution will -// be automatically performed in the latter case -func isAllowedHost(host string) (bool, error) { - if ip := net.ParseIP(host); ip != nil { - return isIPAllowed(ip), nil - } - - addrs, err := lookupHostFn(host) - if err != nil { - return false, err - } - - for _, addr := range addrs { - if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { - return false, nil - } - } - - return true, nil -} - -func isIPAllowed(ip net.IP) bool { - return ip.IsLoopback() || - ip.Equal(ecsContainerIPv4) || - ip.Equal(eksContainerIPv4) || - ip.Equal(eksContainerIPv6) -} - -func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error { - var resolveErr error - - parsed, err := url.Parse(endpointURL) - if err != nil { - resolveErr = fmt.Errorf("invalid URL, %w", err) - } else { - host := parsed.Hostname() - if len(host) == 0 { - resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL") - } else if parsed.Scheme == "http" { - if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { - resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr) - } else if !isAllowedHost { - resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host) - } - } - } - - if resolveErr != nil { - return resolveErr - } - - return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs) -} - -func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error { - optFns := []func(*endpointcreds.Options){ - func(options *endpointcreds.Options) { - if len(authToken) != 0 { - options.AuthorizationToken = authToken - } - if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { - options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { - var contents []byte - var err error - if contents, err = ioutil.ReadFile(authFilePath); err != nil { - return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) - } - return string(contents), nil - }) - } - options.APIOptions = cfg.APIOptions - if cfg.Retryer != nil { - options.Retryer = cfg.Retryer() - } - }, - } - - optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - provider := endpointcreds.New(url, optFns...) 
- - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) { - options.ExpiryWindow = 5 * time.Minute - }) - if err != nil { - return err - } - - return nil -} - -func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) { - switch sharedCfg.CredentialSource { - case credSourceEc2Metadata: - return resolveEC2RoleCredentials(ctx, cfg, configs) - - case credSourceEnvironment: - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} - - case credSourceECSContainer: - if len(envConfig.ContainerCredentialsRelativePath) != 0 { - return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) - } - if len(envConfig.ContainerCredentialsEndpoint) != 0 { - return resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) - } - return fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set") - - default: - return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") - } - - return nil -} - -func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { - optFns := make([]func(*ec2rolecreds.Options), 0, 2) - - optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - optFns = append(optFns, func(o *ec2rolecreds.Options) { - // Only define a client from config if not already defined. - if o.Client == nil { - o.Client = imds.NewFromConfig(*cfg) - } - }) - - provider := ec2rolecreds.New(optFns...) - - cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider) - if err != nil { - return err - } - - return nil -} - -func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) { - var ( - envConfig *EnvConfig - sharedConfig *SharedConfig - other configs - ) - - for i := range cfgs { - switch c := cfgs[i].(type) { - case EnvConfig: - if envConfig == nil { - envConfig = &c - } - case *EnvConfig: - if envConfig == nil { - envConfig = c - } - case SharedConfig: - if sharedConfig == nil { - sharedConfig = &c - } - case *SharedConfig: - if envConfig == nil { - sharedConfig = c - } - default: - other = append(other, c) - } - } - - if envConfig == nil { - envConfig = &EnvConfig{} - } - - if sharedConfig == nil { - sharedConfig = &SharedConfig{} - } - - return envConfig, sharedConfig, other -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a -// session when the MFAToken option is not set when shared config is configured -// load assume a role with an MFA token. 
-type AssumeRoleTokenProviderNotSetError struct{} - -// Error is the error message -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} - -func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error { - if len(filepath) == 0 { - return fmt.Errorf("token file path is not set") - } - - optFns := []func(*stscreds.WebIdentityRoleOptions){ - func(options *stscreds.WebIdentityRoleOptions) { - options.RoleSessionName = sessionName - }, - } - - optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - - if found { - optFns = append(optFns, optFn) - } - - opts := stscreds.WebIdentityRoleOptions{ - RoleARN: roleARN, - } - - for _, fn := range optFns { - fn(&opts) - } - - if len(opts.RoleARN) == 0 { - return fmt.Errorf("role ARN is not set") - } - - client := opts.Client - if client == nil { - client = sts.NewFromConfig(*cfg) - } - - provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...) - - cfg.Credentials = provider - - return nil -} - -func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) { - optFns := []func(*stscreds.AssumeRoleOptions){ - func(options *stscreds.AssumeRoleOptions) { - options.RoleSessionName = sharedCfg.RoleSessionName - if sharedCfg.RoleDurationSeconds != nil { - if *sharedCfg.RoleDurationSeconds/time.Minute > 15 { - options.Duration = *sharedCfg.RoleDurationSeconds - } - } - // Assume role with external ID - if len(sharedCfg.ExternalID) > 0 { - options.ExternalID = aws.String(sharedCfg.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.MFASerial) != 0 { - options.SerialNumber = aws.String(sharedCfg.MFASerial) - } - }, - } - - optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs) - if err != nil { - return err - } - if found { - optFns = append(optFns, optFn) - } - - { - // Synthesize options early to validate configuration errors sooner to ensure a token provider - // is present if the SerialNumber was set. - var o stscreds.AssumeRoleOptions - for _, fn := range optFns { - fn(&o) - } - if o.TokenProvider == nil && o.SerialNumber != nil { - return AssumeRoleTokenProviderNotSetError{} - } - } - - cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...) - - return nil -} - -// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache -// with the provided options if the provider is not already a -// aws.CredentialsCache. -func wrapWithCredentialsCache( - ctx context.Context, - cfgs configs, - provider aws.CredentialsProvider, - optFns ...func(options *aws.CredentialsCacheOptions), -) (aws.CredentialsProvider, error) { - _, ok := provider.(*aws.CredentialsCache) - if ok { - return provider, nil - } - - credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs) - if err != nil { - return nil, err - } - - // force allocation of a new slice if the additional options are - // needed, to prevent overwriting the passed in slice of options. 
- optFns = optFns[:len(optFns):len(optFns)] - if optionsFound { - optFns = append(optFns, credCacheOptions) - } - - return aws.NewCredentialsCache(provider, optFns...), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go deleted file mode 100644 index 00b071fe6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ /dev/null @@ -1,1680 +0,0 @@ -package config - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/aws-sdk-go-v2/internal/ini" - "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" - "github.com/aws/smithy-go/logging" - smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" -) - -const ( - // Prefix to use for filtering profiles. The profile prefix should only - // exist in the shared config file, not the credentials file. - profilePrefix = `profile ` - - // Prefix to be used for SSO sections. These are supposed to only exist in - // the shared config file, not the credentials file. - ssoSectionPrefix = `sso-session ` - - // Prefix for services section. It is referenced in profile via the services - // parameter to configure clients for service-specific parameters. - servicesPrefix = `services ` - - // string equivalent for boolean - endpointDiscoveryDisabled = `false` - endpointDiscoveryEnabled = `true` - endpointDiscoveryAuto = `auto` - - // Static Credentials group - accessKeyIDKey = `aws_access_key_id` // group required - secretAccessKey = `aws_secret_access_key` // group required - sessionTokenKey = `aws_session_token` // optional - - // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional - roleDurationSecondsKey = "duration_seconds" // optional - - // AWS Single Sign-On (AWS SSO) group - ssoSessionNameKey = "sso_session" - - ssoRegionKey = "sso_region" - ssoStartURLKey = "sso_start_url" - - ssoAccountIDKey = "sso_account_id" - ssoRoleNameKey = "sso_role_name" - - // Additional Config fields - regionKey = `region` - - // endpoint discovery group - enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional - - // External Credential process - credentialProcessKey = `credential_process` // optional - - // Web Identity Token File - webIdentityTokenFileKey = `web_identity_token_file` // optional - - // S3 ARN Region Usage - s3UseARNRegionKey = "s3_use_arn_region" - - ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" - - ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" - - ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" - - // Use DualStack Endpoint Resolution - useDualStackEndpoint = "use_dualstack_endpoint" - - // DefaultSharedConfigProfile is the default profile to be used when - // loading configuration from the config files if another profile name - // is not provided. 
- DefaultSharedConfigProfile = `default` - - // S3 Disable Multi-Region AccessPoints - s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points` - - useFIPSEndpointKey = "use_fips_endpoint" - - defaultsModeKey = "defaults_mode" - - // Retry options - retryMaxAttemptsKey = "max_attempts" - retryModeKey = "retry_mode" - - caBundleKey = "ca_bundle" - - sdkAppID = "sdk_ua_app_id" - - ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls" - - endpointURL = "endpoint_url" - - servicesSectionKey = "services" - - disableRequestCompression = "disable_request_compression" - requestMinCompressionSizeBytes = "request_min_compression_size_bytes" - - s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" - - accountIDKey = "aws_account_id" - accountIDEndpointMode = "account_id_endpoint_mode" - - requestChecksumCalculationKey = "request_checksum_calculation" - responseChecksumValidationKey = "response_checksum_validation" - checksumWhenSupported = "when_supported" - checksumWhenRequired = "when_required" -) - -// defaultSharedConfigProfile allows for swapping the default profile for testing -var defaultSharedConfigProfile = DefaultSharedConfigProfile - -// DefaultSharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func DefaultSharedCredentialsFilename() string { - return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials") -} - -// DefaultSharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func DefaultSharedConfigFilename() string { - return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config") -} - -// DefaultSharedConfigFiles is a slice of the default shared config files that -// the will be used in order to load the SharedConfig. -var DefaultSharedConfigFiles = []string{ - DefaultSharedConfigFilename(), -} - -// DefaultSharedCredentialsFiles is a slice of the default shared credentials -// files that the will be used in order to load the SharedConfig. -var DefaultSharedCredentialsFiles = []string{ - DefaultSharedCredentialsFilename(), -} - -// SSOSession provides the shared configuration parameters of the sso-session -// section. -type SSOSession struct { - Name string - SSORegion string - SSOStartURL string -} - -func (s *SSOSession) setFromIniSection(section ini.Section) { - updateString(&s.Name, section, ssoSessionNameKey) - updateString(&s.SSORegion, section, ssoRegionKey) - updateString(&s.SSOStartURL, section, ssoStartURLKey) -} - -// Services contains values configured in the services section -// of the AWS configuration file. -type Services struct { - // Services section values - // {"serviceId": {"key": "value"}} - // e.g. {"s3": {"endpoint_url": "example.com"}} - ServiceValues map[string]map[string]string -} - -func (s *Services) setFromIniSection(section ini.Section) { - if s.ServiceValues == nil { - s.ServiceValues = make(map[string]map[string]string) - } - for _, service := range section.List() { - s.ServiceValues[service] = section.Map(service) - } -} - -// SharedConfig represents the configuration fields of the SDK config files. -type SharedConfig struct { - Profile string - - // Credentials values from the config file. 
Both aws_access_key_id - // and aws_secret_access_key must be provided together in the same file - // to be considered valid. The values will be ignored if not a complete group. - // aws_session_token is an optional field that can be provided if both of the - // other two fields are also provided. - // - // aws_access_key_id - // aws_secret_access_key - // aws_session_token - Credentials aws.Credentials - - CredentialSource string - CredentialProcess string - WebIdentityTokenFile string - - // SSO session options - SSOSessionName string - SSOSession *SSOSession - - // Legacy SSO session options - SSORegion string - SSOStartURL string - - // SSO fields not used - SSOAccountID string - SSORoleName string - - RoleARN string - ExternalID string - MFASerial string - RoleSessionName string - RoleDurationSeconds *time.Duration - - SourceProfileName string - Source *SharedConfig - - // Region is the region the SDK should use for looking up AWS service endpoints - // and signing requests. - // - // region = us-west-2 - Region string - - // EnableEndpointDiscovery can be enabled or disabled in the shared config - // by setting endpoint_discovery_enabled to true, or false respectively. - // - // endpoint_discovery_enabled = true - EnableEndpointDiscovery aws.EndpointDiscoveryEnableState - - // Specifies if the S3 service should allow ARNs to direct the region - // the client's requests are sent to. - // - // s3_use_arn_region=true - S3UseARNRegion *bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection - // mode (IPv4 or IPv6) - // - // ec2_metadata_service_endpoint_mode=IPv6 - EC2IMDSEndpointMode imds.EndpointModeState - - // Specifies the EC2 Instance Metadata Service endpoint to use. If - // specified it overrides EC2IMDSEndpointMode. - // - // ec2_metadata_service_endpoint=http://fd00:ec2::254 - EC2IMDSEndpoint string - - // Specifies that IMDS clients should not fallback to IMDSv1 if token - // requests fail. - // - // ec2_metadata_v1_disabled=true - EC2IMDSv1Disabled *bool - - // Specifies if the S3 service should disable support for Multi-Region - // access-points - // - // s3_disable_multiregion_access_points=true - S3DisableMultiRegionAccessPoints *bool - - // Specifies that SDK clients must resolve a dual-stack endpoint for - // services. - // - // use_dualstack_endpoint=true - UseDualStackEndpoint aws.DualStackEndpointState - - // Specifies that SDK clients must resolve a FIPS endpoint for - // services. - // - // use_fips_endpoint=true - UseFIPSEndpoint aws.FIPSEndpointState - - // Specifies which defaults mode should be used by services. - // - // defaults_mode=standard - DefaultsMode aws.DefaultsMode - - // Specifies the maximum number attempts an API client will call an - // operation that fails with a retryable error. - // - // max_attempts=3 - RetryMaxAttempts int - - // Specifies the retry model the API client will be created with. - // - // retry_mode=standard - RetryMode aws.RetryMode - - // Sets the path to a custom Credentials Authority (CA) Bundle PEM file - // that the SDK will use instead of the system's root CA bundle. Only use - // this if you want to configure the SDK to use a custom set of CAs. - // - // Enabling this option will attempt to merge the Transport into the SDK's - // HTTP client. If the client's Transport is not a http.Transport an error - // will be returned. If the Transport's TLS config is set this option will - // cause the SDK to overwrite the Transport's TLS config's RootCAs value. 
- // - // Setting a custom HTTPClient in the aws.Config options will override this - // setting. To use this option and custom HTTP client, the HTTP client - // needs to be provided when creating the config. Not the service client. - // - // ca_bundle=$HOME/my_custom_ca_bundle - CustomCABundle string - - // aws sdk app ID that can be added to user agent header string - AppID string - - // Flag used to disable configured endpoints. - IgnoreConfiguredEndpoints *bool - - // Value to contain configured endpoints to be propagated to - // corresponding endpoint resolution field. - BaseEndpoint string - - // Services section config. - ServicesSectionName string - Services Services - - // determine if request compression is allowed, default to false - // retrieved from config file's profile field disable_request_compression - DisableRequestCompression *bool - - // inclusive threshold request body size to trigger compression, - // default to 10240 and must be within 0 and 10485760 bytes inclusive - // retrieved from config file's profile field request_min_compression_size_bytes - RequestMinCompressSizeBytes *int64 - - // Whether S3Express auth is disabled. - // - // This will NOT prevent requests from being made to S3Express buckets, it - // will only bypass the modified endpoint routing and signing behaviors - // associated with the feature. - S3DisableExpressAuth *bool - - AccountIDEndpointMode aws.AccountIDEndpointMode - - // RequestChecksumCalculation indicates if the request checksum should be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation - - // ResponseChecksumValidation indicates if the response checksum should be validated - ResponseChecksumValidation aws.ResponseChecksumValidation -} - -func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { - if len(c.DefaultsMode) == 0 { - return "", false, nil - } - - return c.DefaultsMode, true, nil -} - -// GetRetryMaxAttempts returns the maximum number of attempts an API client -// created Retryer should attempt an operation call before failing. -func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) { - if c.RetryMaxAttempts == 0 { - return 0, false, nil - } - - return c.RetryMaxAttempts, true, nil -} - -// GetRetryMode returns the model the API client should create its Retryer in. -func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) { - if len(c.RetryMode) == 0 { - return "", false, nil - } - - return c.RetryMode, true, nil -} - -// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region -// the client's requests are sent to. -func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { - if c.S3UseARNRegion == nil { - return false, false, nil - } - - return *c.S3UseARNRegion, true, nil -} - -// GetEnableEndpointDiscovery returns if the enable_endpoint_discovery is set. -func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { - if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { - return aws.EndpointDiscoveryUnset, false, nil - } - - return c.EnableEndpointDiscovery, true, nil -} - -// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region -// access-points. 
-func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { - if c.S3DisableMultiRegionAccessPoints == nil { - return false, false, nil - } - - return *c.S3DisableMultiRegionAccessPoints, true, nil -} - -// GetRegion returns the region for the profile if a region is set. -func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) { - if len(c.Region) == 0 { - return "", false, nil - } - return c.Region, true, nil -} - -// GetCredentialsProvider returns the credentials for a profile if they were set. -func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) { - return c.Credentials, true, nil -} - -// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. -func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { - if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { - return imds.EndpointModeStateUnset, false, nil - } - - return c.EC2IMDSEndpointMode, true, nil -} - -// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. -func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) { - if len(c.EC2IMDSEndpoint) == 0 { - return "", false, nil - } - - return c.EC2IMDSEndpoint, true, nil -} - -// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option -// resolver interface. -func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { - if c.EC2IMDSv1Disabled == nil { - return false, false - } - - return *c.EC2IMDSv1Disabled, true -} - -// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be -// used for requests. -func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { - if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - return aws.DualStackEndpointStateUnset, false, nil - } - - return c.UseDualStackEndpoint, true, nil -} - -// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be -// used for requests. -func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { - if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { - return aws.FIPSEndpointStateUnset, false, nil - } - - return c.UseFIPSEndpoint, true, nil -} - -// GetS3DisableExpressAuth returns the configured value for -// [SharedConfig.S3DisableExpressAuth]. -func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) { - if c.S3DisableExpressAuth == nil { - return false, false - } - - return *c.S3DisableExpressAuth, true -} - -// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was -func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { - if len(c.CustomCABundle) == 0 { - return nil, false, nil - } - - b, err := ioutil.ReadFile(c.CustomCABundle) - if err != nil { - return nil, false, err - } - return bytes.NewReader(b), true, nil -} - -// getAppID returns the sdk app ID if set in shared config profile -func (c SharedConfig) getAppID(context.Context) (string, bool, error) { - return c.AppID, len(c.AppID) > 0, nil -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. 
-func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { - if c.IgnoreConfiguredEndpoints == nil { - return false, false, nil - } - - return *c.IgnoreConfiguredEndpoints, true, nil -} - -func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) { - return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil -} - -// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use -// with configured endpoints. -func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { - if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok { - if endpt, ok := service[endpointURL]; ok { - return endpt, true, nil - } - } - return "", false, nil -} - -func normalizeShared(sdkID string) string { - lower := strings.ToLower(sdkID) - return strings.ReplaceAll(lower, " ", "_") -} - -func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) { - return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil -} - -// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the -// addition of ignoring when none of the files exist or when the profile -// is not found in any of the files. -func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) { - cfg, err := loadSharedConfig(ctx, configs) - if err != nil { - if _, ok := err.(SharedConfigProfileNotExistError); ok { - return SharedConfig{}, nil - } - return nil, err - } - - return cfg, nil -} - -// loadSharedConfig uses the configs passed in to load the SharedConfig from file -// The file names and profile name are sourced from the configs. -// -// If profile name is not provided DefaultSharedConfigProfile (default) will -// be used. -// -// If shared config filenames are not provided DefaultSharedConfigFiles will -// be used. -// -// Config providers used: -// * sharedConfigProfileProvider -// * sharedConfigFilesProvider -func loadSharedConfig(ctx context.Context, configs configs) (Config, error) { - var profile string - var configFiles []string - var credentialsFiles []string - var ok bool - var err error - - profile, ok, err = getSharedConfigProfile(ctx, configs) - if err != nil { - return nil, err - } - if !ok { - profile = defaultSharedConfigProfile - } - - configFiles, ok, err = getSharedConfigFiles(ctx, configs) - if err != nil { - return nil, err - } - - credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs) - if err != nil { - return nil, err - } - - // setup logger if log configuration warning is seti - var logger logging.Logger - logWarnings, found, err := getLogConfigurationWarnings(ctx, configs) - if err != nil { - return SharedConfig{}, err - } - if found && logWarnings { - logger, found, err = getLogger(ctx, configs) - if err != nil { - return SharedConfig{}, err - } - if !found { - logger = logging.NewStandardLogger(os.Stderr) - } - } - - return LoadSharedConfigProfile(ctx, profile, - func(o *LoadSharedConfigOptions) { - o.Logger = logger - o.ConfigFiles = configFiles - o.CredentialsFiles = credentialsFiles - }, - ) -} - -// LoadSharedConfigOptions struct contains optional values that can be used to load the config. 
-type LoadSharedConfigOptions struct { - - // CredentialsFiles are the shared credentials files - CredentialsFiles []string - - // ConfigFiles are the shared config files - ConfigFiles []string - - // Logger is the logger used to log shared config behavior - Logger logging.Logger -} - -// LoadSharedConfigProfile retrieves the configuration from the list of files -// using the profile provided. The order the files are listed will determine -// precedence. Values in subsequent files will overwrite values defined in -// earlier files. -// -// For example, given two files A and B. Both define credentials. If the order -// of the files are A then B, B's credential values will be used instead of A's. -// -// If config files are not set, SDK will default to using a file at location `.aws/config` if present. -// If credentials files are not set, SDK will default to using a file at location `.aws/credentials` if present. -// No default files are set, if files set to an empty slice. -// -// You can read more about shared config and credentials file location at -// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location -func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { - var option LoadSharedConfigOptions - for _, fn := range optFns { - fn(&option) - } - - if option.ConfigFiles == nil { - option.ConfigFiles = DefaultSharedConfigFiles - } - - if option.CredentialsFiles == nil { - option.CredentialsFiles = DefaultSharedCredentialsFiles - } - - // load shared configuration sections from shared configuration INI options - configSections, err := loadIniFiles(option.ConfigFiles) - if err != nil { - return SharedConfig{}, err - } - - // check for profile prefix and drop duplicates or invalid profiles - err = processConfigSections(ctx, &configSections, option.Logger) - if err != nil { - return SharedConfig{}, err - } - - // load shared credentials sections from shared credentials INI options - credentialsSections, err := loadIniFiles(option.CredentialsFiles) - if err != nil { - return SharedConfig{}, err - } - - // check for profile prefix and drop duplicates or invalid profiles - err = processCredentialsSections(ctx, &credentialsSections, option.Logger) - if err != nil { - return SharedConfig{}, err - } - - err = mergeSections(&configSections, credentialsSections) - if err != nil { - return SharedConfig{}, err - } - - cfg := SharedConfig{} - profiles := map[string]struct{}{} - - if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { - return SharedConfig{}, err - } - - return cfg, nil -} - -func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { - skipSections := map[string]struct{}{} - - for _, section := range sections.List() { - if _, ok := skipSections[section]; ok { - continue - } - - // drop sections from config file that do not have expected prefixes. - switch { - case strings.HasPrefix(section, profilePrefix): - // Rename sections to remove "profile " prefixing to match with - // credentials file. If default is already present, it will be - // dropped. 
- newName, err := renameProfileSection(section, sections, logger) - if err != nil { - return fmt.Errorf("failed to rename profile section, %w", err) - } - skipSections[newName] = struct{}{} - - case strings.HasPrefix(section, ssoSectionPrefix): - case strings.HasPrefix(section, servicesPrefix): - case strings.EqualFold(section, "default"): - default: - // drop this section, as invalid profile name - sections.DeleteSection(section) - - if logger != nil { - logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+ - "For use within a shared configuration file, "+ - "a non-default profile must have `profile ` "+ - "prefixed to the profile name.", - section, - ) - } - } - } - return nil -} - -func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) { - v, ok := sections.GetSection(section) - if !ok { - return "", fmt.Errorf("error processing profiles within the shared configuration files") - } - - // delete section with profile as prefix - sections.DeleteSection(section) - - // set the value to non-prefixed name in sections. - section = strings.TrimPrefix(section, profilePrefix) - if sections.HasSection(section) { - oldSection, _ := sections.GetSection(section) - v.Logs = append(v.Logs, - fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+ - "overriding non-default profile from %s", - v.SourceFile, oldSection.SourceFile)) - sections.DeleteSection(section) - } - - // assign non-prefixed name to section - v.Name = section - sections.SetSection(section, v) - - return section, nil -} - -func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { - for _, section := range sections.List() { - // drop profiles with prefix for credential files - if strings.HasPrefix(section, profilePrefix) { - // drop this section, as invalid profile name - sections.DeleteSection(section) - - if logger != nil { - logger.Logf(logging.Debug, - "The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+ - "for the shared credentials file.\n", - section, - ) - } - } - } - return nil -} - -func loadIniFiles(filenames []string) (ini.Sections, error) { - mergedSections := ini.NewSections() - - for _, filename := range filenames { - sections, err := ini.OpenFile(filename) - var v *ini.UnableToReadFile - if ok := errors.As(err, &v); ok { - // Skip files which can't be opened and read for whatever reason. - // We treat such files as empty, and do not fall back to other locations. 
- continue - } else if err != nil { - return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} - } - - // mergeSections into mergedSections - err = mergeSections(&mergedSections, sections) - if err != nil { - return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} - } - } - - return mergedSections, nil -} - -// mergeSections merges source section properties into destination section properties -func mergeSections(dst *ini.Sections, src ini.Sections) error { - for _, sectionName := range src.List() { - srcSection, _ := src.GetSection(sectionName) - - if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) || - (srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) { - srcSection.Errors = append(srcSection.Errors, - fmt.Errorf("partial credentials found for profile %v", sectionName)) - } - - if !dst.HasSection(sectionName) { - dst.SetSection(sectionName, srcSection) - continue - } - - // merge with destination srcSection - dstSection, _ := dst.GetSection(sectionName) - - // errors should be overriden if any - dstSection.Errors = srcSection.Errors - - // Access key id update - if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) { - accessKey := srcSection.String(accessKeyIDKey) - secretKey := srcSection.String(secretAccessKey) - - if dstSection.Has(accessKeyIDKey) { - dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey, - dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey])) - } - - // update access key - v, err := ini.NewStringValue(accessKey) - if err != nil { - return fmt.Errorf("error merging access key, %w", err) - } - dstSection.UpdateValue(accessKeyIDKey, v) - - // update secret key - v, err = ini.NewStringValue(secretKey) - if err != nil { - return fmt.Errorf("error merging secret key, %w", err) - } - dstSection.UpdateValue(secretAccessKey, v) - - // update session token - if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil { - return err - } - - // update source file to reflect where the static creds came from - dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey]) - dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey]) - } - - stringKeys := []string{ - roleArnKey, - sourceProfileKey, - credentialSourceKey, - externalIDKey, - mfaSerialKey, - roleSessionNameKey, - regionKey, - enableEndpointDiscoveryKey, - credentialProcessKey, - webIdentityTokenFileKey, - s3UseARNRegionKey, - s3DisableMultiRegionAccessPointsKey, - ec2MetadataServiceEndpointModeKey, - ec2MetadataServiceEndpointKey, - ec2MetadataV1DisabledKey, - useDualStackEndpoint, - useFIPSEndpointKey, - defaultsModeKey, - retryModeKey, - caBundleKey, - roleDurationSecondsKey, - retryMaxAttemptsKey, - - ssoSessionNameKey, - ssoAccountIDKey, - ssoRegionKey, - ssoRoleNameKey, - ssoStartURLKey, - } - for i := range stringKeys { - if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil { - return err - } - } - - // set srcSection on dst srcSection - *dst = dst.SetSection(sectionName, dstSection) - } - - return nil -} - -func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error { - if srcSection.Has(key) { - srcValue := srcSection.String(key) - val, err := ini.NewStringValue(srcValue) - if err != nil { - return fmt.Errorf("error merging %s, %w", key, err) - } - - if dstSection.Has(key) { - dstSection.Logs = 
append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key, - dstSection.SourceFile[key], srcSection.SourceFile[key])) - } - - dstSection.UpdateValue(key, val) - dstSection.UpdateSourceFile(key, srcSection.SourceFile[key]) - } - return nil -} - -func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string { - return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+ - "with a %v value found in a duplicate profile defined at file %v. \n", - sectionName, key, dstSourceFile, key, srcSourceFile) -} - -// Returns an error if all of the files fail to load. If at least one file is -// successfully loaded and contains the profile, no error will be returned. -func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string, - sections ini.Sections, logger logging.Logger) error { - c.Profile = profile - - section, ok := sections.GetSection(profile) - if !ok { - return SharedConfigProfileNotExistError{ - Profile: profile, - } - } - - // if logs are appended to the section, log them - if section.Logs != nil && logger != nil { - for _, log := range section.Logs { - logger.Logf(logging.Debug, log) - } - } - - // set config from the provided INI section - err := c.setFromIniSection(profile, section) - if err != nil { - return fmt.Errorf("error fetching config from profile, %v, %w", profile, err) - } - - if _, ok := profiles[profile]; ok { - // if this is the second instance of the profile the Assume Role - // options must be cleared because they are only valid for the - // first reference of a profile. The self linked instance of the - // profile only have credential provider options. - c.clearAssumeRoleOptions() - } else { - // First time a profile has been seen. Assert if the credential type - // requires a role ARN, the ARN is also set - if err := c.validateCredentialsConfig(profile); err != nil { - return err - } - } - - // if not top level profile and has credentials, return with credentials. - if len(profiles) != 0 && c.Credentials.HasKeys() { - return nil - } - - profiles[profile] = struct{}{} - - // validate no colliding credentials type are present - if err := c.validateCredentialType(); err != nil { - return err - } - - // Link source profiles for assume roles - if len(c.SourceProfileName) != 0 { - // Linked profile via source_profile ignore credential provider - // options, the source profile must provide the credentials. - c.clearCredentialOptions() - - srcCfg := &SharedConfig{} - err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger) - if err != nil { - // SourceProfileName that doesn't exist is an error in configuration. - if _, ok := err.(SharedConfigProfileNotExistError); ok { - err = SharedConfigAssumeRoleError{ - RoleARN: c.RoleARN, - Profile: c.SourceProfileName, - Err: err, - } - } - return err - } - - if !srcCfg.hasCredentials() { - return SharedConfigAssumeRoleError{ - RoleARN: c.RoleARN, - Profile: c.SourceProfileName, - } - } - - c.Source = srcCfg - } - - // If the profile contains an SSO session parameter, the session MUST exist - // as a section in the config file. Load the SSO session using the name - // provided. If the session section is not found or incomplete an error - // will be returned. 
- if c.hasSSOTokenProviderConfiguration() { - section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName)) - if !ok { - return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName) - } - var ssoSession SSOSession - ssoSession.setFromIniSection(section) - ssoSession.Name = c.SSOSessionName - c.SSOSession = &ssoSession - } - - if len(c.ServicesSectionName) > 0 { - if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok { - var svcs Services - svcs.setFromIniSection(section) - c.Services = svcs - } - } - - return nil -} - -// setFromIniSection loads the configuration from the profile section defined in -// the provided INI file. A SharedConfig pointer type value is used so that -// multiple config file loadings can be chained. -// -// Only loads complete logically grouped values, and will not set fields in cfg -// for incomplete grouped values in the config. Such as credentials. For example -// if a config file only includes aws_access_key_id but no aws_secret_access_key -// the aws_access_key_id will be ignored. -func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error { - if len(section.Name) == 0 { - sources := make([]string, 0) - for _, v := range section.SourceFile { - sources = append(sources, v) - } - - return fmt.Errorf("parsing error : could not find profile section name after processing files: %v", sources) - } - - if len(section.Errors) != 0 { - var errStatement string - for i, e := range section.Errors { - errStatement = fmt.Sprintf("%d, %v\n", i+1, e.Error()) - } - return fmt.Errorf("Error using profile: \n %v", errStatement) - } - - // Assume Role - updateString(&c.RoleARN, section, roleArnKey) - updateString(&c.ExternalID, section, externalIDKey) - updateString(&c.MFASerial, section, mfaSerialKey) - updateString(&c.RoleSessionName, section, roleSessionNameKey) - updateString(&c.SourceProfileName, section, sourceProfileKey) - updateString(&c.CredentialSource, section, credentialSourceKey) - updateString(&c.Region, section, regionKey) - - // AWS Single Sign-On (AWS SSO) - // SSO session options - updateString(&c.SSOSessionName, section, ssoSessionNameKey) - - // Legacy SSO session options - updateString(&c.SSORegion, section, ssoRegionKey) - updateString(&c.SSOStartURL, section, ssoStartURLKey) - - // SSO fields not used - updateString(&c.SSOAccountID, section, ssoAccountIDKey) - updateString(&c.SSORoleName, section, ssoRoleNameKey) - - // we're retaining a behavioral quirk with this field that existed before - // the removal of literal parsing for #2276: - // - if the key is missing, the config field will not be set - // - if the key is set to a non-numeric, the config field will be set to 0 - if section.Has(roleDurationSecondsKey) { - if v, ok := section.Int(roleDurationSecondsKey); ok { - c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second) - } else { - c.RoleDurationSeconds = aws.Duration(time.Duration(0)) - } - } - - updateString(&c.CredentialProcess, section, credentialProcessKey) - updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey) - - updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) - updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey) - updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey) - updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey) - - if err := 
updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err) - } - updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) - updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) - - updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint) - updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey) - - if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err) - } - - if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err) - } - if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err) - } - - updateString(&c.CustomCABundle, section, caBundleKey) - - // user agent app ID added to request User-Agent header - updateString(&c.AppID, section, sdkAppID) - - updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints) - - updateString(&c.BaseEndpoint, section, endpointURL) - - if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err) - } - if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) - } - - if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err) - } - - if err := updateRequestChecksumCalculation(&c.RequestChecksumCalculation, section, requestChecksumCalculationKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", requestChecksumCalculationKey, err) - } - if err := updateResponseChecksumValidation(&c.ResponseChecksumValidation, section, responseChecksumValidationKey); err != nil { - return fmt.Errorf("failed to load %s from shared config, %w", responseChecksumValidationKey, err) - } - - // Shared Credentials - creds := aws.Credentials{ - AccessKeyID: section.String(accessKeyIDKey), - SecretAccessKey: section.String(secretAccessKey), - SessionToken: section.String(sessionTokenKey), - Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), - AccountID: section.String(accountIDKey), - } - - if creds.HasKeys() { - c.Credentials = creds - } - - updateString(&c.ServicesSectionName, section, servicesSectionKey) - - return nil -} - -func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v, ok := sec.Int(key) - if !ok { - return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key)) - } - if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes { - return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v) - } - *bytes = new(int64) - **bytes = v - return nil -} - -func 
updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch { - case v == "true": - *disable = new(bool) - **disable = true - case v == "false": - *disable = new(bool) - **disable = false - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v) - } - return nil -} - -func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch v { - case "preferred": - *m = aws.AccountIDEndpointModePreferred - case "required": - *m = aws.AccountIDEndpointModeRequired - case "disabled": - *m = aws.AccountIDEndpointModeDisabled - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v) - } - - return nil -} - -func updateRequestChecksumCalculation(m *aws.RequestChecksumCalculation, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch strings.ToLower(v) { - case checksumWhenSupported: - *m = aws.RequestChecksumCalculationWhenSupported - case checksumWhenRequired: - *m = aws.RequestChecksumCalculationWhenRequired - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v) - } - - return nil -} - -func updateResponseChecksumValidation(m *aws.ResponseChecksumValidation, sec ini.Section, key string) error { - if !sec.Has(key) { - return nil - } - - v := sec.String(key) - switch strings.ToLower(v) { - case checksumWhenSupported: - *m = aws.ResponseChecksumValidationWhenSupported - case checksumWhenRequired: - *m = aws.ResponseChecksumValidationWhenRequired - default: - return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v) - } - - return nil -} - -func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { - if c.RequestMinCompressSizeBytes == nil { - return 0, false, nil - } - return *c.RequestMinCompressSizeBytes, true, nil -} - -func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { - if c.DisableRequestCompression == nil { - return false, false, nil - } - return *c.DisableRequestCompression, true, nil -} - -func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { - return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil -} - -func (c SharedConfig) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { - return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil -} - -func (c SharedConfig) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) { - return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil -} - -func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - value := section.String(key) - if ok := mode.SetFromString(value); !ok { - return fmt.Errorf("invalid value: %s", value) - } - return nil -} - -func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) { - if !section.Has(key) { - return nil - } - value := section.String(key) - if *mode, err = aws.ParseRetryMode(value); err != nil { - return err - } - return nil -} - 
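
For context on the shared-config loading behavior described in the deleted `LoadSharedConfigProfile` docs above (later files override earlier ones for the same profile, with `.aws/config` and `.aws/credentials` as defaults), here is a minimal sketch of how a caller consumes that logic through the public `config` API rather than the removed vendored copy. The profile name and file paths are placeholders, not values from this repository.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Hypothetical profile and paths; values for the same profile defined in a
	// later file in the slice override values from earlier files, mirroring the
	// precedence documented in LoadSharedConfigProfile.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithSharedConfigProfile("backup"),
		config.WithSharedConfigFiles([]string{"/etc/pbm/aws_config", "/home/user/.aws/config"}),
		config.WithSharedCredentialsFiles([]string{"/home/user/.aws/credentials"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg.Region // region resolved from the selected profile, if one was set
}
```

With the vendor directory gone, this API is now resolved from the module cache (`-mod=mod`) instead of the deleted `vendor/` tree.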
-func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - value := section.String(key) - return endpointMode.SetFromString(value) -} - -func (c *SharedConfig) validateCredentialsConfig(profile string) error { - if err := c.validateCredentialsRequireARN(profile); err != nil { - return err - } - - return nil -} - -func (c *SharedConfig) validateCredentialsRequireARN(profile string) error { - var credSource string - - switch { - case len(c.SourceProfileName) != 0: - credSource = sourceProfileKey - case len(c.CredentialSource) != 0: - credSource = credentialSourceKey - case len(c.WebIdentityTokenFile) != 0: - credSource = webIdentityTokenFileKey - } - - if len(credSource) != 0 && len(c.RoleARN) == 0 { - return CredentialRequiresARNError{ - Type: credSource, - Profile: profile, - } - } - - return nil -} - -func (c *SharedConfig) validateCredentialType() error { - // Only one or no credential type can be defined. - if !oneOrNone( - len(c.SourceProfileName) != 0, - len(c.CredentialSource) != 0, - len(c.CredentialProcess) != 0, - len(c.WebIdentityTokenFile) != 0, - ) { - return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token") - } - - return nil -} - -func (c *SharedConfig) validateSSOConfiguration() error { - if c.hasSSOTokenProviderConfiguration() { - err := c.validateSSOTokenProviderConfiguration() - if err != nil { - return err - } - return nil - } - - if c.hasLegacySSOConfiguration() { - err := c.validateLegacySSOConfiguration() - if err != nil { - return err - } - } - return nil -} - -func (c *SharedConfig) validateSSOTokenProviderConfiguration() error { - var missing []string - - if len(c.SSOSessionName) == 0 { - missing = append(missing, ssoSessionNameKey) - } - - if c.SSOSession == nil { - missing = append(missing, ssoSectionPrefix) - } else { - if len(c.SSOSession.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(c.SSOSession.SSOStartURL) == 0 { - missing = append(missing, ssoStartURLKey) - } - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - c.Profile, strings.Join(missing, ", ")) - } - - if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { - return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) - } - - if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { - return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix) - } - - return nil -} - -func (c *SharedConfig) validateLegacySSOConfiguration() error { - var missing []string - - if len(c.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(c.SSOStartURL) == 0 { - missing = append(missing, ssoStartURLKey) - } - - if len(c.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(c.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - c.Profile, strings.Join(missing, ", ")) - } - return nil -} - -func (c *SharedConfig) hasCredentials() bool { - switch { - case len(c.SourceProfileName) != 0: - case len(c.CredentialSource) != 0: - case len(c.CredentialProcess) != 0: - case len(c.WebIdentityTokenFile) 
!= 0: - case c.hasSSOConfiguration(): - case c.Credentials.HasKeys(): - default: - return false - } - - return true -} - -func (c *SharedConfig) hasSSOConfiguration() bool { - return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration() -} - -func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool { - return len(c.SSOSessionName) > 0 -} - -func (c *SharedConfig) hasLegacySSOConfiguration() bool { - return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 -} - -func (c *SharedConfig) clearAssumeRoleOptions() { - c.RoleARN = "" - c.ExternalID = "" - c.MFASerial = "" - c.RoleSessionName = "" - c.SourceProfileName = "" -} - -func (c *SharedConfig) clearCredentialOptions() { - c.CredentialSource = "" - c.CredentialProcess = "" - c.WebIdentityTokenFile = "" - c.Credentials = aws.Credentials{} - c.SSOAccountID = "" - c.SSORegion = "" - c.SSORoleName = "" - c.SSOStartURL = "" -} - -// SharedConfigLoadError is an error for the shared config file failed to load. -type SharedConfigLoadError struct { - Filename string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigLoadError) Unwrap() error { - return e.Err -} - -func (e SharedConfigLoadError) Error() string { - return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err) -} - -// SharedConfigProfileNotExistError is an error for the shared config when -// the profile was not find in the config file. -type SharedConfigProfileNotExistError struct { - Filename []string - Profile string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigProfileNotExistError) Unwrap() error { - return e.Err -} - -func (e SharedConfigProfileNotExistError) Error() string { - return fmt.Sprintf("failed to get shared config profile, %s", e.Profile) -} - -// SharedConfigAssumeRoleError is an error for the shared config when the -// profile contains assume role information, but that information is invalid -// or not complete. -type SharedConfigAssumeRoleError struct { - Profile string - RoleARN string - Err error -} - -// Unwrap returns the underlying error that caused the failure. -func (e SharedConfigAssumeRoleError) Unwrap() error { - return e.Err -} - -func (e SharedConfigAssumeRoleError) Error() string { - return fmt.Sprintf("failed to load assume role %s, of profile %s, %v", - e.RoleARN, e.Profile, e.Err) -} - -// CredentialRequiresARNError provides the error for shared config credentials -// that are incorrectly configured in the shared config or credentials file. -type CredentialRequiresARNError struct { - // type of credentials that were configured. - Type string - - // Profile name the credentials were in. - Profile string -} - -// Error satisfies the error interface. -func (e CredentialRequiresARNError) Error() string { - return fmt.Sprintf( - "credential type %s requires role_arn, profile %s", - e.Type, e.Profile, - ) -} - -func oneOrNone(bs ...bool) bool { - var count int - - for _, b := range bs { - if b { - count++ - if count > 1 { - return false - } - } - } - - return true -} - -// updateString will only update the dst with the value in the section key, key -// is present in the section. -func updateString(dst *string, section ini.Section, key string) { - if !section.Has(key) { - return - } - *dst = section.String(key) -} - -// updateInt will only update the dst with the value in the section key, key -// is present in the section. 
-// -// Down casts the INI integer value from a int64 to an int, which could be -// different bit size depending on platform. -func updateInt(dst *int, section ini.Section, key string) error { - if !section.Has(key) { - return nil - } - - v, ok := section.Int(key) - if !ok { - return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key)) - } - - *dst = int(v) - return nil -} - -// updateBool will only update the dst with the value in the section key, key -// is present in the section. -func updateBool(dst *bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - v, _ := section.Bool(key) - *dst = v -} - -// updateBoolPtr will only update the dst with the value in the section key, -// key is present in the section. -func updateBoolPtr(dst **bool, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - v, _ := section.Bool(key) - *dst = new(bool) - **dst = v -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - value := section.String(key) - if len(value) == 0 { - return - } - - switch { - case strings.EqualFold(value, endpointDiscoveryDisabled): - *dst = aws.EndpointDiscoveryDisabled - case strings.EqualFold(value, endpointDiscoveryEnabled): - *dst = aws.EndpointDiscoveryEnabled - case strings.EqualFold(value, endpointDiscoveryAuto): - *dst = aws.EndpointDiscoveryAuto - } -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. -func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - if v, _ := section.Bool(key); v { - *dst = aws.DualStackEndpointStateEnabled - } else { - *dst = aws.DualStackEndpointStateDisabled - } - - return -} - -// updateEndpointDiscoveryType will only update the dst with the value in the section, if -// a valid key and corresponding EndpointDiscoveryType is found. 
-func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) { - if !section.Has(key) { - return - } - - // retains pre-#2276 behavior where non-bool value would resolve to false - if v, _ := section.Bool(key); v { - *dst = aws.FIPSEndpointStateEnabled - } else { - *dst = aws.FIPSEndpointStateDisabled - } - - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md deleted file mode 100644 index 16337f8a7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ /dev/null @@ -1,698 +0,0 @@ -# v1.17.53 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.52 (2025-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.51 (2025-01-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.50 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.49 (2025-01-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.48 (2024-12-19) - -* **Bug Fix**: Fix improper use of printf-style functions. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.47 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.46 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.45 (2024-11-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.44 (2024-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.43 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.42 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.41 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.40 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.39 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.38 (2024-10-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.37 (2024-09-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.36 (2024-09-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.35 (2024-09-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.34 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.33 (2024-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.32 (2024-09-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.31 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.30 (2024-08-26) - -* **Bug Fix**: Save SSO cached token expiry in UTC to ensure cross-SDK compatibility. - -# v1.17.29 (2024-08-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.28 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.27 (2024-07-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.26 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.25 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.24 (2024-07-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.23 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.22 (2024-06-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.21 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.20 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.19 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.18 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.17 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.16 (2024-05-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.15 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.14 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.13 (2024-05-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.12 (2024-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.11 (2024-04-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.10 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2024-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.8 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2024-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2024-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2024-01-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2024-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.14 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2023-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2023-12-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2023-12-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2023-11-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2023-11-21) - -* **Bug Fix**: Don't expect error responses to have a JSON payload in the endpointcreds provider. - -# v1.16.3 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2023-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-11-14) - -* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. - -# v1.15.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-11-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.43 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.42 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.41 (2023-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.40 (2023-09-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.39 (2023-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.38 (2023-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.37 (2023-09-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.36 (2023-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.31 (2023-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.30 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.29 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.28 (2023-07-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.27 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.26 (2023-06-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.25 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.24 (2023-05-09) - -* No change notes available for this release. - -# v1.13.23 (2023-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.22 (2023-05-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.21 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.20 (2023-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.19 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.18 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.17 (2023-03-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.16 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.15 (2023-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.14 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2023-02-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2023-02-01) - -* No change notes available for this release. 
- -# v1.13.10 (2023-01-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2023-01-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2023-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2022-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2022-12-15) - -* **Bug Fix**: Unify logic between shared config and in finding home directory -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-11-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-11-11) - -* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 -* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider - -# v1.12.24 (2022-11-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.23 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.22 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.21 (2022-09-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.20 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.19 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.18 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.17 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.16 (2022-08-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.15 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2022-08-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.9 (2022-07-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-05-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 
(2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-04-25) - -* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.2 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-03-23) - -* **Feature**: Update `ec2rolecreds` package's `Provider` to implememnt support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-02-24) - -* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575) -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.5 (2021-12-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.4 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.3 (2021-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.2 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-11-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-09-10) - -* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders. - -# v1.4.0 (2021-08-27) - -* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723 -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go deleted file mode 100644 index f6e2873ab..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package credentials provides types for retrieving credentials from credentials sources. -*/ -package credentials diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go deleted file mode 100644 index 6ed71b42b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package ec2rolecreds provides the credentials provider implementation for -// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS. -// -// # Concurrency and caching -// -// The Provider is not safe to be used concurrently, and does not provide any -// caching of credentials retrieved. 
You should wrap the Provider with a -// `aws.CredentialsCache` to provide concurrency safety, and caching of -// credentials. -// -// # Loading credentials with the SDK's AWS Config -// -// The EC2 Instance role credentials provider will automatically be the resolved -// credential provider in the credential chain if no other credential provider is -// resolved first. -// -// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance -// role for credentials, you specify a `credentials_source` property in the config -// profile the SDK will load. -// -// [default] -// credential_source = Ec2InstanceMetadata -// -// # Loading credentials with the Provider directly -// -// Another way to use the EC2 Instance role credentials provider is to create it -// directly and assign it as the credentials provider for an API client. -// -// The following example creates a credentials provider for a command, and wraps -// it with the CredentialsCache before assigning the provider to the Amazon S3 API -// client's Credentials option. -// -// provider := imds.New(imds.Options{}) -// -// // Create the service client value configured for credentials. -// svc := s3.New(s3.Options{ -// Credentials: aws.NewCredentialsCache(provider), -// }) -// -// If you need more control, you can set the configuration options on the -// credentials provider using the imds.Options type to configure the EC2 IMDS -// API Client and ExpiryWindow of the retrieved credentials. -// -// provider := imds.New(imds.Options{ -// // See imds.Options type's documentation for more options available. -// Client: imds.New(Options{ -// HTTPClient: customHTTPClient, -// }), -// -// // Modify how soon credentials expire prior to their original expiry time. -// ExpiryWindow: 5 * time.Minute, -// }) -// -// # EC2 IMDS API Client -// -// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on -// configuring the client, and options available. -package ec2rolecreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go deleted file mode 100644 index 5c699f166..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go +++ /dev/null @@ -1,229 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "context" - "encoding/json" - "fmt" - "math" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the -// GetMetadata operation. -type GetMetadataAPIClient interface { - GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error) -} - -// A Provider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// The New function must be used to create the with a custom EC2 IMDS client. -// -// p := &ec2rolecreds.New(func(o *ec2rolecreds.Options{ -// o.Client = imds.New(imds.Options{/* custom options */}) -// }) -type Provider struct { - options Options -} - -// Options is a list of user settable options for setting the behavior of the Provider. 
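For reference, a minimal sketch of the usage the ec2rolecreds package documentation above describes: build the provider (optionally with a customized EC2 IMDS client) and wrap it in `aws.CredentialsCache`, since the provider itself is neither concurrency safe nor caching. Only calls shown in the deleted doc.go/provider.go are used; the HTTP client timeout is an illustrative assumption.

```go
// Sketch, assuming the ec2rolecreds API shown in the removed vendor sources.
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// Provider with a customized IMDS client (the timeout is illustrative).
	provider := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
		o.Client = imds.New(imds.Options{
			HTTPClient: &http.Client{Timeout: 5 * time.Second},
		})
	})

	// The provider does not cache and is not safe for concurrent use,
	// so wrap it before handing it to API clients.
	cache := aws.NewCredentialsCache(provider)

	creds, err := cache.Retrieve(context.TODO())
	if err != nil {
		log.Fatalf("retrieve credentials: %v", err) // expected off EC2
	}
	fmt.Println("source:", creds.Source, "expires:", creds.Expires)
}
```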
-type Options struct { - // The API client that will be used by the provider to make GetMetadata API - // calls to EC2 IMDS. - // - // If nil, the provider will default to the EC2 IMDS client. - Client GetMetadataAPIClient -} - -// New returns an initialized Provider value configured to retrieve -// credentials from EC2 Instance Metadata service. -func New(optFns ...func(*Options)) *Provider { - options := Options{} - - for _, fn := range optFns { - fn(&options) - } - - if options.Client == nil { - options.Client = imds.New(imds.Options{}) - } - - return &Provider{ - options: options, - } -} - -// Retrieve retrieves credentials from the EC2 service. Error will be returned -// if the request fails, or unable to extract the desired credentials. -func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - credsList, err := requestCredList(ctx, p.options.Client) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - if len(credsList) == 0 { - return aws.Credentials{Source: ProviderName}, - fmt.Errorf("unexpected empty EC2 IMDS role list") - } - credsName := credsList[0] - - roleCreds, err := requestCred(ctx, p.options.Client, credsName) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - creds := aws.Credentials{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - Source: ProviderName, - - CanExpire: true, - Expires: roleCreds.Expiration, - } - - // Cap role credentials Expires to 1 hour so they can be refreshed more - // often. Jitter will be applied credentials cache if being used. - if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) { - creds.Expires = anHour - } - - return creds, nil -} - -// HandleFailToRefresh will extend the credentials Expires time if it it is -// expired. If the credentials will not expire within the minimum time, they -// will be returned. -// -// If the credentials cannot expire, the original error will be returned. -func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) ( - aws.Credentials, error, -) { - if !prevCreds.CanExpire { - return aws.Credentials{}, err - } - - if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) { - return prevCreds, nil - } - - newCreds := prevCreds - randFloat64, err := sdkrand.CryptoRandFloat64() - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err) - } - - // Random distribution of [5,15) minutes. - expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute - newCreds.Expires = sdk.NowTime().Add(expireOffset) - - logger := middleware.GetLogger(ctx) - logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes())) - - return newCreds, nil -} - -// AdjustExpiresBy will adds the passed in duration to the passed in -// credential's Expires time, unless the time until Expires is less than 15 -// minutes. Returns the credentials, even if not updated. 
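The Retrieve, HandleFailToRefresh, and AdjustExpiresBy methods above are the hooks that `aws.CredentialsCache` consumes through its optional caching-strategy interfaces, as noted in the v1.11.0 changelog entry earlier in this diff. A compile-time sketch, assuming those interfaces are exported from the `aws` package under the names used in that entry and that `CredentialsCacheOptions` exposes `ExpiryWindow`/`ExpiryWindowJitterFrac`; the window values are illustrative.

```go
// Compile-time sketch; interface names assumed from the v1.11.0 changelog entry.
package credexample

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
)

var (
	_ aws.CredentialsProvider                       = (*ec2rolecreds.Provider)(nil)
	_ aws.HandleFailRefreshCredentialsCacheStrategy = (*ec2rolecreds.Provider)(nil)
	_ aws.AdjustExpiresByCredentialsCacheStrategy   = (*ec2rolecreds.Provider)(nil)
)

// newCachedProvider refreshes inside an expiry window instead of at the
// hard expiry time; values below are illustrative.
func newCachedProvider() *aws.CredentialsCache {
	return aws.NewCredentialsCache(ec2rolecreds.New(), func(o *aws.CredentialsCacheOptions) {
		o.ExpiryWindow = 10 * time.Minute
		o.ExpiryWindowJitterFrac = 0.5
	})
}
```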
-func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) ( - aws.Credentials, error, -) { - if !creds.CanExpire { - return creds, nil - } - if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) { - return creds, nil - } - - creds.Expires = creds.Expires.Add(dur) - return creds, nil -} - -// ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "/iam/security-credentials/" - -// requestCredList requests a list of credentials from the EC2 service. If -// there are no credentials, or there is an error making or receiving the -// request -func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) { - resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{ - Path: iamSecurityCredsPath, - }) - if err != nil { - return nil, fmt.Errorf("no EC2 IMDS role found, %w", err) - } - defer resp.Content.Close() - - credsList := []string{} - s := bufio.NewScanner(resp.Content) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. -func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{ - Path: path.Join(iamSecurityCredsPath, credsName), - }) - if err != nil { - return ec2RoleCredRespBody{}, - fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w", - credsName, err) - } - defer resp.Content.Close() - - var respCreds ec2RoleCredRespBody - if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w", - credsName, err) - } - - if !strings.EqualFold(respCreds.Code, "Success") { - // If an error code was returned something failed requesting the role. 
- return ec2RoleCredRespBody{}, - fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w", - credsName, - &smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message}) - } - - return respCreds, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go deleted file mode 100644 index c3f5dadce..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go +++ /dev/null @@ -1,48 +0,0 @@ -package client - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go deleted file mode 100644 index dc291c97c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go +++ /dev/null @@ -1,165 +0,0 @@ -package client - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/smithy-go" - smithymiddleware "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ServiceID is the client identifer -const ServiceID = "endpoint-credentials" - -// HTTPClient is a client for sending HTTP requests -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Options is the endpoint client configurable options -type Options struct { - // The endpoint to retrieve credentials from - Endpoint string - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - Retryer aws.Retryer - - // Set of options to modify how the credentials operation is invoked. - APIOptions []func(*smithymiddleware.Stack) error -} - -// Copy creates a copy of the API options. 
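The requestCredList/requestCred helpers above define the two-step IMDS lookup: list role names under `/iam/security-credentials/`, then fetch and JSON-decode the named role. A test-style sketch of that contract, using a hypothetical in-memory `GetMetadataAPIClient` in place of a real instance; the role name and credential values are made up.

```go
// Test-style sketch of the IMDS lookup performed by ec2rolecreds.Provider.Retrieve.
package credexample

import (
	"context"
	"io"
	"strings"
	"testing"

	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

type fakeIMDS struct{}

func (fakeIMDS) GetMetadata(_ context.Context, in *imds.GetMetadataInput, _ ...func(*imds.Options)) (*imds.GetMetadataOutput, error) {
	var body string
	if in.Path == "/iam/security-credentials/" {
		body = "my-instance-role\n" // role list: one name per line
	} else {
		// Credentials document for the named role, matching ec2RoleCredRespBody.
		body = `{"Code":"Success","AccessKeyId":"AKIDEXAMPLE","SecretAccessKey":"secret","Token":"token","Expiration":"2030-01-01T00:00:00Z"}`
	}
	return &imds.GetMetadataOutput{Content: io.NopCloser(strings.NewReader(body))}, nil
}

func TestRetrieveFromIMDS(t *testing.T) {
	p := ec2rolecreds.New(func(o *ec2rolecreds.Options) { o.Client = fakeIMDS{} })

	creds, err := p.Retrieve(context.Background())
	if err != nil {
		t.Fatalf("retrieve: %v", err)
	}
	// Retrieve caps Expires at one hour from now, so only check the basics here.
	if creds.AccessKeyID != "AKIDEXAMPLE" || !creds.CanExpire {
		t.Fatalf("unexpected credentials: %+v", creds)
	}
}
```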
-func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - return to -} - -// Client is an client for retrieving AWS credentials from an endpoint -type Client struct { - options Options -} - -// New constructs a new Client from the given options -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - if options.HTTPClient == nil { - options.HTTPClient = awshttp.NewBuildableClient() - } - - if options.Retryer == nil { - // Amazon-owned implementations of this endpoint are known to sometimes - // return plaintext responses (i.e. no Code) like normal, add a few - // additional status codes - options.Retryer = retry.NewStandard(func(o *retry.StandardOptions) { - o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{ - Codes: map[int]struct{}{ - http.StatusTooManyRequests: {}, - }, - }) - }) - } - - for _, fn := range optFns { - fn(&options) - } - - client := &Client{ - options: options, - } - - return client -} - -// GetCredentialsInput is the input to send with the endpoint service to receive credentials. -type GetCredentialsInput struct { - AuthorizationToken string -} - -// GetCredentials retrieves credentials from credential endpoint -func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) { - stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest) - options := c.options.Copy() - for _, fn := range optFns { - fn(&options) - } - - stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After) - stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After) - stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After) - addProtocolFinalizerMiddlewares(stack, options, "GetCredentials") - retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer}) - middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID) - smithyhttp.AddErrorCloseResponseBodyMiddleware(stack) - smithyhttp.AddCloseResponseBodyMiddleware(stack) - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, err - } - } - - handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, _, err := handler.Handle(ctx, params) - if err != nil { - return nil, err - } - - return result.(*GetCredentialsOutput), err -} - -// GetCredentialsOutput is the response from the credential endpoint -type GetCredentialsOutput struct { - Expiration *time.Time - AccessKeyID string - SecretAccessKey string - Token string - AccountID string -} - -// EndpointError is an error returned from the endpoint service -type EndpointError struct { - Code string `json:"code"` - Message string `json:"message"` - Fault smithy.ErrorFault `json:"-"` - statusCode int `json:"-"` -} - -// Error is the error mesage string -func (e *EndpointError) Error() string { - return fmt.Sprintf("%s: %s", e.Code, e.Message) -} - -// ErrorCode is the error code returned by the endpoint -func (e *EndpointError) ErrorCode() string { - return e.Code -} - -// ErrorMessage is the error message returned by the endpoint -func (e *EndpointError) ErrorMessage() string { - return e.Message -} - -// ErrorFault indicates error fault classification -func (e *EndpointError) ErrorFault() smithy.ErrorFault { - return e.Fault -} - -// HTTPStatusCode implements 
retry.HTTPStatusCode. -func (e *EndpointError) HTTPStatusCode() int { - return e.statusCode -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go deleted file mode 100644 index 748ee6724..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go deleted file mode 100644 index f2820d20e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go +++ /dev/null @@ -1,164 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/url" - - "github.com/aws/smithy-go" - smithymiddleware "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type buildEndpoint struct { - Endpoint string -} - -func (b *buildEndpoint) ID() string { - return "BuildEndpoint" -} - -func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) ( - out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport, %T", in.Request) - } - - if len(b.Endpoint) == 0 { - return out, metadata, fmt.Errorf("endpoint not provided") - } - - parsed, err := url.Parse(b.Endpoint) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err) - } - - request.URL = parsed - - return next.HandleBuild(ctx, in) -} - -type serializeOpGetCredential struct{} - -func (s *serializeOpGetCredential) ID() string { - return "OperationSerializer" -} - -func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) ( - out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request) - } - - params, ok := in.Parameters.(*GetCredentialsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters) - } - - const acceptHeader = "Accept" - request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json") - - if len(params.AuthorizationToken) > 0 { - const authHeader = "Authorization" - request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken) - } - - return next.HandleSerialize(ctx, in) -} - -type deserializeOpGetCredential struct{} - -func (d *deserializeOpGetCredential) ID() string { - return "OperationDeserializer" -} - -func (d *deserializeOpGetCredential) 
HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) ( - out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, deserializeError(response) - } - - var shape *GetCredentialsOutput - if err = json.NewDecoder(response.Body).Decode(&shape); err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)} - } - - out.Result = shape - return out, metadata, err -} - -func deserializeError(response *smithyhttp.Response) error { - // we could be talking to anything, json isn't guaranteed - // see https://github.com/aws/aws-sdk-go-v2/issues/2316 - if response.Header.Get("Content-Type") == "application/json" { - return deserializeJSONError(response) - } - - msg, err := io.ReadAll(response.Body) - if err != nil { - return &smithy.DeserializationError{ - Err: fmt.Errorf("read response, %w", err), - } - } - - return &EndpointError{ - // no sensible value for Code - Message: string(msg), - Fault: stof(response.StatusCode), - statusCode: response.StatusCode, - } -} - -func deserializeJSONError(response *smithyhttp.Response) error { - var errShape *EndpointError - if err := json.NewDecoder(response.Body).Decode(&errShape); err != nil { - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode error message, %w", err), - } - } - - errShape.Fault = stof(response.StatusCode) - errShape.statusCode = response.StatusCode - return errShape -} - -// maps HTTP status code to smithy ErrorFault -func stof(code int) smithy.ErrorFault { - if code >= 500 { - return smithy.FaultServer - } - return smithy.FaultClient -} - -func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %w", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %w", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go deleted file mode 100644 index 2386153a9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ /dev/null @@ -1,193 +0,0 @@ -// Package endpointcreds provides support for retrieving credentials from an -// arbitrary HTTP endpoint. -// -// The credentials endpoint Provider can receive both static and refreshable -// credentials that will expire. 
Credentials are static when an "Expiration" -// value is not provided in the endpoint's response. -// -// Static credentials will never expire once they have been retrieved. The format -// of the static credentials response: -// -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } -// -// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration -// value in the response. The format of the refreshable credentials response: -// -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } -// -// Errors should be returned in the following format and only returned with 400 -// or 500 HTTP status codes. -// -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } -package endpointcreds - -import ( - "context" - "fmt" - "net/http" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client" - "github.com/aws/smithy-go/middleware" -) - -// ProviderName is the name of the credentials provider. -const ProviderName = `CredentialsEndpointProvider` - -type getCredentialsAPIClient interface { - GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error) -} - -// Provider satisfies the aws.CredentialsProvider interface, and is a client to -// retrieve credentials from an arbitrary endpoint. -type Provider struct { - // The AWS Client to make HTTP requests to the endpoint with. The endpoint - // the request will be made to is provided by the aws.Config's - // EndpointResolver. - client getCredentialsAPIClient - - options Options -} - -// HTTPClient is a client for sending HTTP requests -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Options is structure of configurable options for Provider -type Options struct { - // Endpoint to retrieve credentials from. Required - Endpoint string - - // HTTPClient to handle sending HTTP requests to the target endpoint. - HTTPClient HTTPClient - - // Set of options to modify how the credentials operation is invoked. - APIOptions []func(*middleware.Stack) error - - // The Retryer to be used for determining whether a failed requested should be retried - Retryer aws.Retryer - - // Optional authorization token value if set will be used as the value of - // the Authorization header of the endpoint credential request. - // - // When constructed from environment, the provider will use the value of - // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token - // - // Will be overridden if AuthorizationTokenProvider is configured - AuthorizationToken string - - // Optional auth provider func to dynamically load the auth token from a file - // everytime a credential is retrieved - // - // When constructed from environment, the provider will read and use the content - // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable - // as the auth token everytime credentials are retrieved - // - // Will override AuthorizationToken if configured - AuthorizationTokenProvider AuthTokenProvider -} - -// AuthTokenProvider defines an interface to dynamically load a value to be passed -// for the Authorization header of a credentials request. 
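A sketch of the endpointcreds contract described above, pointing the provider at a local `net/http/httptest` server that returns the documented refreshable-credentials JSON; the server and token value are stand-ins, and only the Options fields shown in the removed sources are used.

```go
// Sketch: endpointcreds against a local stand-in endpoint.
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"

	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The provider sends the configured token as the Authorization header.
		if r.Header.Get("Authorization") != "example-token" {
			http.Error(w, `{"code":"AccessDenied","message":"bad token"}`, http.StatusForbidden)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"AccessKeyId":"AKIDEXAMPLE","SecretAccessKey":"secret","Token":"session","Expiration":"2030-01-01T00:00:00Z"}`)
	}))
	defer srv.Close()

	p := endpointcreds.New(srv.URL, func(o *endpointcreds.Options) {
		o.AuthorizationToken = "example-token"
	})

	creds, err := p.Retrieve(context.TODO())
	if err != nil {
		log.Fatalf("retrieve: %v", err)
	}
	fmt.Println("expires:", creds.CanExpire, creds.Expires)
}
```

An `AuthorizationTokenProvider` (for example a `TokenProviderFunc` that reads the token from a file) can be set instead, and overrides `AuthorizationToken`, per the option documentation above.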
-type AuthTokenProvider interface { - GetToken() (string, error) -} - -// TokenProviderFunc is a func type implementing AuthTokenProvider interface -// and enables customizing token provider behavior -type TokenProviderFunc func() (string, error) - -// GetToken func retrieves auth token according to TokenProviderFunc implementation -func (p TokenProviderFunc) GetToken() (string, error) { - return p() -} - -// New returns a credentials Provider for retrieving AWS credentials -// from arbitrary endpoint. -func New(endpoint string, optFns ...func(*Options)) *Provider { - o := Options{ - Endpoint: endpoint, - } - - for _, fn := range optFns { - fn(&o) - } - - p := &Provider{ - client: client.New(client.Options{ - HTTPClient: o.HTTPClient, - Endpoint: o.Endpoint, - APIOptions: o.APIOptions, - Retryer: o.Retryer, - }), - options: o, - } - - return p -} - -// Retrieve will attempt to request the credentials from the endpoint the Provider -// was configured for. And error will be returned if the retrieval fails. -func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - resp, err := p.getCredentials(ctx) - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err) - } - - creds := aws.Credentials{ - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.Token, - Source: ProviderName, - AccountID: resp.AccountID, - } - - if resp.Expiration != nil { - creds.CanExpire = true - creds.Expires = *resp.Expiration - } - - return creds, nil -} - -func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) { - authToken, err := p.resolveAuthToken() - if err != nil { - return nil, fmt.Errorf("resolve auth token: %v", err) - } - - return p.client.GetCredentials(ctx, &client.GetCredentialsInput{ - AuthorizationToken: authToken, - }) -} - -func (p *Provider) resolveAuthToken() (string, error) { - authToken := p.options.AuthorizationToken - - var err error - if p.options.AuthorizationTokenProvider != nil { - authToken, err = p.options.AuthorizationTokenProvider.GetToken() - if err != nil { - return "", err - } - } - - if strings.ContainsAny(authToken, "\r\n") { - return "", fmt.Errorf("authorization token contains invalid newline sequence") - } - - return authToken, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go deleted file mode 100644 index 8280a4b39..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package credentials - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.53" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go deleted file mode 100644 index a3137b8fa..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package processcreds is a credentials provider to retrieve credentials from a -// external CLI invoked process. -// -// WARNING: The following describes a method of sourcing credentials from an external -// process. This can potentially be dangerous, so proceed with caution. Other -// credential providers should be preferred if at all possible. 
If using this -// option, you should make sure that the config file is as locked down as possible -// using security best practices for your operating system. -// -// # Concurrency and caching -// -// The Provider is not safe to be used concurrently, and does not provide any -// caching of credentials retrieved. You should wrap the Provider with a -// `aws.CredentialsCache` to provide concurrency safety, and caching of -// credentials. -// -// # Loading credentials with the SDKs AWS Config -// -// You can use credentials from a AWS shared config `credential_process` in a -// variety of ways. -// -// One way is to setup your shared config file, located in the default -// location, with the `credential_process` key and the command you want to be -// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable -// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. -// -// [default] -// credential_process = /command/to/call -// -// Loading configuration using external will use the credential process to -// retrieve credentials. NOTE: If there are credentials in the profile you are -// using, the credential process will not be used. -// -// // Initialize a session to load credentials. -// cfg, _ := config.LoadDefaultConfig(context.TODO()) -// -// // Create S3 service client to use the credentials. -// svc := s3.NewFromConfig(cfg) -// -// # Loading credentials with the Provider directly -// -// Another way to use the credentials process provider is by using the -// `NewProvider` constructor to create the provider and providing a it with a -// command to be executed to retrieve credentials. -// -// The following example creates a credentials provider for a command, and wraps -// it with the CredentialsCache before assigning the provider to the Amazon S3 API -// client's Credentials option. -// -// // Create credentials using the Provider. -// provider := processcreds.NewProvider("/path/to/command") -// -// // Create the service client value configured for credentials. -// svc := s3.New(s3.Options{ -// Credentials: aws.NewCredentialsCache(provider), -// }) -// -// If you need more control, you can set any configurable options in the -// credentials using one or more option functions. -// -// provider := processcreds.NewProvider("/path/to/command", -// func(o *processcreds.Options) { -// // Override the provider's default timeout -// o.Timeout = 2 * time.Minute -// }) -// -// You can also use your own `exec.Cmd` value by satisfying a value that satisfies -// the `NewCommandBuilder` interface and use the `NewProviderCommand` constructor. 
-// -// // Create an exec.Cmd -// cmdBuilder := processcreds.NewCommandBuilderFunc( -// func(ctx context.Context) (*exec.Cmd, error) { -// cmd := exec.CommandContext(ctx, -// "customCLICommand", -// "-a", "argument", -// ) -// cmd.Env = []string{ -// "ENV_VAR_FOO=value", -// "ENV_VAR_BAR=other_value", -// } -// -// return cmd, nil -// }, -// ) -// -// // Create credentials using your exec.Cmd and custom timeout -// provider := processcreds.NewProviderCommand(cmdBuilder, -// func(opt *processcreds.Provider) { -// // optionally override the provider's default timeout -// opt.Timeout = 1 * time.Second -// }) -package processcreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go deleted file mode 100644 index 911fcc327..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ /dev/null @@ -1,285 +0,0 @@ -package processcreds - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "os" - "os/exec" - "runtime" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdkio" -) - -const ( - // ProviderName is the name this credentials provider will label any - // returned credentials Value with. - ProviderName = `ProcessProvider` - - // DefaultTimeout default limit on time a process can run. - DefaultTimeout = time.Duration(1) * time.Minute -) - -// ProviderError is an error indicating failure initializing or executing the -// process credentials provider -type ProviderError struct { - Err error -} - -// Error returns the error message. -func (e *ProviderError) Error() string { - return fmt.Sprintf("process provider error: %v", e.Err) -} - -// Unwrap returns the underlying error the provider error wraps. -func (e *ProviderError) Unwrap() error { - return e.Err -} - -// Provider satisfies the credentials.Provider interface, and is a -// client to retrieve credentials from a process. -type Provider struct { - // Provides a constructor for exec.Cmd that are invoked by the provider for - // retrieving credentials. Use this to provide custom creation of exec.Cmd - // with things like environment variables, or other configuration. - // - // The provider defaults to the DefaultNewCommand function. - commandBuilder NewCommandBuilder - - options Options -} - -// Options is the configuration options for configuring the Provider. -type Options struct { - // Timeout limits the time a process can run. - Timeout time.Duration -} - -// NewCommandBuilder provides the interface for specifying how command will be -// created that the Provider will use to retrieve credentials with. -type NewCommandBuilder interface { - NewCommand(context.Context) (*exec.Cmd, error) -} - -// NewCommandBuilderFunc provides a wrapper type around a function pointer to -// satisfy the NewCommandBuilder interface. -type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error) - -// NewCommand calls the underlying function pointer the builder was initialized with. -func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) { - return fn(ctx) -} - -// DefaultNewCommandBuilder provides the default NewCommandBuilder -// implementation used by the provider. It takes a command and arguments to -// invoke. The command will also be initialized with the current process -// environment variables, stderr, and stdin pipes. 
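A sketch of the direct processcreds usage described in the package documentation above. The helper path is hypothetical; whatever command is configured is expected to print the `credential_process` JSON contract (Version 1, AccessKeyId, SecretAccessKey, and optionally SessionToken/Expiration/AccountId) that the provider's Retrieve validates.

```go
// Sketch: processcreds with a hypothetical helper command and a custom timeout.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
)

func main() {
	// Hypothetical helper that prints, e.g.:
	//   {"Version":1,"AccessKeyId":"AKID...","SecretAccessKey":"...","SessionToken":"...","Expiration":"2030-01-01T00:00:00Z"}
	provider := processcreds.NewProvider("/usr/local/bin/print-aws-creds",
		func(o *processcreds.Options) {
			o.Timeout = 30 * time.Second // default is DefaultTimeout (1 minute)
		})

	// Wrap in a cache so the external process is not re-run on every API call.
	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())
	if err != nil {
		log.Fatalf("credential_process failed: %v", err)
	}
	fmt.Println("account:", creds.AccountID, "expires:", creds.Expires)
}
```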
-type DefaultNewCommandBuilder struct { - Args []string -} - -// NewCommand returns an initialized exec.Cmd with the builder's initialized -// Args. The command is also initialized current process environment variables, -// stderr, and stdin pipes. -func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) { - var cmdArgs []string - if runtime.GOOS == "windows" { - cmdArgs = []string{"cmd.exe", "/C"} - } else { - cmdArgs = []string{"sh", "-c"} - } - - if len(b.Args) == 0 { - return nil, &ProviderError{ - Err: fmt.Errorf("failed to prepare command: command must not be empty"), - } - } - - cmdArgs = append(cmdArgs, b.Args...) - cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...) - cmd.Env = os.Environ() - - cmd.Stderr = os.Stderr // display stderr on console for MFA - cmd.Stdin = os.Stdin // enable stdin for MFA - - return cmd, nil -} - -// NewProvider returns a pointer to a new Credentials object wrapping the -// Provider. -// -// The provider defaults to the DefaultNewCommandBuilder for creating command -// the Provider will use to retrieve credentials with. -func NewProvider(command string, options ...func(*Options)) *Provider { - var args []string - - // Ensure that the command arguments are not set if the provided command is - // empty. This will error out when the command is executed since no - // arguments are specified. - if len(command) > 0 { - args = []string{command} - } - - commanBuilder := DefaultNewCommandBuilder{ - Args: args, - } - return NewProviderCommand(commanBuilder, options...) -} - -// NewProviderCommand returns a pointer to a new Credentials object with the -// specified command, and default timeout duration. Use this to provide custom -// creation of exec.Cmd for options like environment variables, or other -// configuration. -func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider { - p := &Provider{ - commandBuilder: builder, - options: Options{ - Timeout: DefaultTimeout, - }, - } - - for _, option := range options { - option(&p.options) - } - - return p -} - -// A CredentialProcessResponse is the AWS credentials format that must be -// returned when executing an external credential_process. -type CredentialProcessResponse struct { - // As of this writing, the Version key must be set to 1. This might - // increment over time as the structure evolves. - Version int - - // The access key ID that identifies the temporary security credentials. - AccessKeyID string `json:"AccessKeyId"` - - // The secret access key that can be used to sign requests. - SecretAccessKey string - - // The token that users must pass to the service API to use the temporary credentials. - SessionToken string - - // The date on which the current credentials expire. - Expiration *time.Time - - // The ID of the account for credentials - AccountID string `json:"AccountId"` -} - -// Retrieve executes the credential process command and returns the -// credentials, or error if the command fails. 
-func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - out, err := p.executeCredentialProcess(ctx) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - // Serialize and validate response - resp := &CredentialProcessResponse{} - if err = json.Unmarshal(out, resp); err != nil { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err), - } - } - - if resp.Version != 1 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("wrong version in process output (not 1)"), - } - } - - if len(resp.AccessKeyID) == 0 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("missing AccessKeyId in process output"), - } - } - - if len(resp.SecretAccessKey) == 0 { - return aws.Credentials{Source: ProviderName}, &ProviderError{ - Err: fmt.Errorf("missing SecretAccessKey in process output"), - } - } - - creds := aws.Credentials{ - Source: ProviderName, - AccessKeyID: resp.AccessKeyID, - SecretAccessKey: resp.SecretAccessKey, - SessionToken: resp.SessionToken, - AccountID: resp.AccountID, - } - - // Handle expiration - if resp.Expiration != nil { - creds.CanExpire = true - creds.Expires = *resp.Expiration - } - - return creds, nil -} - -// executeCredentialProcess starts the credential process on the OS and -// returns the results or an error. -func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) { - if p.options.Timeout >= 0 { - var cancelFunc func() - ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout) - defer cancelFunc() - } - - cmd, err := p.commandBuilder.NewCommand(ctx) - if err != nil { - return nil, err - } - - // get creds json on process's stdout - output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte))) - if cmd.Stdout != nil { - cmd.Stdout = io.MultiWriter(cmd.Stdout, output) - } else { - cmd.Stdout = output - } - - execCh := make(chan error, 1) - go executeCommand(cmd, execCh) - - select { - case execError := <-execCh: - if execError == nil { - break - } - select { - case <-ctx.Done(): - return output.Bytes(), &ProviderError{ - Err: fmt.Errorf("credential process timed out: %w", execError), - } - default: - return output.Bytes(), &ProviderError{ - Err: fmt.Errorf("error in credential_process: %w", execError), - } - } - } - - out := output.Bytes() - if runtime.GOOS == "windows" { - // windows adds slashes to quotes - out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`)) - } - - return out, nil -} - -func executeCommand(cmd *exec.Cmd, exec chan error) { - // Start the command - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - exec <- err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go deleted file mode 100644 index ece1e65f7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go +++ /dev/null @@ -1,81 +0,0 @@ -// Package ssocreds provides a credential provider for retrieving temporary AWS -// credentials using an SSO access token. -// -// IMPORTANT: The provider in this package does not initiate or perform the AWS -// SSO login flow. The SDK provider expects that you have already performed the -// SSO login flow using AWS CLI using the "aws sso login" command, or by some -// other mechanism. The provider must find a valid non-expired access token for -// the AWS SSO user portal URL in ~/.aws/sso/cache. 
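Stepping back to the processcreds internals removed above: executeCredentialProcess bounds the child process with context.WithTimeout and collects its stdout into a buffer. A standalone sketch of that pattern follows; it is not SDK code, and runWithTimeout plus the echo invocation are illustrative only.

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
	"time"
)

// runWithTimeout mirrors the removed executeCredentialProcess helper:
// it runs cmdName under a deadline and returns whatever the process
// wrote to stdout.
func runWithTimeout(parent context.Context, timeout time.Duration, cmdName string, args ...string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(parent, timeout)
	defer cancel()

	var out bytes.Buffer
	cmd := exec.CommandContext(ctx, cmdName, args...)
	cmd.Stdout = &out // collect the credential JSON from stdout

	if err := cmd.Run(); err != nil {
		if ctx.Err() != nil {
			return out.Bytes(), fmt.Errorf("credential process timed out: %w", err)
		}
		return out.Bytes(), fmt.Errorf("error in credential_process: %w", err)
	}
	return out.Bytes(), nil
}

func main() {
	out, err := runWithTimeout(context.Background(), 30*time.Second, "echo", `{"Version":1}`)
	fmt.Println(string(out), err)
}
```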
If a cached token is not -// found, it is expired, or the file is malformed an error will be returned. -// -// # Loading AWS SSO credentials with the AWS shared configuration file -// -// You can use configure AWS SSO credentials from the AWS shared configuration file by -// specifying the required keys in the profile and referencing an sso-session: -// -// sso_session -// sso_account_id -// sso_role_name -// -// For example, the following defines a profile "devsso" and specifies the AWS -// SSO parameters that defines the target account, role, sign-on portal, and -// the region where the user portal is located. Note: all SSO arguments must be -// provided, or an error will be returned. -// -// [profile devsso] -// sso_session = dev-session -// sso_role_name = SSOReadOnlyRole -// sso_account_id = 123456789012 -// -// [sso-session dev-session] -// sso_start_url = https://my-sso-portal.awsapps.com/start -// sso_region = us-east-1 -// sso_registration_scopes = sso:account:access -// -// Using the config module, you can load the AWS SDK shared configuration, and -// specify that this profile be used to retrieve credentials. For example: -// -// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso")) -// if err != nil { -// return err -// } -// -// # Programmatically loading AWS SSO credentials directly -// -// You can programmatically construct the AWS SSO Provider in your application, -// and provide the necessary information to load and retrieve temporary -// credentials using an access token from ~/.aws/sso/cache. -// -// ssoClient := sso.NewFromConfig(cfg) -// ssoOidcClient := ssooidc.NewFromConfig(cfg) -// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session") -// if err != nil { -// return err -// } -// -// var provider aws.CredentialsProvider -// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) { -// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath) -// }) -// -// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time -// provider = aws.NewCredentialsCache(provider) -// -// credentials, err := provider.Retrieve(context.TODO()) -// if err != nil { -// return err -// } -// -// It is important that you wrap the Provider with aws.CredentialsCache if you -// are programmatically constructing the provider directly. This prevents your -// application from accessing the cached access token and requesting new -// credentials each time the credentials are used. 
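The cache lookup described here is keyed by file path: the removed sso_cached_token.go (further down in this diff) derives ~/.aws/sso/cache/&lt;sha1-hex-of-key&gt;.json from the sso-session name or start URL. A small sketch of that derivation using only the standard library; "dev-session" is just an example key.

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// cachedTokenPath mirrors the removed StandardCachedTokenFilepath helper:
// SHA-1 of the sso-session name (or start URL), hex encoded, ".json" suffix,
// placed under ~/.aws/sso/cache.
func cachedTokenPath(key string) (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	sum := sha1.Sum([]byte(key))
	// hex.EncodeToString is already lowercase; ToLower kept to match the original.
	name := strings.ToLower(hex.EncodeToString(sum[:])) + ".json"
	return filepath.Join(home, ".aws", "sso", "cache", name), nil
}

func main() {
	p, err := cachedTokenPath("dev-session") // example sso-session name
	fmt.Println(p, err)
}
```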
-// -// # Additional Resources -// -// Configuring the AWS CLI to use AWS Single Sign-On: -// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -// -// AWS Single Sign-On User Guide: -// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html -package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go deleted file mode 100644 index 46ae2f923..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go +++ /dev/null @@ -1,233 +0,0 @@ -package ssocreds - -import ( - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" -) - -var osUserHomeDur = shareddefaults.UserHomeDir - -// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or -// error if unable get derive the path. Key that will be used to compute a SHA1 -// value that is hex encoded. -// -// Derives the filepath using the Key as: -// -// ~/.aws/sso/cache/.json -func StandardCachedTokenFilepath(key string) (string, error) { - homeDir := osUserHomeDur() - if len(homeDir) == 0 { - return "", fmt.Errorf("unable to get USER's home directory for cached token") - } - hash := sha1.New() - if _, err := hash.Write([]byte(key)); err != nil { - return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err) - } - - cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" - - return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil -} - -type tokenKnownFields struct { - AccessToken string `json:"accessToken,omitempty"` - ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` - - RefreshToken string `json:"refreshToken,omitempty"` - ClientID string `json:"clientId,omitempty"` - ClientSecret string `json:"clientSecret,omitempty"` -} - -type token struct { - tokenKnownFields - UnknownFields map[string]interface{} `json:"-"` -} - -func (t token) MarshalJSON() ([]byte, error) { - fields := map[string]interface{}{} - - setTokenFieldString(fields, "accessToken", t.AccessToken) - setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) - - setTokenFieldString(fields, "refreshToken", t.RefreshToken) - setTokenFieldString(fields, "clientId", t.ClientID) - setTokenFieldString(fields, "clientSecret", t.ClientSecret) - - for k, v := range t.UnknownFields { - if _, ok := fields[k]; ok { - return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) - } - fields[k] = v - } - - return json.Marshal(fields) -} - -func setTokenFieldString(fields map[string]interface{}, key, value string) { - if value == "" { - return - } - fields[key] = value -} -func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { - if value == nil { - return - } - fields[key] = value -} - -func (t *token) UnmarshalJSON(b []byte) error { - var fields map[string]interface{} - if err := json.Unmarshal(b, &fields); err != nil { - return nil - } - - t.UnknownFields = map[string]interface{}{} - - for k, v := range fields { - var err error - switch k { - case "accessToken": - err = getTokenFieldString(v, &t.AccessToken) - case "expiresAt": - err = getTokenFieldRFC3339(v, &t.ExpiresAt) - case "refreshToken": - err = getTokenFieldString(v, &t.RefreshToken) - case "clientId": - err = getTokenFieldString(v, 
&t.ClientID) - case "clientSecret": - err = getTokenFieldString(v, &t.ClientSecret) - default: - t.UnknownFields[k] = v - } - - if err != nil { - return fmt.Errorf("field %q, %w", k, err) - } - } - - return nil -} - -func getTokenFieldString(v interface{}, value *string) error { - var ok bool - *value, ok = v.(string) - if !ok { - return fmt.Errorf("expect value to be string, got %T", v) - } - return nil -} - -func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { - var stringValue string - if err := getTokenFieldString(v, &stringValue); err != nil { - return err - } - - timeValue, err := parseRFC3339(stringValue) - if err != nil { - return err - } - - *value = &timeValue - return nil -} - -func loadCachedToken(filename string) (token, error) { - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err) - } - - var t token - if err := json.Unmarshal(fileBytes, &t); err != nil { - return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err) - } - - if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { - return token{}, fmt.Errorf( - "cached SSO token must contain accessToken and expiresAt fields") - } - - return t, nil -} - -func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) { - tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10) - if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { - return err - } - - if err := os.Rename(tmpFilename, filename); err != nil { - return fmt.Errorf("failed to replace old cached SSO token file, %w", err) - } - - return nil -} - -func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) { - var f *os.File - f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) - if err != nil { - return fmt.Errorf("failed to create cached SSO token file %w", err) - } - - defer func() { - closeErr := f.Close() - if err == nil && closeErr != nil { - err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr) - } - }() - - encoder := json.NewEncoder(f) - - if err = encoder.Encode(t); err != nil { - return fmt.Errorf("failed to serialize cached SSO token, %w", err) - } - - return nil -} - -type rfc3339 time.Time - -func parseRFC3339(v string) (rfc3339, error) { - parsed, err := time.Parse(time.RFC3339, v) - if err != nil { - return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err) - } - - return rfc3339(parsed), nil -} - -func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) { - var value string - - // Use JSON unmarshal to unescape the quoted value making use of JSON's - // unquoting rules. - if err = json.Unmarshal(bytes, &value); err != nil { - return err - } - - *r, err = parseRFC3339(value) - - return nil -} - -func (r *rfc3339) MarshalJSON() ([]byte, error) { - value := time.Time(*r).UTC().Format(time.RFC3339) - - // Use JSON unmarshal to unescape the quoted value making use of JSON's - // quoting rules. 
- return json.Marshal(value) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go deleted file mode 100644 index 8c230be8e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go +++ /dev/null @@ -1,153 +0,0 @@ -package ssocreds - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/sso" -) - -// ProviderName is the name of the provider used to specify the source of -// credentials. -const ProviderName = "SSOProvider" - -// GetRoleCredentialsAPIClient is a API client that implements the -// GetRoleCredentials operation. -type GetRoleCredentialsAPIClient interface { - GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) ( - *sso.GetRoleCredentialsOutput, error, - ) -} - -// Options is the Provider options structure. -type Options struct { - // The Client which is configured for the AWS Region where the AWS SSO user - // portal is located. - Client GetRoleCredentialsAPIClient - - // The AWS account that is assigned to the user. - AccountID string - - // The role name that is assigned to the user. - RoleName string - - // The URL that points to the organization's AWS Single Sign-On (AWS SSO) - // user portal. - StartURL string - - // The filepath the cached token will be retrieved from. If unset Provider will - // use the startURL to determine the filepath at. - // - // ~/.aws/sso/cache/.json - // - // If custom cached token filepath is used, the Provider's startUrl - // parameter will be ignored. - CachedTokenFilepath string - - // Used by the SSOCredentialProvider if a token configuration - // profile is used in the shared config - SSOTokenProvider *SSOTokenProvider -} - -// Provider is an AWS credential provider that retrieves temporary AWS -// credentials by exchanging an SSO login token. -type Provider struct { - options Options - - cachedTokenFilepath string -} - -// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The -// provided client is expected to be configured for the AWS Region where the -// AWS SSO user portal is located. -func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider { - options := Options{ - Client: client, - AccountID: accountID, - RoleName: roleName, - StartURL: startURL, - } - - for _, fn := range optFns { - fn(&options) - } - - return &Provider{ - options: options, - cachedTokenFilepath: options.CachedTokenFilepath, - } -} - -// Retrieve retrieves temporary AWS credentials from the configured Amazon -// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present -// in ~/.aws/sso/cache. However, if a token provider configuration exists -// in the shared config, then we ought to use the token provider rather then -// direct access on the cached token. 
-func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - var accessToken *string - if p.options.SSOTokenProvider != nil { - token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx) - if err != nil { - return aws.Credentials{}, err - } - accessToken = &token.Value - } else { - if p.cachedTokenFilepath == "" { - cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL) - if err != nil { - return aws.Credentials{}, &InvalidTokenError{Err: err} - } - p.cachedTokenFilepath = cachedTokenFilepath - } - - tokenFile, err := loadCachedToken(p.cachedTokenFilepath) - if err != nil { - return aws.Credentials{}, &InvalidTokenError{Err: err} - } - - if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) { - return aws.Credentials{}, &InvalidTokenError{} - } - accessToken = &tokenFile.AccessToken - } - - output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: accessToken, - AccountId: &p.options.AccountID, - RoleName: &p.options.RoleName, - }) - if err != nil { - return aws.Credentials{}, err - } - - return aws.Credentials{ - AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId), - SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey), - SessionToken: aws.ToString(output.RoleCredentials.SessionToken), - CanExpire: true, - Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), - Source: ProviderName, - AccountID: p.options.AccountID, - }, nil -} - -// InvalidTokenError is the error type that is returned if loaded token has -// expired or is otherwise invalid. To refresh the SSO session run AWS SSO -// login with the corresponding profile. -type InvalidTokenError struct { - Err error -} - -func (i *InvalidTokenError) Unwrap() error { - return i.Err -} - -func (i *InvalidTokenError) Error() string { - const msg = "the SSO session has expired or is invalid" - if i.Err == nil { - return msg - } - return msg + ": " + i.Err.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go deleted file mode 100644 index 7f4fc5467..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go +++ /dev/null @@ -1,147 +0,0 @@ -package ssocreds - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/ssooidc" - "github.com/aws/smithy-go/auth/bearer" -) - -// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API -// client for calling CreateToken operation to refresh the SSO token. -type CreateTokenAPIClient interface { - CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) ( - *ssooidc.CreateTokenOutput, error, - ) -} - -// SSOTokenProviderOptions provides the options for configuring the -// SSOTokenProvider. -type SSOTokenProviderOptions struct { - // Client that can be overridden - Client CreateTokenAPIClient - - // The set of API Client options to be applied when invoking the - // CreateToken operation. - ClientOptions []func(*ssooidc.Options) - - // The path the file containing the cached SSO token will be read from. - // Initialized the NewSSOTokenProvider's cachedTokenFilepath parameter. 
- CachedTokenFilepath string -} - -// SSOTokenProvider provides an utility for refreshing SSO AccessTokens for -// Bearer Authentication. The SSOTokenProvider can only be used to refresh -// already cached SSO Tokens. This utility cannot perform the initial SSO -// create token. -// -// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in -// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's -// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with -// the smithy-go TokenCache, if the external configuration loaded configured -// for an SSO session. -// -// The initial SSO create token should be preformed with the AWS CLI before the -// Go application using the SSOTokenProvider will need to retrieve the SSO -// token. If the AWS CLI has not created the token cache file, this provider -// will return an error when attempting to retrieve the cached token. -// -// This provider will attempt to refresh the cached SSO token periodically if -// needed when RetrieveBearerToken is called. -// -// A utility such as the AWS CLI must be used to initially create the SSO -// session and cached token file. -// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -type SSOTokenProvider struct { - options SSOTokenProviderOptions -} - -var _ bearer.TokenProvider = (*SSOTokenProvider)(nil) - -// NewSSOTokenProvider returns an initialized SSOTokenProvider that will -// periodically refresh the SSO token cached stored in the cachedTokenFilepath. -// The cachedTokenFilepath file's content will be rewritten by the token -// provider when the token is refreshed. -// -// The client must be configured for the AWS region the SSO token was created for. -func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { - options := SSOTokenProviderOptions{ - Client: client, - CachedTokenFilepath: cachedTokenFilepath, - } - for _, fn := range optFns { - fn(&options) - } - - provider := &SSOTokenProvider{ - options: options, - } - - return provider -} - -// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath -// the SSOTokenProvider was created with. If the token has expired -// RetrieveBearerToken will attempt to refresh it. If the token cannot be -// refreshed or is not present an error will be returned. -// -// A utility such as the AWS CLI must be used to initially create the SSO -// session and cached token file. 
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html -func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) { - cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) - if err != nil { - return bearer.Token{}, err - } - - if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) { - cachedToken, err = p.refreshToken(ctx, cachedToken) - if err != nil { - return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err) - } - } - - expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt)) - return bearer.Token{ - Value: cachedToken.AccessToken, - CanExpire: !expiresAt.IsZero(), - Expires: expiresAt, - }, nil -} - -func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) { - if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" { - return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") - } - - createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{ - ClientId: &cachedToken.ClientID, - ClientSecret: &cachedToken.ClientSecret, - RefreshToken: &cachedToken.RefreshToken, - GrantType: aws.String("refresh_token"), - }, p.options.ClientOptions...) - if err != nil { - return token{}, fmt.Errorf("unable to refresh SSO token, %w", err) - } - - expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second) - - cachedToken.AccessToken = aws.ToString(createResult.AccessToken) - cachedToken.ExpiresAt = (*rfc3339)(&expiresAt) - cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken) - - fileInfo, err := os.Stat(p.options.CachedTokenFilepath) - if err != nil { - return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err) - } - - if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil { - return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err) - } - - return cachedToken, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go deleted file mode 100644 index d525cac09..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go +++ /dev/null @@ -1,53 +0,0 @@ -package credentials - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -const ( - // StaticCredentialsName provides a name of Static provider - StaticCredentialsName = "StaticCredentials" -) - -// StaticCredentialsEmptyError is emitted when static credentials are empty. -type StaticCredentialsEmptyError struct{} - -func (*StaticCredentialsEmptyError) Error() string { - return "static credentials are empty" -} - -// A StaticCredentialsProvider is a set of credentials which are set, and will -// never expire. -type StaticCredentialsProvider struct { - Value aws.Credentials -} - -// NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS -// credentials passed in. -func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider { - return StaticCredentialsProvider{ - Value: aws.Credentials{ - AccessKeyID: key, - SecretAccessKey: secret, - SessionToken: session, - }, - } -} - -// Retrieve returns the credentials or error if the credentials are invalid. 
-func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) { - v := s.Value - if v.AccessKeyID == "" || v.SecretAccessKey == "" { - return aws.Credentials{ - Source: StaticCredentialsName, - }, &StaticCredentialsEmptyError{} - } - - if len(v.Source) == 0 { - v.Source = StaticCredentialsName - } - - return v, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 4c7f7993f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,326 +0,0 @@ -// Package stscreds are credential Providers to retrieve STS AWS credentials. -// -// STS provides multiple ways to retrieve credentials which can be used when making -// future AWS service API operation calls. -// -// The SDK will ensure that per instance of credentials.Credentials all requests -// to refresh the credentials will be synchronized. But, the SDK is unable to -// ensure synchronous usage of the AssumeRoleProvider if the value is shared -// between multiple Credentials or service clients. -// -// # Assume Role -// -// To assume an IAM role using STS with the SDK you can create a new Credentials -// with the SDKs's stscreds package. -// -// // Initial credentials loaded from SDK's default credential chain. Such as -// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance -// // Role. These credentials will be used to to make the STS Assume Role API. -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN. -// stsSvc := sts.NewFromConfig(cfg) -// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn") -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) -// -// # Assume Role with custom MFA Token provider -// -// To assume an IAM role with a MFA token you can either specify a custom MFA -// token provider or use the SDK's built in StdinTokenProvider that will prompt -// the user for a token code each time the credentials need to to be refreshed. -// Specifying a custom token provider allows you to control where the token -// code is retrieved from, and how it is refreshed. -// -// With a custom token provider, the provider is responsible for refreshing the -// token code when called. -// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// staticTokenProvider := func() (string, error) { -// return someTokenCode, nil -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN using the MFA token code provided. -// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { -// o.SerialNumber = aws.String("myTokenSerialNumber") -// o.TokenProvider = staticTokenProvider -// }) -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. 
-// svc := s3.NewFromConfig(cfg) -// -// # Assume Role with MFA Token Provider -// -// To assume an IAM role with MFA for longer running tasks where the credentials -// may need to be refreshed setting the TokenProvider field of AssumeRoleProvider -// will allow the credential provider to prompt for new MFA token code when the -// role's credentials need to be refreshed. -// -// The StdinTokenProvider function is available to prompt on stdin to retrieve -// the MFA token code from the user. You can also implement custom prompts by -// satisfying the TokenProvider function signature. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely. -// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN using the MFA token code provided. -// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { -// o.SerialNumber = aws.String("myTokenSerialNumber") -// o.TokenProvider = stscreds.StdinTokenProvider -// }) -// -// cfg.Credentials = aws.NewCredentialsCache(creds) -// -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) -package stscreds - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go-v2/service/sts/types" -) - -// StdinTokenProvider will prompt on stdout and read from stdin for a string value. -// An error is returned if reading from stdin fails. -// -// Use this function go read MFA tokens from stdin. The function makes no attempt -// to make atomic prompts from stdin across multiple gorouties. -// -// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will -// have undesirable results as the StdinTokenProvider will not be synchronized. A -// single Credentials with an AssumeRoleProvider can be shared safely -// -// Will wait forever until something is provided on the stdin. -func StdinTokenProvider() (string, error) { - var v string - fmt.Printf("Assume Role MFA token code: ") - _, err := fmt.Scanln(&v) - - return v, err -} - -// ProviderName provides a name of AssumeRole provider -const ProviderName = "AssumeRoleProvider" - -// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation. -type AssumeRoleAPIClient interface { - AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) -} - -// DefaultDuration is the default amount of time in minutes that the -// credentials will be valid for. This value is only used by AssumeRoleProvider -// for specifying the default expiry duration of an assume role. -// -// Other providers such as WebIdentityRoleProvider do not use this value, and -// instead rely on STS API's default parameter handing to assign a default -// value. -var DefaultDuration = time.Duration(15) * time.Minute - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. 
-// -// This credential provider will be used by the SDKs default credential change -// when shared configuration is enabled, and the shared config or shared credentials -// file configure assume role. See Session docs for how to do this. -// -// AssumeRoleProvider does not provide any synchronization and it is not safe -// to share this value across multiple Credentials, Sessions, or service clients -// without also sharing the same Credentials instance. -type AssumeRoleProvider struct { - options AssumeRoleOptions -} - -// AssumeRoleOptions is the configurable options for AssumeRoleProvider -type AssumeRoleOptions struct { - // Client implementation of the AssumeRole operation. Required - Client AssumeRoleAPIClient - - // IAM Role ARN to be assumed. Required - RoleARN string - - // Session name, if you wish to uniquely identify this session. - RoleSessionName string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // Optional ExternalID to pass along, defaults to nil if not set. - ExternalID *string - - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. - Policy *string - - // The ARNs of IAM managed policies you want to use as managed session policies. - // The policies must exist in the same account as the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. - // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's identity-based - // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. - PolicyARNs []types.PolicyDescriptorType - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy - // of the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as GAHT12345678) - // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - SerialNumber *string - - // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. 
You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. - SourceIdentity *string - - // Async method of providing MFA token code for assuming an IAM role with MFA. - // The value returned by the function will be used as the TokenCode in the Retrieve - // call. See StdinTokenProvider for a provider that prompts and reads from stdin. - // - // This token provider will be called when ever the assumed role's - // credentials need to be refreshed when SerialNumber is set. - TokenProvider func() (string, error) - - // A list of session tags that you want to pass. Each session tag consists of a key - // name and an associated value. For more information about session tags, see - // Tagging STS Sessions - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the - // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. - Tags []types.Tag - - // A list of keys for session tags that you want to set as transitive. If you set a - // tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags - // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. This parameter is optional. - TransitiveTagKeys []string -} - -// NewAssumeRoleProvider constructs and returns a credentials provider that -// will retrieve credentials by assuming a IAM role using STS. -func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider { - o := AssumeRoleOptions{ - Client: client, - RoleARN: roleARN, - } - - for _, fn := range optFns { - fn(&o) - } - - return &AssumeRoleProvider{ - options: o, - } -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { - // Apply defaults where parameters are not set. - if len(p.options.RoleSessionName) == 0 { - // Try to work out a role name that will hopefully end up unique. - p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano()) - } - if p.options.Duration == 0 { - // Expire as often as AWS permits. 
- p.options.Duration = DefaultDuration - } - input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)), - PolicyArns: p.options.PolicyARNs, - RoleArn: aws.String(p.options.RoleARN), - RoleSessionName: aws.String(p.options.RoleSessionName), - ExternalId: p.options.ExternalID, - SourceIdentity: p.options.SourceIdentity, - Tags: p.options.Tags, - TransitiveTagKeys: p.options.TransitiveTagKeys, - } - if p.options.Policy != nil { - input.Policy = p.options.Policy - } - if p.options.SerialNumber != nil { - if p.options.TokenProvider != nil { - input.SerialNumber = p.options.SerialNumber - code, err := p.options.TokenProvider() - if err != nil { - return aws.Credentials{}, err - } - input.TokenCode = aws.String(code) - } else { - return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set") - } - } - - resp, err := p.options.Client.AssumeRole(ctx, input) - if err != nil { - return aws.Credentials{Source: ProviderName}, err - } - - var accountID string - if resp.AssumedRoleUser != nil { - accountID = getAccountID(resp.AssumedRoleUser) - } - - return aws.Credentials{ - AccessKeyID: *resp.Credentials.AccessKeyId, - SecretAccessKey: *resp.Credentials.SecretAccessKey, - SessionToken: *resp.Credentials.SessionToken, - Source: ProviderName, - - CanExpire: true, - Expires: *resp.Credentials.Expiration, - AccountID: accountID, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go deleted file mode 100644 index b4b719708..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ /dev/null @@ -1,169 +0,0 @@ -package stscreds - -import ( - "context" - "fmt" - "io/ioutil" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/sts" - "github.com/aws/aws-sdk-go-v2/service/sts/types" -) - -var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode() - -const ( - // WebIdentityProviderName is the web identity provider name - WebIdentityProviderName = "WebIdentityCredentials" -) - -// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation. -type AssumeRoleWithWebIdentityAPIClient interface { - AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) -} - -// WebIdentityRoleProvider is used to retrieve credentials using -// an OIDC token. -type WebIdentityRoleProvider struct { - options WebIdentityRoleOptions -} - -// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider -type WebIdentityRoleOptions struct { - // Client implementation of the AssumeRoleWithWebIdentity operation. Required - Client AssumeRoleWithWebIdentityAPIClient - - // JWT Token Provider. Required - TokenRetriever IdentityTokenRetriever - - // IAM Role ARN to assume. Required - RoleARN string - - // Session name, if you wish to uniquely identify this session. - RoleSessionName string - - // Expiry duration of the STS credentials. STS will assign a default expiry - // duration if this value is unset. 
This is different from the Duration - // option of AssumeRoleProvider, which automatically assigns 15 minutes if - // Duration is unset. - // - // See the STS AssumeRoleWithWebIdentity API reference guide for more - // information on defaults. - // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html - Duration time.Duration - - // An IAM policy in JSON format that you want to use as an inline session policy. - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you - // want to use as managed session policies. The policies must exist in the - // same account as the role. - PolicyARNs []types.PolicyDescriptorType -} - -// IdentityTokenRetriever is an interface for retrieving a JWT -type IdentityTokenRetriever interface { - GetIdentityToken() ([]byte, error) -} - -// IdentityTokenFile is for retrieving an identity token from the given file name -type IdentityTokenFile string - -// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte -func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) { - b, err := ioutil.ReadFile(string(j)) - if err != nil { - return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err) - } - - return b, nil -} - -// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the -// provided stsiface.ClientAPI -func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider { - o := WebIdentityRoleOptions{ - Client: client, - RoleARN: roleARN, - TokenRetriever: tokenRetriever, - } - - for _, fn := range optFns { - fn(&o) - } - - return &WebIdentityRoleProvider{options: o} -} - -// Retrieve will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. -func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { - b, err := p.options.TokenRetriever.GetIdentityToken() - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provide source, %w", err) - } - - sessionName := p.options.RoleSessionName - if len(sessionName) == 0 { - // session name is used to uniquely identify a session. This simply - // uses unix time in nanoseconds to uniquely identify sessions. - sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10) - } - input := &sts.AssumeRoleWithWebIdentityInput{ - PolicyArns: p.options.PolicyARNs, - RoleArn: &p.options.RoleARN, - RoleSessionName: &sessionName, - WebIdentityToken: aws.String(string(b)), - } - if p.options.Duration != 0 { - // If set use the value, otherwise STS will assign a default expiration duration. - input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second)) - } - if p.options.Policy != nil { - input.Policy = p.options.Policy - } - - resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) { - options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode) - }) - if err != nil { - return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err) - } - - var accountID string - if resp.AssumedRoleUser != nil { - accountID = getAccountID(resp.AssumedRoleUser) - } - - // InvalidIdentityToken error is a temporary error that can occur - // when assuming an Role with a JWT web identity token. 
- - value := aws.Credentials{ - AccessKeyID: aws.ToString(resp.Credentials.AccessKeyId), - SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey), - SessionToken: aws.ToString(resp.Credentials.SessionToken), - Source: WebIdentityProviderName, - CanExpire: true, - Expires: *resp.Credentials.Expiration, - AccountID: accountID, - } - return value, nil -} - -// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]" -func getAccountID(u *types.AssumedRoleUser) string { - if u.Arn == nil { - return "" - } - parts := strings.Split(*u.Arn, ":") - if len(parts) < 5 { - return "" - } - return parts[4] -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md deleted file mode 100644 index 82ed689dc..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ /dev/null @@ -1,409 +0,0 @@ -# v1.16.24 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.23 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.22 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.21 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.20 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.19 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.18 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.17 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.14 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2024-03-21) - -* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls. - -# v1.15.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.11 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.10 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.9 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.8 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.7 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.5 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.3 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-11-02) - -* No change notes available for this release. - -# v1.14.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.10 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2023-03-14) - -* **Feature**: Add flag to disable IMDSv1 fallback - -# v1.12.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.19 (2022-10-24) - -* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-11) - -* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout. -* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. 
Fixes #1253 -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-07-15) - -* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go deleted file mode 100644 index 3f4a10e2c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go +++ /dev/null @@ -1,352 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "net" - "net/http" - "os" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ServiceID provides the unique name of this API client -const ServiceID = "ec2imds" - -// Client provides the API client for interacting with the Amazon EC2 Instance -// Metadata Service API. 
-type Client struct { - options Options -} - -// ClientEnableState provides an enumeration if the client is enabled, -// disabled, or default behavior. -type ClientEnableState = internalconfig.ClientEnableState - -// Enumeration values for ClientEnableState -const ( - ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior - ClientDisabled ClientEnableState = internalconfig.ClientDisabled // client disabled - ClientEnabled ClientEnableState = internalconfig.ClientEnabled // client enabled -) - -// EndpointModeState is an enum configuration variable describing the client endpoint mode. -// Not configurable directly, but used when using the NewFromConfig. -type EndpointModeState = internalconfig.EndpointModeState - -// Enumeration values for EndpointModeState -const ( - EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset - EndpointModeStateIPv4 EndpointModeState = internalconfig.EndpointModeStateIPv4 - EndpointModeStateIPv6 EndpointModeState = internalconfig.EndpointModeStateIPv6 -) - -const ( - disableClientEnvVar = "AWS_EC2_METADATA_DISABLED" - - // Client endpoint options - endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" - - defaultIPv4Endpoint = "http://169.254.169.254" - defaultIPv6Endpoint = "http://[fd00:ec2::254]" -) - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - options.HTTPClient = resolveHTTPClient(options.HTTPClient) - - if options.Retryer == nil { - options.Retryer = retry.NewStandard() - } - options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second) - - if options.ClientEnableState == ClientDefaultEnableState { - if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") { - options.ClientEnableState = ClientDisabled - } - } - - if len(options.Endpoint) == 0 { - if v := os.Getenv(endpointEnvVar); len(v) != 0 { - options.Endpoint = v - } - } - - client := &Client{ - options: options, - } - - if client.options.tokenProvider == nil && !client.options.disableAPIToken { - client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL) - } - - return client -} - -// NewFromConfig returns an initialized Client based the AWS SDK config, and -// functional options. Provide additional functional options to further -// configure the behavior of the client, such as changing the client's endpoint -// or adding custom middleware behavior. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...), - HTTPClient: cfg.HTTPClient, - ClientLogMode: cfg.ClientLogMode, - Logger: cfg.Logger, - } - - if cfg.Retryer != nil { - opts.Retryer = cfg.Retryer() - } - - resolveClientEnableState(cfg, &opts) - resolveEndpointConfig(cfg, &opts) - resolveEndpointModeConfig(cfg, &opts) - resolveEnableFallback(cfg, &opts) - - return New(opts, optFns...) -} - -// Options provides the fields for configuring the API client's behavior. -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. 
Use functional options on operation - // call to modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The endpoint the client will use to retrieve EC2 instance metadata. - // - // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EndpointMode. - // - // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT - // has a value the client will use the value of the environment variable as - // the endpoint for operation calls. - // - // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] - Endpoint string - - // The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field. - // - // Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint. - // Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint. - // - // By default if EndpointMode is not set (EndpointModeStateUnset) than the default endpoint selection mode EndpointModeStateIPv4. - EndpointMode EndpointModeState - - // The HTTP client to invoke API calls with. Defaults to client's default - // HTTP implementation if nil. - HTTPClient HTTPClient - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. - Retryer aws.Retryer - - // Changes if the EC2 Instance Metadata client is enabled or not. Client - // will default to enabled if not set to ClientDisabled. When the client is - // disabled it will return an error for all operation calls. - // - // If ClientEnableState value is ClientDefaultEnableState (default value), - // and the environment variable "AWS_EC2_METADATA_DISABLED" is set to - // "true", the client will be disabled. - // - // AWS_EC2_METADATA_DISABLED=true - ClientEnableState ClientEnableState - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // Configure IMDSv1 fallback behavior. By default, the client will attempt - // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary] - // the client will return any errors encountered from attempting to fetch a token - // instead of silently using the insecure data flow of IMDSv1. - // - // See [configuring IMDS] for more information. - // - // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html - EnableFallback aws.Ternary - - // By default, all IMDS client operations enforce a 5-second timeout. You - // can disable that behavior with this setting. - DisableDefaultTimeout bool - - // provides the caching of API tokens used for operation calls. If unset, - // the API token will not be retrieved for the operation. - tokenProvider *tokenProvider - - // option to disable the API token provider for testing. - disableAPIToken bool -} - -// HTTPClient provides the interface for a client making HTTP requests with the -// API. -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -// Copy creates a copy of the API options. -func (o Options) Copy() Options { - to := o - to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...) - return to -} - -// WithAPIOptions wraps the API middleware functions, as a functional option -// for the API Client Options. 
Use this helper to add additional functional -// options to the API client, or operation calls. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), - stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - for _, fn := range optFns { - fn(&options) - } - - if options.ClientEnableState == ClientDisabled { - return nil, metadata, &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: fmt.Errorf( - "access disabled to EC2 IMDS via client option, or %q environment variable", - disableClientEnvVar), - } - } - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) - if err != nil { - return nil, metadata, &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - return result, metadata, err -} - -const ( - // HTTP client constants - defaultDialerTimeout = 250 * time.Millisecond - defaultResponseHeaderTimeout = 500 * time.Millisecond -) - -func resolveHTTPClient(client HTTPClient) HTTPClient { - if client == nil { - client = awshttp.NewBuildableClient() - } - - if c, ok := client.(*awshttp.BuildableClient); ok { - client = c. - WithDialerOptions(func(d *net.Dialer) { - // Use a custom Dial timeout for the EC2 Metadata service to account - // for the possibility the application might not be running in an - // environment with the service present. The client should fail fast in - // this case. - d.Timeout = defaultDialerTimeout - }). - WithTransportOptions(func(tr *http.Transport) { - // Use a custom Transport timeout for the EC2 Metadata service to - // account for the possibility that the application might be running in - // a container, and EC2Metadata service drops the connection after a - // single IP Hop. The client should fail fast in this case. 
- tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout - }) - } - - return client -} - -func resolveClientEnableState(cfg aws.Config, options *Options) error { - if options.ClientEnableState != ClientDefaultEnableState { - return nil - } - value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.ClientEnableState = value - return nil -} - -func resolveEndpointModeConfig(cfg aws.Config, options *Options) error { - if options.EndpointMode != EndpointModeStateUnset { - return nil - } - value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.EndpointMode = value - return nil -} - -func resolveEndpointConfig(cfg aws.Config, options *Options) error { - if len(options.Endpoint) != 0 { - return nil - } - value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources) - if err != nil || !found { - return err - } - options.Endpoint = value - return nil -} - -func resolveEnableFallback(cfg aws.Config, options *Options) { - if options.EnableFallback != aws.UnknownTernary { - return - } - - disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources) - if !ok { - return - } - - if disabled { - options.EnableFallback = aws.FalseTernary - } else { - options.EnableFallback = aws.TrueTernary - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go deleted file mode 100644 index af58b6bb1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go +++ /dev/null @@ -1,77 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getDynamicDataPath = "/latest/dynamic" - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) { - if params == nil { - params = &GetDynamicDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns, - addGetDynamicDataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetDynamicDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetDynamicDataInput provides the input parameters for the GetDynamicData -// operation. -type GetDynamicDataInput struct { - // The relative dynamic data path to retrieve. Can be empty string to - // retrieve a response containing a new line separated list of dynamic data - // resources available. - // - // Must not include the dynamic data base path. - // - // May include leading slash. If Path includes trailing slash the trailing - // slash will be included in the request for the resource. - Path string -} - -// GetDynamicDataOutput provides the output parameters for the GetDynamicData -// operation. 
-type GetDynamicDataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetDynamicData", - buildGetDynamicDataPath, - buildGetDynamicDataOutput) -} - -func buildGetDynamicDataPath(params interface{}) (string, error) { - p, ok := params.(*GetDynamicDataInput) - if !ok { - return "", fmt.Errorf("unknown parameter type %T", params) - } - - return appendURIPath(getDynamicDataPath, p.Path), nil -} - -func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetDynamicDataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go deleted file mode 100644 index 5111cc90c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go +++ /dev/null @@ -1,103 +0,0 @@ -package imds - -import ( - "context" - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getIAMInfoPath = getMetadataPath + "/iam/info" - -// GetIAMInfo retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *Client) GetIAMInfo( - ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options), -) ( - *GetIAMInfoOutput, error, -) { - if params == nil { - params = &GetIAMInfoInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns, - addGetIAMInfoMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetIAMInfoOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetIAMInfoInput provides the input parameters for GetIAMInfo operation. -type GetIAMInfoInput struct{} - -// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation. -type GetIAMInfoOutput struct { - IAMInfo - - ResultMetadata middleware.Metadata -} - -func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetIAMInfo", - buildGetIAMInfoPath, - buildGetIAMInfoOutput, - ) -} - -func buildGetIAMInfoPath(params interface{}) (string, error) { - return getIAMInfoPath, nil -} - -func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(resp.Body, ringBuffer) - - imdsResult := &GetIAMInfoOutput{} - if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil { - return nil, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode instance identity document, %w", err), - Snapshot: ringBuffer.Bytes(), - } - } - // Any code other success is an error - if !strings.EqualFold(imdsResult.Code, "success") { - return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s", - imdsResult.Code) - } - - return imdsResult, nil -} - -// IAMInfo provides the shape for unmarshaling an IAM info from the metadata -// API. 
-type IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go deleted file mode 100644 index dc8c09edf..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go +++ /dev/null @@ -1,110 +0,0 @@ -package imds - -import ( - "context" - "encoding/json" - "fmt" - "io" - "time" - - "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document" - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *Client) GetInstanceIdentityDocument( - ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options), -) ( - *GetInstanceIdentityDocumentOutput, error, -) { - if params == nil { - params = &GetInstanceIdentityDocumentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns, - addGetInstanceIdentityDocumentMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetInstanceIdentityDocumentOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetInstanceIdentityDocumentInput provides the input parameters for -// GetInstanceIdentityDocument operation. -type GetInstanceIdentityDocumentInput struct{} - -// GetInstanceIdentityDocumentOutput provides the output parameters for -// GetInstanceIdentityDocument operation. 
-type GetInstanceIdentityDocumentOutput struct { - InstanceIdentityDocument - - ResultMetadata middleware.Metadata -} - -func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetInstanceIdentityDocument", - buildGetInstanceIdentityDocumentPath, - buildGetInstanceIdentityDocumentOutput, - ) -} - -func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) { - return getInstanceIdentityDocumentPath, nil -} - -func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(resp.Body, ringBuffer) - - output := &GetInstanceIdentityDocumentOutput{} - if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil { - return nil, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode instance identity document, %w", err), - Snapshot: ringBuffer.Bytes(), - } - } - - return output, nil -} - -// InstanceIdentityDocument provides the shape for unmarshaling -// an instance identity document -type InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - MarketplaceProductCodes []string `json:"marketplaceProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go deleted file mode 100644 index 869bfc9fe..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go +++ /dev/null @@ -1,77 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getMetadataPath = "/latest/meta-data" - -// GetMetadata uses the path provided to request information from the Amazon -// EC2 Instance Metadata Service. The content will be returned as a string, or -// error if the request failed. -func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) { - if params == nil { - params = &GetMetadataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns, - addGetMetadataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetMetadataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetMetadataInput provides the input parameters for the GetMetadata -// operation. -type GetMetadataInput struct { - // The relative metadata path to retrieve. Can be empty string to retrieve - // a response containing a new line separated list of metadata resources - // available. 
- // - // Must not include the metadata base path. - // - // May include leading slash. If Path includes trailing slash the trailing slash - // will be included in the request for the resource. - Path string -} - -// GetMetadataOutput provides the output parameters for the GetMetadata -// operation. -type GetMetadataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetMetadata", - buildGetMetadataPath, - buildGetMetadataOutput) -} - -func buildGetMetadataPath(params interface{}) (string, error) { - p, ok := params.(*GetMetadataInput) - if !ok { - return "", fmt.Errorf("unknown parameter type %T", params) - } - - return appendURIPath(getMetadataPath, p.Path), nil -} - -func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetMetadataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go deleted file mode 100644 index 8c0572bb5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go +++ /dev/null @@ -1,73 +0,0 @@ -package imds - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// GetRegion retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *Client) GetRegion( - ctx context.Context, params *GetRegionInput, optFns ...func(*Options), -) ( - *GetRegionOutput, error, -) { - if params == nil { - params = &GetRegionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns, - addGetRegionMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetRegionOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetRegionInput provides the input parameters for GetRegion operation. -type GetRegionInput struct{} - -// GetRegionOutput provides the output parameters for GetRegion operation. 
-type GetRegionOutput struct { - Region string - - ResultMetadata middleware.Metadata -} - -func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetRegion", - buildGetInstanceIdentityDocumentPath, - buildGetRegionOutput, - ) -} - -func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { - out, err := buildGetInstanceIdentityDocumentOutput(resp) - if err != nil { - return nil, err - } - - result, ok := out.(*GetInstanceIdentityDocumentOutput) - if !ok { - return nil, fmt.Errorf("unexpected instance identity document type, %T", out) - } - - region := result.Region - if len(region) == 0 { - return "", fmt.Errorf("instance metadata did not return a region value") - } - - return &GetRegionOutput{ - Region: region, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go deleted file mode 100644 index 1f9ee97a5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go +++ /dev/null @@ -1,119 +0,0 @@ -package imds - -import ( - "context" - "fmt" - "io" - "strconv" - "strings" - "time" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getTokenPath = "/latest/api/token" -const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds" - -// getToken uses the duration to return a token for EC2 IMDS, or an error if -// the request failed. -func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) { - if params == nil { - params = &getTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns, - addGetTokenMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*getTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type getTokenInput struct { - TokenTTL time.Duration -} - -type getTokenOutput struct { - Token string - TokenTTL time.Duration - - ResultMetadata middleware.Metadata -} - -func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { - err := addRequestMiddleware(stack, - options, - "PUT", - "GetToken", - buildGetTokenPath, - buildGetTokenOutput) - if err != nil { - return err - } - - err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After) - if err != nil { - return err - } - - return nil -} - -func buildGetTokenPath(interface{}) (string, error) { - return getTokenPath, nil -} - -func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { - defer func() { - closeErr := resp.Body.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) - } - }() - - ttlHeader := resp.Header.Get(tokenTTLHeader) - tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64) - if err != nil { - return nil, fmt.Errorf("unable to parse API token, %w", err) - } - - var token strings.Builder - if _, err = io.Copy(&token, resp.Body); err != nil { - return nil, fmt.Errorf("unable to read API token, %w", err) - } - - return &getTokenOutput{ - Token: token.String(), - TokenTTL: time.Duration(tokenTTL) * time.Second, - }, nil -} - -type tokenTTLRequestHeader struct{} - -func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" } -func (*tokenTTLRequestHeader) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request) - } - - input, ok := in.Parameters.(*getTokenInput) - if !ok { - return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters) - } - - req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second))) - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go deleted file mode 100644 index 890369724..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go +++ /dev/null @@ -1,61 +0,0 @@ -package imds - -import ( - "context" - "io" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const getUserDataPath = "/latest/user-data" - -// GetUserData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. -func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) { - if params == nil { - params = &GetUserDataInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns, - addGetUserDataMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*GetUserDataOutput) - out.ResultMetadata = metadata - return out, nil -} - -// GetUserDataInput provides the input parameters for the GetUserData -// operation. -type GetUserDataInput struct{} - -// GetUserDataOutput provides the output parameters for the GetUserData -// operation. 
-type GetUserDataOutput struct { - Content io.ReadCloser - - ResultMetadata middleware.Metadata -} - -func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error { - return addAPIRequestMiddleware(stack, - options, - "GetUserData", - buildGetUserDataPath, - buildGetUserDataOutput) -} - -func buildGetUserDataPath(params interface{}) (string, error) { - return getUserDataPath, nil -} - -func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) { - return &GetUserDataOutput{ - Content: resp.Body, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go deleted file mode 100644 index ad283cf82..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go +++ /dev/null @@ -1,48 +0,0 @@ -package imds - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type signRequestMiddleware struct { -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go deleted file mode 100644 index d5765c36b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package imds provides the API client for interacting with the Amazon EC2 -// Instance Metadata Service. -// -// All Client operation calls have a default timeout. If the operation is not -// completed before this timeout expires, the operation will be canceled. This -// timeout can be overridden through the following: -// - Set the options flag DisableDefaultTimeout -// - Provide a Context with a timeout or deadline with calling the client's operations. -// -// See the EC2 IMDS user guide for more information on using the API. 
-// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html -package imds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go deleted file mode 100644 index d7540da34..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go +++ /dev/null @@ -1,20 +0,0 @@ -package imds - -import ( - "context" - "github.com/aws/smithy-go/middleware" -) - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go deleted file mode 100644 index 694b1148f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package imds - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.24" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go deleted file mode 100644 index ce7745589..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go +++ /dev/null @@ -1,114 +0,0 @@ -package config - -import ( - "fmt" - "strings" -) - -// ClientEnableState provides an enumeration if the client is enabled, -// disabled, or default behavior. -type ClientEnableState uint - -// Enumeration values for ClientEnableState -const ( - ClientDefaultEnableState ClientEnableState = iota - ClientDisabled - ClientEnabled -) - -// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode -type EndpointModeState uint - -// Enumeration values for ClientEnableState -const ( - EndpointModeStateUnset EndpointModeState = iota - EndpointModeStateIPv4 - EndpointModeStateIPv6 -) - -// SetFromString sets the EndpointModeState based on the provided string value. Unknown values will default to EndpointModeStateUnset -func (e *EndpointModeState) SetFromString(v string) error { - v = strings.TrimSpace(v) - - switch { - case len(v) == 0: - *e = EndpointModeStateUnset - case strings.EqualFold(v, "IPv6"): - *e = EndpointModeStateIPv6 - case strings.EqualFold(v, "IPv4"): - *e = EndpointModeStateIPv4 - default: - return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") - } - return nil -} - -// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled. -type ClientEnableStateResolver interface { - GetEC2IMDSClientEnableState() (ClientEnableState, bool, error) -} - -// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration. -type EndpointModeResolver interface { - GetEC2IMDSEndpointMode() (EndpointModeState, bool, error) -} - -// EndpointResolver is a config resolver interface for retrieving the endpoint. 
-type EndpointResolver interface { - GetEC2IMDSEndpoint() (string, bool, error) -} - -type v1FallbackDisabledResolver interface { - GetEC2IMDSV1FallbackDisabled() (bool, bool) -} - -// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources. -func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(ClientEnableStateResolver); ok { - value, found, err = resolver.GetEC2IMDSClientEnableState() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources. -func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(EndpointModeResolver); ok { - value, found, err = resolver.GetEC2IMDSEndpointMode() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveEndpointConfig resolves the endpoint from a list of configuration sources. -func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) { - for _, source := range sources { - if resolver, ok := source.(EndpointResolver); ok { - value, found, err = resolver.GetEC2IMDSEndpoint() - if err != nil || found { - return value, found, err - } - } - } - return value, found, err -} - -// ResolveV1FallbackDisabled ... -func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) { - for _, source := range sources { - if resolver, ok := source.(v1FallbackDisabledResolver); ok { - if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found { - return v, true - } - } - } - return false, false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go deleted file mode 100644 index 90cf4aeb3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go +++ /dev/null @@ -1,313 +0,0 @@ -package imds - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "net/url" - "path" - "time" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func addAPIRequestMiddleware(stack *middleware.Stack, - options Options, - operation string, - getPath func(interface{}) (string, error), - getOutput func(*smithyhttp.Response) (interface{}, error), -) (err error) { - err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput) - if err != nil { - return err - } - - // Token Serializer build and state management. 
- if !options.disableAPIToken { - err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After) - if err != nil { - return err - } - - err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before) - if err != nil { - return err - } - } - - return nil -} - -func addRequestMiddleware(stack *middleware.Stack, - options Options, - method string, - operation string, - getPath func(interface{}) (string, error), - getOutput func(*smithyhttp.Response) (interface{}, error), -) (err error) { - err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) - if err != nil { - return err - } - - // Operation timeout - err = stack.Initialize.Add(&operationTimeout{ - Disabled: options.DisableDefaultTimeout, - DefaultTimeout: defaultOperationTimeout, - }, middleware.Before) - if err != nil { - return err - } - - // Operation Serializer - err = stack.Serialize.Add(&serializeRequest{ - GetPath: getPath, - Method: method, - }, middleware.After) - if err != nil { - return err - } - - // Operation endpoint resolver - err = stack.Serialize.Insert(&resolveEndpoint{ - Endpoint: options.Endpoint, - EndpointMode: options.EndpointMode, - }, "OperationSerializer", middleware.Before) - if err != nil { - return err - } - - // Operation Deserializer - err = stack.Deserialize.Add(&deserializeResponse{ - GetOutput: getOutput, - }, middleware.After) - if err != nil { - return err - } - - err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: options.ClientLogMode.IsRequest(), - LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(), - LogResponse: options.ClientLogMode.IsResponse(), - LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(), - }, middleware.After) - if err != nil { - return err - } - - err = addSetLoggerMiddleware(stack, options) - if err != nil { - return err - } - - if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil { - return fmt.Errorf("add protocol finalizers: %w", err) - } - - // Retry support - return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{ - Retryer: options.Retryer, - LogRetryAttempts: options.ClientLogMode.IsRetries(), - }) -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -type serializeRequest struct { - GetPath func(interface{}) (string, error) - Method string -} - -func (*serializeRequest) ID() string { - return "OperationSerializer" -} - -func (m *serializeRequest) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - reqPath, err := m.GetPath(in.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("unable to get request URL path, %w", err) - } - - request.Request.URL.Path = reqPath - request.Request.Method = m.Method - - return next.HandleSerialize(ctx, in) -} - -type deserializeResponse struct { - GetOutput func(*smithyhttp.Response) (interface{}, error) -} - -func (*deserializeResponse) ID() string { - return "OperationDeserializer" -} - -func (m *deserializeResponse) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf( - "unexpected transport response type, %T, want %T", out.RawResponse, resp) - } - defer resp.Body.Close() - - // read the full body so that any operation timeouts cleanup will not race - // the body being read. - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return out, metadata, fmt.Errorf("read response body failed, %w", err) - } - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - - // Anything that's not 200 |< 300 is error - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return out, metadata, &smithyhttp.ResponseError{ - Response: resp, - Err: fmt.Errorf("request to EC2 IMDS failed"), - } - } - - result, err := m.GetOutput(resp) - if err != nil { - return out, metadata, fmt.Errorf( - "unable to get deserialized result for response, %w", err, - ) - } - out.Result = result - - return out, metadata, err -} - -type resolveEndpoint struct { - Endpoint string - EndpointMode EndpointModeState -} - -func (*resolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *resolveEndpoint) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - var endpoint string - if len(m.Endpoint) > 0 { - endpoint = m.Endpoint - } else { - switch m.EndpointMode { - case EndpointModeStateIPv6: - endpoint = defaultIPv6Endpoint - case EndpointModeStateIPv4: - fallthrough - case EndpointModeStateUnset: - endpoint = defaultIPv4Endpoint - default: - return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode") - } - } - - req.URL, err = url.Parse(endpoint) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - return next.HandleSerialize(ctx, in) -} - -const ( - defaultOperationTimeout = 5 * time.Second -) - -// operationTimeout adds a timeout on the middleware stack if the Context the -// stack was called with does not have a deadline. The next middleware must -// complete before the timeout, or the context will be canceled. -// -// If DefaultTimeout is zero, no default timeout will be used if the Context -// does not have a timeout. -// -// The next middleware must also ensure that any resources that are also -// canceled by the stack's context are completely consumed before returning. -// Otherwise the timeout cleanup will race the resource being consumed -// upstream. 
-type operationTimeout struct { - Disabled bool - DefaultTimeout time.Duration -} - -func (*operationTimeout) ID() string { return "OperationTimeout" } - -func (m *operationTimeout) HandleInitialize( - ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler, -) ( - output middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.Disabled { - return next.HandleInitialize(ctx, input) - } - - if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 { - var cancelFn func() - ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout) - defer cancelFn() - } - - return next.HandleInitialize(ctx, input) -} - -// appendURIPath joins a URI path component to the existing path with `/` -// separators between the path components. If the path being added ends with a -// trailing `/` that slash will be maintained. -func appendURIPath(base, add string) string { - reqPath := path.Join(base, add) - if len(add) != 0 && add[len(add)-1] == '/' { - reqPath += "/" - } - return reqPath -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %w", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %w", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go deleted file mode 100644 index 5703c6e16..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go +++ /dev/null @@ -1,261 +0,0 @@ -package imds - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "net/http" - "sync" - "sync/atomic" - "time" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const ( - // Headers for Token and TTL - tokenHeader = "x-aws-ec2-metadata-token" - defaultTokenTTL = 5 * time.Minute -) - -type tokenProvider struct { - client *Client - tokenTTL time.Duration - - token *apiToken - tokenMux sync.RWMutex - - disabled uint32 // Atomic updated -} - -func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider { - return &tokenProvider{ - client: client, - tokenTTL: ttl, - } -} - -// apiToken provides the API token used by all operation calls for th EC2 -// Instance metadata service. -type apiToken struct { - token string - expires time.Time -} - -var timeNow = time.Now - -// Expired returns if the token is expired. -func (t *apiToken) Expired() bool { - // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry - // time is always based on reported wall-clock time. 
-	return timeNow().Round(0).After(t.expires)
-}
-
-func (t *tokenProvider) ID() string { return "APITokenProvider" }
-
-// HandleFinalize is the finalize stack middleware, that if the token provider is
-// enabled, will attempt to add the cached API token to the request. If the API
-// token is not cached, it will be retrieved in a separate API call, getToken.
-//
-// For retry attempts, handler must be added after attempt retryer.
-//
-// If request for getToken fails the token provider may be disabled from future
-// requests, depending on the response status code.
-func (t *tokenProvider) HandleFinalize(
-	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
-) (
-	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
-) {
-	if t.fallbackEnabled() && !t.enabled() {
-		// short-circuits to insecure data flow if token provider is disabled.
-		return next.HandleFinalize(ctx, input)
-	}
-
-	req, ok := input.Request.(*smithyhttp.Request)
-	if !ok {
-		return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request)
-	}
-
-	tok, err := t.getToken(ctx)
-	if err != nil {
-		// If the error allows the token to downgrade to insecure flow allow that.
-		var bypassErr *bypassTokenRetrievalError
-		if errors.As(err, &bypassErr) {
-			return next.HandleFinalize(ctx, input)
-		}
-
-		return out, metadata, fmt.Errorf("failed to get API token, %w", err)
-	}
-
-	req.Header.Set(tokenHeader, tok.token)
-
-	return next.HandleFinalize(ctx, input)
-}
-
-// HandleDeserialize is the deserialize stack middleware for determining if the
-// operation the token provider is decorating failed because of a 401
-// unauthorized status code. If the operation failed for that reason the token
-// provider needs to be re-enabled so that it can start adding the API token to
-// operation calls.
-func (t *tokenProvider) HandleDeserialize(
-	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
-) (
-	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
-) {
-	out, metadata, err = next.HandleDeserialize(ctx, input)
-	if err == nil {
-		return out, metadata, err
-	}
-
-	resp, ok := out.RawResponse.(*smithyhttp.Response)
-	if !ok {
-		return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse)
-	}
-
-	if resp.StatusCode == http.StatusUnauthorized { // unauthorized
-		t.enable()
-		err = &retryableError{Err: err, isRetryable: true}
-	}
-
-	return out, metadata, err
-}
-
-func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
-	if t.fallbackEnabled() && !t.enabled() {
-		return nil, &bypassTokenRetrievalError{
-			Err: fmt.Errorf("cannot get API token, provider disabled"),
-		}
-	}
-
-	t.tokenMux.RLock()
-	tok = t.token
-	t.tokenMux.RUnlock()
-
-	if tok != nil && !tok.Expired() {
-		return tok, nil
-	}
-
-	tok, err = t.updateToken(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	return tok, nil
-}
-
-func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
-	t.tokenMux.Lock()
-	defer t.tokenMux.Unlock()
-
-	// Prevent multiple requests to update retrieving the token.
-	if t.token != nil && !t.token.Expired() {
-		tok := t.token
-		return tok, nil
-	}
-
-	result, err := t.client.getToken(ctx, &getTokenInput{
-		TokenTTL: t.tokenTTL,
-	})
-	if err != nil {
-		var statusErr interface{ HTTPStatusCode() int }
-		if errors.As(err, &statusErr) {
-			switch statusErr.HTTPStatusCode() {
-			// Disable future get token if failed because of 403, 404, or 405
-			case http.StatusForbidden,
-				http.StatusNotFound,
-				http.StatusMethodNotAllowed:
-
-				if t.fallbackEnabled() {
-					logger := middleware.GetLogger(ctx)
-					logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
-					t.disable()
-				}
-
-			// 400 errors are terminal, and need to be upstreamed
-			case http.StatusBadRequest:
-				return nil, err
-			}
-		}
-
-		// Disable if request send failed or timed out getting response
-		var re *smithyhttp.RequestSendError
-		var ce *smithy.CanceledError
-		if errors.As(err, &re) || errors.As(err, &ce) {
-			atomic.StoreUint32(&t.disabled, 1)
-		}
-
-		if !t.fallbackEnabled() {
-			// NOTE: getToken() is an implementation detail of some outer operation
-			// (e.g. GetMetadata). It has its own retries that have already been exhausted.
-			// Mark the underlying error as a terminal error.
-			err = &retryableError{Err: err, isRetryable: false}
-			return nil, err
-		}
-
-		// Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request
-		// and allow the request to proceed. Future requests _may_ re-attempt fetching a
-		// token if not disabled.
-		return nil, &bypassTokenRetrievalError{Err: err}
-	}
-
-	tok := &apiToken{
-		token:   result.Token,
-		expires: timeNow().Add(result.TokenTTL),
-	}
-	t.token = tok
-
-	return tok, nil
-}
-
-// enabled returns if the token provider is currently enabled or not.
-func (t *tokenProvider) enabled() bool {
-	return atomic.LoadUint32(&t.disabled) == 0
-}
-
-// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise
-func (t *tokenProvider) fallbackEnabled() bool {
-	switch t.client.options.EnableFallback {
-	case aws.FalseTernary:
-		return false
-	default:
-		return true
-	}
-}
-
-// disable disables the token provider and it will no longer attempt to inject
-// the token, nor request updates.
-func (t *tokenProvider) disable() {
-	atomic.StoreUint32(&t.disabled, 1)
-}
-
-// enable enables the token provider to start refreshing tokens, and adding them
-// to the pending request.
-func (t *tokenProvider) enable() { - t.tokenMux.Lock() - t.token = nil - t.tokenMux.Unlock() - atomic.StoreUint32(&t.disabled, 0) -} - -type bypassTokenRetrievalError struct { - Err error -} - -func (e *bypassTokenRetrievalError) Error() string { - return fmt.Sprintf("bypass token retrieval, %v", e.Err) -} - -func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err } - -type retryableError struct { - Err error - isRetryable bool -} - -func (e *retryableError) RetryableError() bool { return e.isRetryable } - -func (e *retryableError) Error() string { return e.Err.Error() } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md deleted file mode 100644 index c4f94368f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md +++ /dev/null @@ -1,931 +0,0 @@ -# v1.17.50 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.49 (2025-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.48 (2025-01-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.47 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.46 (2025-01-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.45 (2025-01-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.44 (2024-12-19) - -* **Bug Fix**: Fix improper use of printf-style functions. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.43 (2024-12-03.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.42 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.41 (2024-11-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.40 (2024-11-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.39 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.38 (2024-11-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.37 (2024-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.36 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.35 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.34 (2024-10-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.33 (2024-10-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.32 (2024-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.31 (2024-10-09) - -* **Bug Fix**: Fixup some integration tests. 
- -# v1.17.30 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.29 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.28 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.27 (2024-10-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.26 (2024-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.25 (2024-09-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.24 (2024-09-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.23 (2024-09-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.22 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.21 (2024-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.20 (2024-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.19 (2024-09-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.18 (2024-09-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.16 (2024-08-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.15 (2024-08-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.14 (2024-08-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.13 (2024-08-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.12 (2024-08-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.11 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.10 (2024-08-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2024-07-24) - -* **Documentation**: Clarify region hint and credential usage in HeadBucketRegion. - -# v1.17.8 (2024-07-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2024-07-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2024-07-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2024-06-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.25 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.24 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.23 (2024-06-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.22 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.21 (2024-05-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.20 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.19 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.18 (2024-05-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.17 (2024-05-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2024-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2024-04-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.14 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2024-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2024-03-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2024-03-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2024-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2024-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2024-02-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2024-02-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.15 (2024-01-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.14 (2024-01-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.13 (2024-01-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.12 (2024-01-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.11 (2024-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.9 (2023-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.8 (2023-12-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.7 (2023-12-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.6 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.5 (2023-12-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.4 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-11-28.3) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-11-28.2) - -* **Feature**: Add S3Express support. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-11-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.3 (2023-11-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-11-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.1 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-11-17) - -* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2023-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2023-11-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2023-11-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2023-11-09.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2023-11-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2023-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2023-11-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.92 (2023-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.91 (2023-10-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.90 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.89 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.88 (2023-10-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.87 (2023-09-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.86 (2023-09-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.85 (2023-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.84 (2023-09-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.83 (2023-09-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.82 (2023-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.81 (2023-08-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.80 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.79 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.78 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.77 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.76 (2023-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.75 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.74 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.73 (2023-07-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.72 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.71 (2023-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.70 (2023-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.69 (2023-06-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.68 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.67 (2023-05-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.66 (2023-05-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.65 (2023-05-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.64 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.63 (2023-04-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.62 (2023-04-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.61 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.60 (2023-03-31) - -* **Dependency Update**: Updated 
to the latest SDK module versions - -# v1.11.59 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.58 (2023-03-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.57 (2023-03-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.56 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.55 (2023-02-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.54 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.53 (2023-02-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.52 (2023-02-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.51 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.50 (2023-02-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.49 (2023-01-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.48 (2023-01-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.47 (2023-01-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.46 (2022-12-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.45 (2022-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.44 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.43 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.42 (2022-11-22) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.41 (2022-11-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.40 (2022-11-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.39 (2022-11-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.38 (2022-11-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.37 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.36 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.35 (2022-10-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.34 (2022-09-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.33 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.32 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.31 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.30 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.29 (2022-08-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.28 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.27 (2022-08-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.26 (2022-08-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.25 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.24 (2022-08-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.23 (2022-08-09) - 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.22 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.21 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.20 (2022-07-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.19 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.18 (2022-07-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.17 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.16 (2022-06-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.15 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.14 (2022-05-26) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.13 (2022-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.12 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.11 (2022-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.10 (2022-05-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.9 (2022-05-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.8 (2022-05-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.7 (2022-04-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.6 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.5 (2022-04-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.4 (2022-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.1 (2022-01-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.5 (2021-12-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.4 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.3 (2021-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.2 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.1 (2021-11-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment 
variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.4 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.3 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.2 (2021-09-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2021-06-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2021-05-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go deleted file mode 100644 index 4059f9851..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go +++ /dev/null @@ -1,37 +0,0 @@ -package manager - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/service/s3" -) - -// DeleteObjectsAPIClient is an S3 API client that can invoke the DeleteObjects operation. -type DeleteObjectsAPIClient interface { - DeleteObjects(context.Context, *s3.DeleteObjectsInput, ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error) -} - -// DownloadAPIClient is an S3 API client that can invoke the GetObject operation. -type DownloadAPIClient interface { - GetObject(context.Context, *s3.GetObjectInput, ...func(*s3.Options)) (*s3.GetObjectOutput, error) -} - -// HeadBucketAPIClient is an S3 API client that can invoke the HeadBucket operation. -type HeadBucketAPIClient interface { - HeadBucket(context.Context, *s3.HeadBucketInput, ...func(*s3.Options)) (*s3.HeadBucketOutput, error) -} - -// ListObjectsV2APIClient is an S3 API client that can invoke the ListObjectV2 operation. -type ListObjectsV2APIClient interface { - ListObjectsV2(context.Context, *s3.ListObjectsV2Input, ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) -} - -// UploadAPIClient is an S3 API client that can invoke PutObject, UploadPart, CreateMultipartUpload, -// CompleteMultipartUpload, and AbortMultipartUpload operations. -type UploadAPIClient interface { - PutObject(context.Context, *s3.PutObjectInput, ...func(*s3.Options)) (*s3.PutObjectOutput, error) - UploadPart(context.Context, *s3.UploadPartInput, ...func(*s3.Options)) (*s3.UploadPartOutput, error) - CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput, ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error) - CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput, ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error) - AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput, ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go deleted file mode 100644 index d3b828979..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go +++ /dev/null @@ -1,23 +0,0 @@ -package manager - -import ( - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -func validateSupportedARNType(bucket string) error { - if !arn.IsARN(bucket) { - return nil - } - - parsedARN, err := arn.Parse(bucket) - if err != nil { - return err - } - - if parsedARN.Service == "s3-object-lambda" { - return fmt.Errorf("manager does not support s3-object-lambda service ARNs") - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go deleted file mode 100644 index 8c7019529..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go +++ /dev/null @@ -1,147 +0,0 @@ -package manager - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const bucketRegionHeader = "X-Amz-Bucket-Region" - -// GetBucketRegion will attempt to get the region for a bucket using the 
-// client's configured region to determine which AWS partition to perform the query on. -// -// A BucketNotFound error will be returned if the bucket does not exist in the -// AWS partition the client region belongs to. -// -// For example to get the region of a bucket which exists in "eu-central-1" -// you could provide a region hint of "us-west-2". -// -// cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2")) -// if err != nil { -// log.Println("error:", err) -// return -// } -// -// bucket := "my-bucket" -// region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket) -// if err != nil { -// var bnf manager.BucketNotFound -// if errors.As(err, &bnf) { -// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket) -// } -// return -// } -// fmt.Printf("Bucket %s is in %s region\n", bucket, region) -// -// By default the request will be made to the Amazon S3 endpoint using the virtual-hosted-style addressing. -// -// bucketname.s3.us-west-2.amazonaws.com/ -// -// To configure the GetBucketRegion to make a request via the Amazon -// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g. -// fips-us-gov-west-1) set the EndpointResolver on the config or client the -// utility is called with. -// -// cfg, err := config.LoadDefaultConfig(context.TODO(), -// config.WithEndpointResolver( -// aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { -// return aws.Endpoint{URL: "https://s3-fips.us-west-2.amazonaws.com"}, nil -// }), -// ) -// if err != nil { -// panic(err) -// } -// -// If buckets are public, you may use anonymous credential like so. -// -// manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket, func(o *s3.Options) { -// o.Credentials = nil -// // Or -// o.Credentials = aws.AnonymousCredentials{} -// }) -// -// The request with anonymous credentials will not be signed. -// Otherwise credentials would be required for private buckets. -func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket string, optFns ...func(*s3.Options)) (string, error) { - var captureBucketRegion deserializeBucketRegion - - clientOptionFns := make([]func(*s3.Options), len(optFns)+1) - clientOptionFns[0] = func(options *s3.Options) { - options.APIOptions = append(options.APIOptions, captureBucketRegion.RegisterMiddleware) - } - copy(clientOptionFns[1:], optFns) - - _, err := client.HeadBucket(ctx, &s3.HeadBucketInput{ - Bucket: aws.String(bucket), - }, clientOptionFns...) 
- if len(captureBucketRegion.BucketRegion) == 0 && err != nil { - var httpStatusErr interface { - HTTPStatusCode() int - } - if !errors.As(err, &httpStatusErr) { - return "", err - } - - if httpStatusErr.HTTPStatusCode() == http.StatusNotFound { - return "", &bucketNotFound{} - } - - return "", err - } - - return captureBucketRegion.BucketRegion, nil -} - -type deserializeBucketRegion struct { - BucketRegion string -} - -func (d *deserializeBucketRegion) RegisterMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Add(d, middleware.After) -} - -func (d *deserializeBucketRegion) ID() string { - return "DeserializeBucketRegion" -} - -func (d *deserializeBucketRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) - } - - d.BucketRegion = resp.Header.Get(bucketRegionHeader) - - return out, metadata, err -} - -// BucketNotFound indicates the bucket was not found in the partition when calling GetBucketRegion. -type BucketNotFound interface { - error - - isBucketNotFound() -} - -type bucketNotFound struct{} - -func (b *bucketNotFound) Error() string { - return "bucket not found" -} - -func (b *bucketNotFound) isBucketNotFound() {} - -var _ BucketNotFound = (*bucketNotFound)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go deleted file mode 100644 index e781aef61..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go +++ /dev/null @@ -1,79 +0,0 @@ -package manager - -import ( - "io" -) - -// BufferedReadSeeker is buffered io.ReadSeeker -type BufferedReadSeeker struct { - r io.ReadSeeker - buffer []byte - readIdx, writeIdx int -} - -// NewBufferedReadSeeker returns a new BufferedReadSeeker -// if len(b) == 0 then the buffer will be initialized to 64 KiB. -func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker { - if len(b) == 0 { - b = make([]byte, 64*1024) - } - return &BufferedReadSeeker{r: r, buffer: b} -} - -func (b *BufferedReadSeeker) reset(r io.ReadSeeker) { - b.r = r - b.readIdx, b.writeIdx = 0, 0 -} - -// Read will read up len(p) bytes into p and will return -// the number of bytes read and any error that occurred. -// If the len(p) > the buffer size then a single read request -// will be issued to the underlying io.ReadSeeker for len(p) bytes. -// A Read request will at most perform a single Read to the underlying -// io.ReadSeeker, and may return < len(p) if serviced from the buffer. -func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return n, err - } - - if b.readIdx == b.writeIdx { - if len(p) >= len(b.buffer) { - n, err = b.r.Read(p) - return n, err - } - b.readIdx, b.writeIdx = 0, 0 - - n, err = b.r.Read(b.buffer) - if n == 0 { - return n, err - } - - b.writeIdx += n - } - - n = copy(p, b.buffer[b.readIdx:b.writeIdx]) - b.readIdx += n - - return n, err -} - -// Seek will position then underlying io.ReadSeeker to the given offset -// and will clear the buffer. 
-func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { - n, err := b.r.Seek(offset, whence) - - b.reset(b.r) - - return n, err -} - -// ReadAt will read up to len(p) bytes at the given file offset. -// This will result in the buffer being cleared. -func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) { - _, err := b.Seek(off, io.SeekStart) - if err != nil { - return 0, err - } - - return b.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go deleted file mode 100644 index e2ab143b6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows -// +build !windows - -package manager - -func defaultUploadBufferProvider() ReadSeekerWriteToProvider { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go deleted file mode 100644 index 1ae881c10..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package manager - -func defaultUploadBufferProvider() ReadSeekerWriteToProvider { - return NewBufferedReadSeekerWriteToPool(1024 * 1024) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go deleted file mode 100644 index 179fe10f4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows -// +build !windows - -package manager - -func defaultDownloadBufferProvider() WriterReadFromProvider { - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go deleted file mode 100644 index 88887ff58..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package manager - -func defaultDownloadBufferProvider() WriterReadFromProvider { - return NewPooledBufferedWriterReadFromProvider(1024 * 1024) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go deleted file mode 100644 index 31171a698..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package manager provides utilities to upload and download objects from -// S3 concurrently. Helpful for when working with large objects. 
-package manager diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go deleted file mode 100644 index 5a9fe2dd3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go +++ /dev/null @@ -1,528 +0,0 @@ -package manager - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "sync" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/awsutil" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/smithy-go/logging" -) - -const userAgentKey = "s3-transfer" - -// DefaultDownloadPartSize is the default range of bytes to get at a time when -// using Download(). -const DefaultDownloadPartSize = 1024 * 1024 * 5 - -// DefaultDownloadConcurrency is the default number of goroutines to spin up -// when using Download(). -const DefaultDownloadConcurrency = 5 - -// DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download. -const DefaultPartBodyMaxRetries = 3 - -type errReadingBody struct { - err error -} - -func (e *errReadingBody) Error() string { - return fmt.Sprintf("failed to read part body: %v", e.err) -} - -func (e *errReadingBody) Unwrap() error { - return e.err -} - -// The Downloader structure that calls Download(). It is safe to call Download() -// on this structure for multiple objects and across concurrent goroutines. -// Mutating the Downloader's properties is not safe to be done concurrently. -type Downloader struct { - // The size (in bytes) to request from S3 for each part. - // The minimum allowed part size is 5MB, and if this value is set to zero, - // the DefaultDownloadPartSize value will be used. - // - // PartSize is ignored if the Range input parameter is provided. - PartSize int64 - - // PartBodyMaxRetries is the number of retry attempts to make for failed part downloads. - PartBodyMaxRetries int - - // Logger to send logging messages to - Logger logging.Logger - - // Enable Logging of part download retry attempts - LogInterruptedDownloads bool - - // The number of goroutines to spin up in parallel when sending parts. - // If this is set to zero, the DefaultDownloadConcurrency value will be used. - // - // Concurrency of 1 will download the parts sequentially. - // - // Concurrency is ignored if the Range input parameter is provided. - Concurrency int - - // An S3 client to use when performing downloads. - S3 DownloadAPIClient - - // List of client options that will be passed down to individual API - // operation requests made by the downloader. - ClientOptions []func(*s3.Options) - - // Defines the buffer strategy used when downloading a part. - // - // If a WriterReadFromProvider is given the Download manager - // will pass the io.WriterAt of the Download request to the provider - // and will use the returned WriterReadFrom from the provider as the - // destination writer when copying from http response body. - BufferProvider WriterReadFromProvider -} - -// WithDownloaderClientOptions appends to the Downloader's API request options. -func WithDownloaderClientOptions(opts ...func(*s3.Options)) func(*Downloader) { - return func(d *Downloader) { - d.ClientOptions = append(d.ClientOptions, opts...) - } -} - -// NewDownloader creates a new Downloader instance to downloads objects from -// S3 in concurrent chunks. Pass in additional functional options to customize -// the downloader behavior. 
Requires a client.ConfigProvider in order to create -// a S3 service client. The session.Session satisfies the client.ConfigProvider -// interface. -// -// Example: -// -// // Load AWS Config -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create an S3 client using the loaded configuration -// s3.NewFromConfig(cfg) -// -// // Create a downloader passing it the S3 client -// downloader := manager.NewDownloader(s3.NewFromConfig(cfg)) -// -// // Create a downloader with the client and custom downloader options -// downloader := manager.NewDownloader(client, func(d *manager.Downloader) { -// d.PartSize = 64 * 1024 * 1024 // 64MB per part -// }) -func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloader { - d := &Downloader{ - S3: c, - PartSize: DefaultDownloadPartSize, - PartBodyMaxRetries: DefaultPartBodyMaxRetries, - Concurrency: DefaultDownloadConcurrency, - BufferProvider: defaultDownloadBufferProvider(), - } - for _, option := range options { - option(d) - } - - return d -} - -// Download downloads an object in S3 and writes the payload into w -// using concurrent GET requests. The n int64 returned is the size of the object downloaded -// in bytes. -// -// DownloadWithContext is the same as Download with the additional support for -// Context input parameters. The Context must not be nil. A nil Context will -// cause a panic. Use the Context to add deadlining, timeouts, etc. The -// DownloadWithContext may create sub-contexts for individual underlying -// requests. -// -// Additional functional options can be provided to configure the individual -// download. These options are copies of the Downloader instance Download is -// called from. Modifying the options will not impact the original Downloader -// instance. Use the WithDownloaderClientOptions helper function to pass in request -// options that will be applied to all API operations made with this downloader. -// -// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent -// downloads, or in memory []byte wrapper using aws.WriteAtBuffer. In case you download -// files into memory do not forget to pre-allocate memory to avoid additional allocations -// and GC runs. -// -// Example: -// -// // pre-allocate in memory buffer, where headObject type is *s3.HeadObjectOutput -// buf := make([]byte, int(headObject.ContentLength)) -// // wrap with aws.WriteAtBuffer -// w := s3manager.NewWriteAtBuffer(buf) -// // download file into the memory -// numBytesDownloaded, err := downloader.Download(ctx, w, &s3.GetObjectInput{ -// Bucket: aws.String(bucket), -// Key: aws.String(item), -// }) -// -// Specifying a Downloader.Concurrency of 1 will cause the Downloader to -// download the parts from S3 sequentially. -// -// It is safe to call this method concurrently across goroutines. -// -// If the GetObjectInput's Range value is provided that will cause the downloader -// to perform a single GetObjectInput request for that object's range. This will -// caused the part size, and concurrency configurations to be ignored. 
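Reviewer aside: a minimal sketch of the Download call documented in the removed download.go above, assuming an *s3.Client is constructed elsewhere; the bucket, key, and local path parameters are placeholders.

    package example

    import (
        "context"
        "os"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // downloadToFile streams an object into a local file. os.File satisfies
    // io.WriterAt, so parts can be fetched concurrently and written at their
    // own offsets without extra buffering.
    func downloadToFile(ctx context.Context, client *s3.Client, bucket, key, path string) (int64, error) {
        f, err := os.Create(path)
        if err != nil {
            return 0, err
        }
        defer f.Close()

        downloader := manager.NewDownloader(client, func(d *manager.Downloader) {
            d.PartSize = 16 * 1024 * 1024 // 16MB parts instead of the 5MB default
        })

        return downloader.Download(ctx, f, &s3.GetObjectInput{
            Bucket: aws.String(bucket),
            Key:    aws.String(key),
        })
    }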
-func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { - if err := validateSupportedARNType(aws.ToString(input.Bucket)); err != nil { - return 0, err - } - - impl := downloader{w: w, in: input, cfg: d, ctx: ctx} - - // Copy ClientOptions - clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1) - clientOptions = append(clientOptions, func(o *s3.Options) { - o.APIOptions = append(o.APIOptions, - middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), - addFeatureUserAgent, // yes, there are two of these - ) - }) - clientOptions = append(clientOptions, impl.cfg.ClientOptions...) - impl.cfg.ClientOptions = clientOptions - - for _, option := range options { - option(&impl.cfg) - } - - // Ensures we don't need nil checks later on - impl.cfg.Logger = logging.WithContext(ctx, impl.cfg.Logger) - - impl.partBodyMaxRetries = d.PartBodyMaxRetries - - impl.totalBytes = -1 - if impl.cfg.Concurrency == 0 { - impl.cfg.Concurrency = DefaultDownloadConcurrency - } - - if impl.cfg.PartSize == 0 { - impl.cfg.PartSize = DefaultDownloadPartSize - } - - return impl.download() -} - -// downloader is the implementation structure used internally by Downloader. -type downloader struct { - ctx context.Context - cfg Downloader - - in *s3.GetObjectInput - w io.WriterAt - - wg sync.WaitGroup - m sync.Mutex - - pos int64 - totalBytes int64 - written int64 - err error - - partBodyMaxRetries int -} - -// download performs the implementation of the object download across ranged -// GETs. -func (d *downloader) download() (n int64, err error) { - // If range is specified fall back to single download of that range - // this enables the functionality of ranged gets with the downloader but - // at the cost of no multipart downloads. - if rng := aws.ToString(d.in.Range); len(rng) > 0 { - d.downloadRange(rng) - return d.written, d.err - } - - // Spin off first worker to check additional header information - d.getChunk() - - if total := d.getTotalBytes(); total >= 0 { - // Spin up workers - ch := make(chan dlchunk, d.cfg.Concurrency) - - for i := 0; i < d.cfg.Concurrency; i++ { - d.wg.Add(1) - go d.downloadPart(ch) - } - - // Assign work - for d.getErr() == nil { - if d.pos >= total { - break // We're finished queuing chunks - } - - // Queue the next range of bytes to read. - ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize} - d.pos += d.cfg.PartSize - } - - // Wait for completion - close(ch) - d.wg.Wait() - } else { - // Checking if we read anything new - for d.err == nil { - d.getChunk() - } - - // We expect a 416 error letting us know we are done downloading the - // total bytes. Since we do not know the content's length, this will - // keep grabbing chunks of data until the range of bytes specified in - // the request is out of range of the content. Once, this happens, a - // 416 should occur. - var responseError interface { - HTTPStatusCode() int - } - if errors.As(d.err, &responseError) { - if responseError.HTTPStatusCode() == http.StatusRequestedRangeNotSatisfiable { - d.err = nil - } - } - } - - // Return error - return d.written, d.err -} - -// downloadPart is an individual goroutine worker reading from the ch channel -// and performing a GetObject request on the data with a given byte range. -// -// If this is the first worker, this operation also resolves the total number -// of bytes to be read so that the worker manager knows when it is finished. 
-func (d *downloader) downloadPart(ch chan dlchunk) { - defer d.wg.Done() - for { - chunk, ok := <-ch - if !ok { - break - } - if d.getErr() != nil { - // Drain the channel if there is an error, to prevent deadlocking - // of download producer. - continue - } - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } - } -} - -// getChunk grabs a chunk of data from the body. -// Not thread safe. Should only used when grabbing data on a single thread. -func (d *downloader) getChunk() { - if d.getErr() != nil { - return - } - - chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize} - d.pos += d.cfg.PartSize - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } -} - -// downloadRange downloads an Object given the passed in Byte-Range value. -// The chunk used down download the range will be configured for that range. -func (d *downloader) downloadRange(rng string) { - if d.getErr() != nil { - return - } - - chunk := dlchunk{w: d.w, start: d.pos} - // Ranges specified will short circuit the multipart download - chunk.withRange = rng - - if err := d.downloadChunk(chunk); err != nil { - d.setErr(err) - } - - // Update the position based on the amount of data received. - d.pos = d.written -} - -// downloadChunk downloads the chunk from s3 -func (d *downloader) downloadChunk(chunk dlchunk) error { - var params s3.GetObjectInput - awsutil.Copy(¶ms, d.in) - - // Get the next byte range of data - params.Range = aws.String(chunk.ByteRange()) - - var n int64 - var err error - for retry := 0; retry <= d.partBodyMaxRetries; retry++ { - n, err = d.tryDownloadChunk(¶ms, &chunk) - if err == nil { - break - } - // Check if the returned error is an errReadingBody. - // If err is errReadingBody this indicates that an error - // occurred while copying the http response body. - // If this occurs we unwrap the err to set the underlying error - // and attempt any remaining retries. - if bodyErr, ok := err.(*errReadingBody); ok { - err = bodyErr.Unwrap() - } else { - return err - } - - chunk.cur = 0 - - d.cfg.Logger.Logf(logging.Debug, - "object part body download interrupted %s, err, %v, retrying attempt %d", - aws.ToString(params.Key), err, retry) - } - - d.incrWritten(n) - - return err -} - -func (d *downloader) tryDownloadChunk(params *s3.GetObjectInput, w io.Writer) (int64, error) { - cleanup := func() {} - if d.cfg.BufferProvider != nil { - w, cleanup = d.cfg.BufferProvider.GetReadFrom(w) - } - defer cleanup() - - resp, err := d.cfg.S3.GetObject(d.ctx, params, d.cfg.ClientOptions...) - if err != nil { - return 0, err - } - d.setTotalBytes(resp) // Set total if not yet set. - - var src io.Reader = resp.Body - if d.cfg.BufferProvider != nil { - src = &suppressWriterAt{suppressed: src} - } - n, err := io.Copy(w, src) - resp.Body.Close() - if err != nil { - return n, &errReadingBody{err: err} - } - - return n, nil -} - -// getTotalBytes is a thread-safe getter for retrieving the total byte status. -func (d *downloader) getTotalBytes() int64 { - d.m.Lock() - defer d.m.Unlock() - - return d.totalBytes -} - -// setTotalBytes is a thread-safe setter for setting the total byte status. -// Will extract the object's total bytes from the Content-Range if the file -// will be chunked, or Content-Length. Content-Length is used when the response -// does not include a Content-Range. Meaning the object was not chunked. This -// occurs when the full file fits within the PartSize directive. 
-func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { - d.m.Lock() - defer d.m.Unlock() - - if d.totalBytes >= 0 { - return - } - - if resp.ContentRange == nil { - // ContentRange is nil when the full file contents is provided, and - // is not chunked. Use ContentLength instead. - if aws.ToInt64(resp.ContentLength) > 0 { - d.totalBytes = aws.ToInt64(resp.ContentLength) - return - } - } else { - parts := strings.Split(*resp.ContentRange, "/") - - total := int64(-1) - var err error - // Checking for whether or not a numbered total exists - // If one does not exist, we will assume the total to be -1, undefined, - // and sequentially download each chunk until hitting a 416 error - totalStr := parts[len(parts)-1] - if totalStr != "*" { - total, err = strconv.ParseInt(totalStr, 10, 64) - if err != nil { - d.err = err - return - } - } - - d.totalBytes = total - } -} - -func (d *downloader) incrWritten(n int64) { - d.m.Lock() - defer d.m.Unlock() - - d.written += n -} - -// getErr is a thread-safe getter for the error object -func (d *downloader) getErr() error { - d.m.Lock() - defer d.m.Unlock() - - return d.err -} - -// setErr is a thread-safe setter for the error object -func (d *downloader) setErr(e error) { - d.m.Lock() - defer d.m.Unlock() - - d.err = e -} - -// dlchunk represents a single chunk of data to write by the worker routine. -// This structure also implements an io.SectionReader style interface for -// io.WriterAt, effectively making it an io.SectionWriter (which does not -// exist). -type dlchunk struct { - w io.WriterAt - start int64 - size int64 - cur int64 - - // specifies the byte range the chunk should be downloaded with. - withRange string -} - -// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start -// position to its end (or EOF). -// -// If a range is specified on the dlchunk the size will be ignored when writing. -// as the total size may not of be known ahead of time. -func (c *dlchunk) Write(p []byte) (n int, err error) { - if c.cur >= c.size && len(c.withRange) == 0 { - return 0, io.EOF - } - - n, err = c.w.WriteAt(p, c.start+c.cur) - c.cur += int64(n) - - return -} - -// ByteRange returns a HTTP Byte-Range header value that should be used by the -// client to request the chunk's range. -func (c *dlchunk) ByteRange() string { - if len(c.withRange) != 0 { - return c.withRange - } - - return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go deleted file mode 100644 index 3e9fd9be0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
- -package manager - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.50" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go deleted file mode 100644 index 6b93a3bc4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go +++ /dev/null @@ -1,251 +0,0 @@ -package manager - -import ( - "context" - "fmt" - "sync" -) - -type byteSlicePool interface { - Get(context.Context) (*[]byte, error) - Put(*[]byte) - ModifyCapacity(int) - SliceSize() int64 - Close() -} - -type maxSlicePool struct { - // allocator is defined as a function pointer to allow - // for test cases to instrument custom tracers when allocations - // occur. - allocator sliceAllocator - - slices chan *[]byte - allocations chan struct{} - capacityChange chan struct{} - - max int - sliceSize int64 - - mtx sync.RWMutex -} - -func newMaxSlicePool(sliceSize int64) *maxSlicePool { - p := &maxSlicePool{sliceSize: sliceSize} - p.allocator = p.newSlice - - return p -} - -var errZeroCapacity = fmt.Errorf("get called on zero capacity pool") - -func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) { - // check if context is canceled before attempting to get a slice - // this ensures priority is given to the cancel case first - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - p.mtx.RLock() - - for { - select { - case bs, ok := <-p.slices: - p.mtx.RUnlock() - if !ok { - // attempt to get on a zero capacity pool - return nil, errZeroCapacity - } - return bs, nil - case <-ctx.Done(): - p.mtx.RUnlock() - return nil, ctx.Err() - default: - // pass - } - - select { - case _, ok := <-p.allocations: - p.mtx.RUnlock() - if !ok { - // attempt to get on a zero capacity pool - return nil, errZeroCapacity - } - return p.allocator(), nil - case <-ctx.Done(): - p.mtx.RUnlock() - return nil, ctx.Err() - default: - // In the event that there are no slices or allocations available - // This prevents some deadlock situations that can occur around sync.RWMutex - // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock. - // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where - // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock, - // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity. - - // Short-circuit if the pool capacity is zero. - if p.max == 0 { - p.mtx.RUnlock() - return nil, errZeroCapacity - } - - // Since we will be releasing the read-lock we need to take the reference to the channel. - // Since channels are references we will still get notified if slices are added, or if - // the channel is closed due to a capacity modification. This specifically avoids a data race condition - // where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock. - c := p.capacityChange - - p.mtx.RUnlock() - - select { - case _ = <-c: - p.mtx.RLock() - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } -} - -func (p *maxSlicePool) Put(bs *[]byte) { - p.mtx.RLock() - defer p.mtx.RUnlock() - - if p.max == 0 { - return - } - - select { - case p.slices <- bs: - p.notifyCapacity() - default: - // If the new channel when attempting to add the slice then we drop the slice. - // The logic here is to prevent a deadlock situation if channel is already at max capacity. 
- // Allows us to reap allocations that are returned and are no longer needed. - } -} - -func (p *maxSlicePool) ModifyCapacity(delta int) { - if delta == 0 { - return - } - - p.mtx.Lock() - defer p.mtx.Unlock() - - p.max += delta - - if p.max == 0 { - p.empty() - return - } - - if p.capacityChange != nil { - close(p.capacityChange) - } - p.capacityChange = make(chan struct{}, p.max) - - origAllocations := p.allocations - p.allocations = make(chan struct{}, p.max) - - newAllocs := len(origAllocations) + delta - for i := 0; i < newAllocs; i++ { - p.allocations <- struct{}{} - } - - if origAllocations != nil { - close(origAllocations) - } - - origSlices := p.slices - p.slices = make(chan *[]byte, p.max) - if origSlices == nil { - return - } - - close(origSlices) - for bs := range origSlices { - select { - case p.slices <- bs: - default: - // If the new channel blocks while adding slices from the old channel - // then we drop the slice. The logic here is to prevent a deadlock situation - // if the new channel has a smaller capacity then the old. - } - } -} - -func (p *maxSlicePool) notifyCapacity() { - select { - case p.capacityChange <- struct{}{}: - default: - // This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized - // on capacity modifications. This is just a safety to ensure that a blocking situation can't occur. - } -} - -func (p *maxSlicePool) SliceSize() int64 { - return p.sliceSize -} - -func (p *maxSlicePool) Close() { - p.mtx.Lock() - defer p.mtx.Unlock() - p.empty() -} - -func (p *maxSlicePool) empty() { - p.max = 0 - - if p.capacityChange != nil { - close(p.capacityChange) - p.capacityChange = nil - } - - if p.allocations != nil { - close(p.allocations) - for range p.allocations { - // drain channel - } - p.allocations = nil - } - - if p.slices != nil { - close(p.slices) - for range p.slices { - // drain channel - } - p.slices = nil - } -} - -func (p *maxSlicePool) newSlice() *[]byte { - bs := make([]byte, p.sliceSize) - return &bs -} - -type returnCapacityPoolCloser struct { - byteSlicePool - returnCapacity int -} - -func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) { - if delta > 0 { - n.returnCapacity = -1 * delta - } - n.byteSlicePool.ModifyCapacity(delta) -} - -func (n *returnCapacityPoolCloser) Close() { - if n.returnCapacity < 0 { - n.byteSlicePool.ModifyCapacity(n.returnCapacity) - } -} - -type sliceAllocator func() *[]byte - -var newByteSlicePool = func(sliceSize int64) byteSlicePool { - return newMaxSlicePool(sliceSize) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go deleted file mode 100644 index ce117c32a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go +++ /dev/null @@ -1,65 +0,0 @@ -package manager - -import ( - "io" - "sync" -) - -// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker -type ReadSeekerWriteTo interface { - io.ReadSeeker - io.WriterTo -} - -// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriteAt -// implementation. -type BufferedReadSeekerWriteTo struct { - *BufferedReadSeeker -} - -// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or -// an error occurs. Returns the number of bytes written and any error encountered during the write. 
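As a hedged sketch only: wiring the pooled WriteTo buffering defined in this file into an Uploader. Per the removed default_read_seeker_write_to.go above, the non-Windows default leaves BufferProvider nil; the 2 MiB size here is an arbitrary illustration (the pool bumps anything below 64 KiB up to 64 KiB).

    package example

    import (
        "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // newBufferedUploader wraps each part's reader with a reusable pooled
    // buffer, so copies to the HTTP request body go through a single []byte
    // slice per in-flight part instead of many small reads.
    func newBufferedUploader(client *s3.Client) *manager.Uploader {
        return manager.NewUploader(client, func(u *manager.Uploader) {
            u.BufferProvider = manager.NewBufferedReadSeekerWriteToPool(2 * 1024 * 1024)
        })
    }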
-func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) { - return io.Copy(writer, b.BufferedReadSeeker) -} - -// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker -type ReadSeekerWriteToProvider interface { - GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) -} - -// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse -// []byte slices for buffering parts in memory -type BufferedReadSeekerWriteToPool struct { - pool sync.Pool -} - -// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create -// a pool of reusable buffers . If size is less then < 64 KiB then the buffer -// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom -// respectively will default to copying 32 KiB. -func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool { - if size < 65536 { - size = 65536 - } - - return &BufferedReadSeekerWriteToPool{ - pool: sync.Pool{New: func() interface{} { - return make([]byte, size) - }}, - } -} - -// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo. -// The provided cleanup must be called after operations have been completed on the -// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool. -func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) { - buffer := p.pool.Get().([]byte) - - r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)} - cleanup = func() { - p.pool.Put(buffer) - } - - return r, cleanup -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go deleted file mode 100644 index 968f90732..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go +++ /dev/null @@ -1,187 +0,0 @@ -package manager - -import ( - "io" - "sync" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the -// SDK to accept an io.Reader that is not also an io.Seeker for unsigned -// streaming payload API operations. -// -// A readSeekCloser wrapping an nonseekable io.Reader used in an API operation's -// input will prevent that operation being retried in the case of -// network errors, and cause operation requests to fail if yhe operation -// requires payload signing. -// -// Note: If using with S3 PutObject to stream an object upload. The SDK's S3 -// Upload Manager(s3manager.Uploader) provides support for streaming -// with the ability to retry network errors. -func ReadSeekCloser(r io.Reader) *ReaderSeekerCloser { - return &ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// seekerLen attempts to get the number of bytes remaining at the seeker's -// current position. Returns the number of bytes remaining or error. -func seekerLen(s io.Seeker) (int64, error) { - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. - switch v := s.(type) { - case *ReaderSeekerCloser: - return v.GetLen() - } - - return computeSeekerLength(s) -} - -// GetLen returns the length of the bytes remaining in the underlying reader. 
-// Checks first for Len(), then io.Seeker to determine the size of the -// underlying reader. -// -// Will return -1 if the length cannot be determined. -func (r *ReaderSeekerCloser) GetLen() (int64, error) { - if l, ok := r.HasLen(); ok { - return int64(l), nil - } - - if s, ok := r.r.(io.Seeker); ok { - return computeSeekerLength(s) - } - - return -1, nil -} - -func computeSeekerLength(s io.Seeker) (int64, error) { - curOffset, err := s.Seek(0, io.SeekCurrent) - if err != nil { - return 0, err - } - - endOffset, err := s.Seek(0, io.SeekEnd) - if err != nil { - return 0, err - } - - _, err = s.Seek(curOffset, io.SeekStart) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil -} - -// HasLen returns the length of the underlying reader if the value implements -// the Len() int method. -func (r *ReaderSeekerCloser) HasLen() (int, bool) { - type lenner interface { - Len() int - } - - if lr, ok := r.r.(lenner); ok { - return lr.Len(), true - } - - return 0, false -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be -// returned. -// -// Performs the same functionality as io.Reader Read -func (r *ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r *ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// IsSeeker returns if the underlying reader is also a seeker. -func (r *ReaderSeekerCloser) IsSeeker() bool { - _, ok := r.r.(io.Seeker) - return ok -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r *ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} - -// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer -// in memory. Safe to use concurrently. -type WriteAtBuffer struct { - buf []byte - m sync.Mutex - - // GrowthCoeff defines the growth rate of the internal buffer. By - // default, the growth rate is 1, where expanding the internal - // buffer will allocate only enough capacity to fit the new expected - // length. - GrowthCoeff float64 -} - -// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer -// provided by buf. -func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { - return &WriteAtBuffer{buf: buf} -} - -// WriteAt writes a slice of bytes to a buffer starting at the position provided -// The number of bytes written will be returned, or error. Can overwrite previous -// written slices if the write ats overlap. 
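A small, self-contained illustration of the WriteAtBuffer semantics described above; the out-of-order writes are contrived to show that the buffer grows to cover the furthest offset written, which is exactly what concurrent download parts rely on.

    package example

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
    )

    func writeAtBufferDemo() {
        buf := manager.NewWriteAtBuffer(nil)

        // Writes may land out of order (as concurrent parts do); the internal
        // slice is grown to fit the highest offset seen so far.
        _, _ = buf.WriteAt([]byte("world"), 6)
        _, _ = buf.WriteAt([]byte("hello "), 0)

        fmt.Println(string(buf.Bytes())) // hello world
    }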
-func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { - pLen := len(p) - expLen := pos + int64(pLen) - b.m.Lock() - defer b.m.Unlock() - if int64(len(b.buf)) < expLen { - if int64(cap(b.buf)) < expLen { - if b.GrowthCoeff < 1 { - b.GrowthCoeff = 1 - } - newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) - copy(newBuf, b.buf) - b.buf = newBuf - } - b.buf = b.buf[:expLen] - } - copy(b.buf[pos:], p) - return pLen, nil -} - -// Bytes returns a slice of bytes written to the buffer. -func (b *WriteAtBuffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.buf -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go deleted file mode 100644 index c6d04f1e9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go +++ /dev/null @@ -1,905 +0,0 @@ -package manager - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net/http" - "sort" - "sync" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/awsutil" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithymiddleware "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// MaxUploadParts is the maximum allowed number of parts in a multi-part upload -// on Amazon S3. -const MaxUploadParts int32 = 10000 - -// MinUploadPartSize is the minimum allowed part size when uploading a part to -// Amazon S3. -const MinUploadPartSize int64 = 1024 * 1024 * 5 - -// DefaultUploadPartSize is the default part size to buffer chunks of a -// payload into. -const DefaultUploadPartSize = MinUploadPartSize - -// DefaultUploadConcurrency is the default number of goroutines to spin up when -// using Upload(). -const DefaultUploadConcurrency = 5 - -// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned -// will satisfy this interface when a multi part upload failed to upload all -// chucks to S3. In the case of a failure the UploadID is needed to operate on -// the chunks, if any, which were uploaded. -// -// Example: -// -// u := manager.NewUploader(client) -// output, err := u.upload(context.Background(), input) -// if err != nil { -// var multierr manager.MultiUploadFailure -// if errors.As(err, &multierr) { -// fmt.Printf("upload failure UploadID=%s, %s\n", multierr.UploadID(), multierr.Error()) -// } else { -// fmt.Printf("upload failure, %s\n", err.Error()) -// } -// } -type MultiUploadFailure interface { - error - - // UploadID returns the upload id for the S3 multipart upload that failed. - UploadID() string -} - -// A multiUploadError wraps the upload ID of a failed s3 multipart upload. -// Composed of BaseError for code, message, and original error -// -// Should be used for an error that occurred failing a S3 multipart upload, -// and a upload ID is available. If an uploadID is not available a more relevant -type multiUploadError struct { - err error - - // ID for multipart upload which failed. - uploadID string -} - -// batchItemError returns the string representation of the error. -// -// # See apierr.BaseError ErrorWithExtra for output format -// -// Satisfies the error interface. 
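For illustration, a sketch of recovering the upload ID from a failed multipart upload, useful when LeavePartsOnError is set and orphaned parts must be aborted by hand; bucket and key are placeholders supplied by the caller.

    package example

    import (
        "context"
        "errors"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // abortOnMultipartFailure inspects an Upload error and, if it carries an
    // upload ID, aborts the orphaned multipart upload so its parts stop
    // accruing storage costs.
    func abortOnMultipartFailure(ctx context.Context, client *s3.Client, bucket, key string, uploadErr error) {
        var mu manager.MultiUploadFailure
        if !errors.As(uploadErr, &mu) {
            return // single-part upload, or no upload ID to clean up
        }

        _, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
            Bucket:   aws.String(bucket),
            Key:      aws.String(key),
            UploadId: aws.String(mu.UploadID()),
        })
        if err != nil {
            log.Printf("abort multipart upload %s: %v", mu.UploadID(), err)
        }
    }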
-func (m *multiUploadError) Error() string { - var extra string - if m.err != nil { - extra = fmt.Sprintf(", cause: %s", m.err.Error()) - } - return fmt.Sprintf("upload multipart failed, upload id: %s%s", m.uploadID, extra) -} - -// Unwrap returns the underlying error that cause the upload failure -func (m *multiUploadError) Unwrap() error { - return m.err -} - -// UploadID returns the id of the S3 upload which failed. -func (m *multiUploadError) UploadID() string { - return m.uploadID -} - -// UploadOutput represents a response from the Upload() call. -type UploadOutput struct { - // The URL where the object was uploaded to. - Location string - - // The ID for a multipart upload to S3. In the case of an error the error - // can be cast to the MultiUploadFailure interface to extract the upload ID. - // Will be empty string if multipart upload was not used, and the object - // was uploaded as a single PutObject call. - UploadID string - - // The list of parts that were uploaded and their checksums. Will be empty - // if multipart upload was not used, and the object was uploaded as a - // single PutObject call. - CompletedParts []types.CompletedPart - - // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool - - // The base64-encoded, 32-bit CRC32 checksum of the object. - ChecksumCRC32 *string - - // The base64-encoded, 32-bit CRC32C checksum of the object. - ChecksumCRC32C *string - - // The base64-encoded, 160-bit SHA-1 digest of the object. - ChecksumSHA1 *string - - // The base64-encoded, 256-bit SHA-256 digest of the object. - ChecksumSHA256 *string - - // Entity tag for the uploaded object. - ETag *string - - // If the object expiration is configured, this will contain the expiration date - // (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string - - // The object key of the newly created object. - Key *string - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged types.RequestCharged - - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. - SSEKMSKeyId *string - - // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS customer master key (CMK) in your - // initiate multipart upload request, the response includes this header. It - // confirms the encryption algorithm that Amazon S3 used to encrypt the object. - ServerSideEncryption types.ServerSideEncryption - - // The version of the object that was uploaded. Will only be populated if - // the S3 Bucket is versioned. If the bucket is not versioned this field - // will not be set. - VersionID *string -} - -// WithUploaderRequestOptions appends to the Uploader's API client options. -func WithUploaderRequestOptions(opts ...func(*s3.Options)) func(*Uploader) { - return func(u *Uploader) { - u.ClientOptions = append(u.ClientOptions, opts...) - } -} - -// The Uploader structure that calls Upload(). It is safe to call Upload() -// on this structure for multiple objects and across concurrent goroutines. -// Mutating the Uploader's properties is not safe to be done concurrently. -// -// # Pre-computed Checksums -// -// Care must be taken when using pre-computed checksums the transfer upload -// manager. 
The format and value of the checksum differs based on if the upload -// will preformed as a single or multipart upload. -// -// Uploads that are smaller than the Uploader's PartSize will be uploaded using -// the PutObject API operation. Pre-computed checksum of the uploaded object's -// content are valid for these single part uploads. If the checksum provided -// does not match the uploaded content the upload will fail. -// -// Uploads that are larger than the Uploader's PartSize will be uploaded using -// multi-part upload. The Pre-computed checksums for these uploads are a -// checksum of checksums of each part. Not a checksum of the full uploaded -// bytes. With the format of "-", (e.g. -// "DUoRhQ==-3"). If a pre-computed checksum is provided that does not match -// this format, as matches the content uploaded, the upload will fail. -// -// ContentMD5 for multipart upload is explicitly ignored for multipart upload, -// and its value is suppressed. -// -// # Automatically Computed Checksums -// -// When the ChecksumAlgorithm member of Upload's input parameter PutObjectInput -// is set to a valid value, the SDK will automatically compute the checksum of -// the individual uploaded parts. The UploadOutput result from Upload will -// include the checksum of part checksums provided by S3 -// CompleteMultipartUpload API call. -type Uploader struct { - // The buffer size (in bytes) to use when buffering data into chunks and - // sending them as parts to S3. The minimum allowed part size is 5MB, and - // if this value is set to zero, the DefaultUploadPartSize value will be used. - PartSize int64 - - // The number of goroutines to spin up in parallel per call to Upload when - // sending parts. If this is set to zero, the DefaultUploadConcurrency value - // will be used. - // - // The concurrency pool is not shared between calls to Upload. - Concurrency int - - // Setting this value to true will cause the SDK to avoid calling - // AbortMultipartUpload on a failure, leaving all successfully uploaded - // parts on S3 for manual recovery. - // - // Note that storing parts of an incomplete multipart upload counts towards - // space usage on S3 and will add additional costs if not cleaned up. - LeavePartsOnError bool - - // MaxUploadParts is the max number of parts which will be uploaded to S3. - // Will be used to calculate the partsize of the object to be uploaded. - // E.g: 5GB file, with MaxUploadParts set to 100, will upload the file - // as 100, 50MB parts. With a limited of s3.MaxUploadParts (10,000 parts). - // - // MaxUploadParts must not be used to limit the total number of bytes uploaded. - // Use a type like to io.LimitReader (https://golang.org/pkg/io/#LimitedReader) - // instead. An io.LimitReader is helpful when uploading an unbounded reader - // to S3, and you know its maximum size. Otherwise the reader's io.EOF returned - // error must be used to signal end of stream. - // - // Defaults to package const's MaxUploadParts value. - MaxUploadParts int32 - - // The client to use when uploading to S3. - S3 UploadAPIClient - - // List of request options that will be passed down to individual API - // operation requests made by the uploader. - ClientOptions []func(*s3.Options) - - // Defines the buffer strategy used when uploading a part - BufferProvider ReadSeekerWriteToProvider - - // partPool allows for the re-usage of streaming payload part buffers between upload calls - partPool byteSlicePool -} - -// NewUploader creates a new Uploader instance to upload objects to S3. 
Pass In -// additional functional options to customize the uploader's behavior. Requires a -// client.ConfigProvider in order to create a S3 service client. The session.Session -// satisfies the client.ConfigProvider interface. -// -// Example: -// -// // Load AWS Config -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } -// -// // Create an S3 Client with the config -// client := s3.NewFromConfig(cfg) -// -// // Create an uploader passing it the client -// uploader := manager.NewUploader(client) -// -// // Create an uploader with the client and custom options -// uploader := manager.NewUploader(client, func(u *manager.Uploader) { -// u.PartSize = 64 * 1024 * 1024 // 64MB per part -// }) -func NewUploader(client UploadAPIClient, options ...func(*Uploader)) *Uploader { - u := &Uploader{ - S3: client, - PartSize: DefaultUploadPartSize, - Concurrency: DefaultUploadConcurrency, - LeavePartsOnError: false, - MaxUploadParts: MaxUploadParts, - BufferProvider: defaultUploadBufferProvider(), - } - - for _, option := range options { - option(u) - } - - u.partPool = newByteSlicePool(u.PartSize) - - return u -} - -// Upload uploads an object to S3, intelligently buffering large -// files into smaller chunks and sending them in parallel across multiple -// goroutines. You can configure the buffer size and concurrency through the -// Uploader parameters. -// -// Additional functional options can be provided to configure the individual -// upload. These options are copies of the Uploader instance Upload is called from. -// Modifying the options will not impact the original Uploader instance. -// -// Use the WithUploaderRequestOptions helper function to pass in request -// options that will be applied to all API operations made with this uploader. -// -// It is safe to call this method concurrently across goroutines. -func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) ( - *UploadOutput, error, -) { - i := uploader{in: input, cfg: u, ctx: ctx} - - // Copy ClientOptions - clientOptions := make([]func(*s3.Options), 0, len(i.cfg.ClientOptions)+1) - clientOptions = append(clientOptions, func(o *s3.Options) { - o.APIOptions = append(o.APIOptions, - middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), - addFeatureUserAgent, // yes, there are two of these - func(s *smithymiddleware.Stack) error { - return s.Finalize.Insert(&setS3ExpressDefaultChecksum{}, "ResolveEndpointV2", smithymiddleware.After) - }, - ) - }) - clientOptions = append(clientOptions, i.cfg.ClientOptions...) - i.cfg.ClientOptions = clientOptions - - for _, opt := range opts { - opt(&i.cfg) - } - - return i.upload() -} - -// internal structure to manage an upload to S3. -type uploader struct { - ctx context.Context - cfg Uploader - - in *s3.PutObjectInput - - readerPos int64 // current reader position - totalSize int64 // set to -1 if the size is not known -} - -// internal logic for deciding whether to upload a single part or use a -// multipart upload. 
-func (u *uploader) upload() (*UploadOutput, error) { - if err := u.init(); err != nil { - return nil, fmt.Errorf("unable to initialize upload: %w", err) - } - defer u.cfg.partPool.Close() - - if u.cfg.PartSize < MinUploadPartSize { - return nil, fmt.Errorf("part size must be at least %d bytes", MinUploadPartSize) - } - - // Do one read to determine if we have more than one part - reader, _, cleanup, err := u.nextReader() - if err == io.EOF { // single part - return u.singlePart(reader, cleanup) - } else if err != nil { - cleanup() - return nil, fmt.Errorf("read upload data failed: %w", err) - } - - mu := multiuploader{uploader: u} - return mu.upload(reader, cleanup) -} - -// init will initialize all default options. -func (u *uploader) init() error { - if err := validateSupportedARNType(aws.ToString(u.in.Bucket)); err != nil { - return err - } - - if u.cfg.Concurrency == 0 { - u.cfg.Concurrency = DefaultUploadConcurrency - } - if u.cfg.PartSize == 0 { - u.cfg.PartSize = DefaultUploadPartSize - } - if u.cfg.MaxUploadParts == 0 { - u.cfg.MaxUploadParts = MaxUploadParts - } - - // Try to get the total size for some optimizations - if err := u.initSize(); err != nil { - return err - } - - // If PartSize was changed or partPool was never setup then we need to allocated a new pool - // so that we return []byte slices of the correct size - poolCap := u.cfg.Concurrency + 1 - if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize { - u.cfg.partPool = newByteSlicePool(u.cfg.PartSize) - u.cfg.partPool.ModifyCapacity(poolCap) - } else { - u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool} - u.cfg.partPool.ModifyCapacity(poolCap) - } - - return nil -} - -// initSize tries to detect the total stream size, setting u.totalSize. If -// the size is not known, totalSize is set to -1. -func (u *uploader) initSize() error { - u.totalSize = -1 - - switch r := u.in.Body.(type) { - case io.Seeker: - n, err := seekerLen(r) - if err != nil { - return err - } - u.totalSize = n - - // Try to adjust partSize if it is too small and account for - // integer division truncation. - if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) { - // Add one to the part size to account for remainders - // during the size calculation. e.g odd number of bytes. - u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1 - } - } - - return nil -} - -// nextReader returns a seekable reader representing the next packet of data. -// This operation increases the shared u.readerPos counter, but note that it -// does not need to be wrapped in a mutex because nextReader is only called -// from the main thread. 
-func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) { - switch r := u.in.Body.(type) { - case readerAtSeeker: - var err error - - n := u.cfg.PartSize - if u.totalSize >= 0 { - bytesLeft := u.totalSize - u.readerPos - - if bytesLeft <= u.cfg.PartSize { - err = io.EOF - n = bytesLeft - } - } - - var ( - reader io.ReadSeeker - cleanup func() - ) - - reader = io.NewSectionReader(r, u.readerPos, n) - if u.cfg.BufferProvider != nil { - reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader) - } else { - cleanup = func() {} - } - - u.readerPos += n - - return reader, int(n), cleanup, err - - default: - part, err := u.cfg.partPool.Get(u.ctx) - if err != nil { - return nil, 0, func() {}, err - } - - n, err := readFillBuf(r, *part) - u.readerPos += int64(n) - - cleanup := func() { - u.cfg.partPool.Put(part) - } - - return bytes.NewReader((*part)[0:n]), n, cleanup, err - } -} - -func readFillBuf(r io.Reader, b []byte) (offset int, err error) { - for offset < len(b) && err == nil { - var n int - n, err = r.Read(b[offset:]) - offset += n - } - - return offset, err -} - -// singlePart contains upload logic for uploading a single chunk via -// a regular PutObject request. Multipart requests require at least two -// parts, or at least 5MB of data. -func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) { - defer cleanup() - - var params s3.PutObjectInput - awsutil.Copy(¶ms, u.in) - params.Body = r - - // Need to use request form because URL generated in request is - // used in return. - - var locationRecorder recordLocationClient - out, err := u.cfg.S3.PutObject(u.ctx, ¶ms, - append(u.cfg.ClientOptions, locationRecorder.WrapClient())...) - if err != nil { - return nil, err - } - - return &UploadOutput{ - Location: locationRecorder.location, - - BucketKeyEnabled: aws.ToBool(out.BucketKeyEnabled), - ChecksumCRC32: out.ChecksumCRC32, - ChecksumCRC32C: out.ChecksumCRC32C, - ChecksumSHA1: out.ChecksumSHA1, - ChecksumSHA256: out.ChecksumSHA256, - ETag: out.ETag, - Expiration: out.Expiration, - Key: params.Key, - RequestCharged: out.RequestCharged, - SSEKMSKeyId: out.SSEKMSKeyId, - ServerSideEncryption: out.ServerSideEncryption, - VersionID: out.VersionId, - }, nil -} - -type httpClient interface { - Do(r *http.Request) (*http.Response, error) -} - -type recordLocationClient struct { - httpClient - location string -} - -func (c *recordLocationClient) WrapClient() func(o *s3.Options) { - return func(o *s3.Options) { - c.httpClient = o.HTTPClient - o.HTTPClient = c - } -} - -func (c *recordLocationClient) Do(r *http.Request) (resp *http.Response, err error) { - resp, err = c.httpClient.Do(r) - if err != nil { - return resp, err - } - - if resp.Request != nil && resp.Request.URL != nil { - url := *resp.Request.URL - url.RawQuery = "" - c.location = url.String() - } - - return resp, err -} - -// internal structure to manage a specific multipart upload to S3. -type multiuploader struct { - *uploader - wg sync.WaitGroup - m sync.Mutex - err error - uploadID string - parts completedParts -} - -// keeps track of a single chunk of data being sent to S3. -type chunk struct { - buf io.ReadSeeker - num int32 - cleanup func() -} - -// completedParts is a wrapper to make parts sortable by their part number, -// since S3 required this list to be sent in sorted order. 
-type completedParts []types.CompletedPart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { - return aws.ToInt32(a[i].PartNumber) < aws.ToInt32(a[j].PartNumber) -} - -// upload will perform a multipart upload using the firstBuf buffer containing -// the first chunk of data. -func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) { - u.initChecksumAlgorithm() - - var params s3.CreateMultipartUploadInput - awsutil.Copy(¶ms, u.in) - - // Create the multipart - var locationRecorder recordLocationClient - resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, ¶ms, - append(u.cfg.ClientOptions, locationRecorder.WrapClient())...) - if err != nil { - cleanup() - return nil, err - } - u.uploadID = *resp.UploadId - - // Create the workers - ch := make(chan chunk, u.cfg.Concurrency) - for i := 0; i < u.cfg.Concurrency; i++ { - u.wg.Add(1) - go u.readChunk(ch) - } - - // Send part 1 to the workers - var num int32 = 1 - ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup} - - // Read and queue the rest of the parts - for u.geterr() == nil && err == nil { - var ( - reader io.ReadSeeker - nextChunkLen int - ok bool - ) - - reader, nextChunkLen, cleanup, err = u.nextReader() - ok, err = u.shouldContinue(num, nextChunkLen, err) - if !ok { - cleanup() - if err != nil { - u.seterr(err) - } - break - } - - num++ - - ch <- chunk{buf: reader, num: num, cleanup: cleanup} - } - - // Close the channel, wait for workers, and complete upload - close(ch) - u.wg.Wait() - completeOut := u.complete() - - if err := u.geterr(); err != nil { - return nil, &multiUploadError{ - err: err, - uploadID: u.uploadID, - } - } - - return &UploadOutput{ - Location: locationRecorder.location, - UploadID: u.uploadID, - CompletedParts: u.parts, - - BucketKeyEnabled: aws.ToBool(completeOut.BucketKeyEnabled), - ChecksumCRC32: completeOut.ChecksumCRC32, - ChecksumCRC32C: completeOut.ChecksumCRC32C, - ChecksumSHA1: completeOut.ChecksumSHA1, - ChecksumSHA256: completeOut.ChecksumSHA256, - ETag: completeOut.ETag, - Expiration: completeOut.Expiration, - Key: completeOut.Key, - RequestCharged: completeOut.RequestCharged, - SSEKMSKeyId: completeOut.SSEKMSKeyId, - ServerSideEncryption: completeOut.ServerSideEncryption, - VersionID: completeOut.VersionId, - }, nil -} - -func (u *multiuploader) shouldContinue(part int32, nextChunkLen int, err error) (bool, error) { - if err != nil && err != io.EOF { - return false, fmt.Errorf("read multipart upload data failed, %w", err) - } - - if nextChunkLen == 0 { - // No need to upload empty part, if file was empty to start - // with empty single part would of been created and never - // started multipart upload. - return false, nil - } - - part++ - // This upload exceeded maximum number of supported parts, error now. - if part > u.cfg.MaxUploadParts || part > MaxUploadParts { - var msg string - if part > u.cfg.MaxUploadParts { - msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit", - u.cfg.MaxUploadParts) - } else { - msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", - MaxUploadParts) - } - return false, errors.New(msg) - } - - return true, err -} - -// readChunk runs in worker goroutines to pull chunks off of the ch channel -// and send() them as UploadPart requests. 
-func (u *multiuploader) readChunk(ch chan chunk) { - defer u.wg.Done() - for { - data, ok := <-ch - - if !ok { - break - } - - if u.geterr() == nil { - if err := u.send(data); err != nil { - u.seterr(err) - } - } - - data.cleanup() - } -} - -// send performs an UploadPart request and keeps track of the completed -// part information. -func (u *multiuploader) send(c chunk) error { - params := &s3.UploadPartInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - Body: c.buf, - SSECustomerAlgorithm: u.in.SSECustomerAlgorithm, - SSECustomerKey: u.in.SSECustomerKey, - SSECustomerKeyMD5: u.in.SSECustomerKeyMD5, - ExpectedBucketOwner: u.in.ExpectedBucketOwner, - RequestPayer: u.in.RequestPayer, - - ChecksumAlgorithm: u.in.ChecksumAlgorithm, - // Invalid to set any of the individual ChecksumXXX members from - // PutObject as they are never valid for individual parts of a - // multipart upload. - - PartNumber: aws.Int32(c.num), - UploadId: &u.uploadID, - } - // TODO should do copy then clear? - - resp, err := u.cfg.S3.UploadPart(u.ctx, params, u.cfg.ClientOptions...) - if err != nil { - return err - } - - var completed types.CompletedPart - awsutil.Copy(&completed, resp) - completed.PartNumber = aws.Int32(c.num) - - u.m.Lock() - u.parts = append(u.parts, completed) - u.m.Unlock() - - return nil -} - -func (u *multiuploader) initChecksumAlgorithm() { - if u.in.ChecksumAlgorithm != "" { - return - } - - switch { - case u.in.ChecksumCRC32 != nil: - u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32 - case u.in.ChecksumCRC32C != nil: - u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32c - case u.in.ChecksumSHA1 != nil: - u.in.ChecksumAlgorithm = types.ChecksumAlgorithmSha1 - case u.in.ChecksumSHA256 != nil: - u.in.ChecksumAlgorithm = types.ChecksumAlgorithmSha256 - default: - u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32 - } -} - -// geterr is a thread-safe getter for the error object -func (u *multiuploader) geterr() error { - u.m.Lock() - defer u.m.Unlock() - - return u.err -} - -// seterr is a thread-safe setter for the error object -func (u *multiuploader) seterr(e error) { - u.m.Lock() - defer u.m.Unlock() - - u.err = e -} - -// fail will abort the multipart unless LeavePartsOnError is set to true. -func (u *multiuploader) fail() { - if u.cfg.LeavePartsOnError { - return - } - - params := &s3.AbortMultipartUploadInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - UploadId: &u.uploadID, - } - _, err := u.cfg.S3.AbortMultipartUpload(u.ctx, params, u.cfg.ClientOptions...) - if err != nil { - // TODO: Add logging - //logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err)) - _ = err - } -} - -// complete successfully completes a multipart upload and returns the response. -func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { - if u.geterr() != nil { - u.fail() - return nil - } - - // Parts must be sorted in PartNumber order. - sort.Sort(u.parts) - - var params s3.CompleteMultipartUploadInput - awsutil.Copy(¶ms, u.in) - params.UploadId = &u.uploadID - params.MultipartUpload = &types.CompletedMultipartUpload{Parts: u.parts} - - resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, ¶ms, u.cfg.ClientOptions...) - if err != nil { - u.seterr(err) - u.fail() - } - - return resp -} - -type readerAtSeeker interface { - io.ReaderAt - io.ReadSeeker -} - -// setS3ExpressDefaultChecksum defaults to CRC32 for S3Express buckets, -// which is required when uploading to those through transfer manager. 
-type setS3ExpressDefaultChecksum struct{} - -func (*setS3ExpressDefaultChecksum) ID() string { - return "setS3ExpressDefaultChecksum" -} - -func (*setS3ExpressDefaultChecksum) HandleFinalize( - ctx context.Context, in smithymiddleware.FinalizeInput, next smithymiddleware.FinalizeHandler, -) ( - out smithymiddleware.FinalizeOutput, metadata smithymiddleware.Metadata, err error, -) { - const checksumHeader = "x-amz-checksum-algorithm" - - if internalcontext.GetS3Backend(ctx) != internalcontext.S3BackendS3Express { - return next.HandleFinalize(ctx, in) - } - - // If this is CreateMultipartUpload we need to ensure the checksum - // algorithm header is present. Otherwise everything is driven off the - // context setting and we can let it flow from there. - if middleware.GetOperationName(ctx) == "CreateMultipartUpload" { - r, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if internalcontext.GetChecksumInputAlgorithm(ctx) == "" { - r.Header.Set(checksumHeader, "CRC32") - } - return next.HandleFinalize(ctx, in) - } else if internalcontext.GetChecksumInputAlgorithm(ctx) == "" { - ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(types.ChecksumAlgorithmCrc32)) - } - - return next.HandleFinalize(ctx, in) -} - -func addFeatureUserAgent(stack *smithymiddleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(middleware.UserAgentFeatureS3Transfer) - return nil -} - -func getOrAddRequestUserAgent(stack *smithymiddleware.Stack) (*middleware.RequestUserAgent, error) { - id := (*middleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = middleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, smithymiddleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*middleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go deleted file mode 100644 index fb10ec309..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go +++ /dev/null @@ -1,83 +0,0 @@ -package manager - -import ( - "bufio" - "io" - "sync" - - "github.com/aws/aws-sdk-go-v2/internal/sdkio" -) - -// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom -type WriterReadFrom interface { - io.Writer - io.ReaderFrom -} - -// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer -type WriterReadFromProvider interface { - GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func()) -} - -type bufferedWriter interface { - WriterReadFrom - Flush() error - Reset(io.Writer) -} - -type bufferedReadFrom struct { - bufferedWriter -} - -func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) { - n, err := b.bufferedWriter.ReadFrom(r) - if flushErr := b.Flush(); flushErr != nil && err == nil { - err = flushErr - } - return n, err -} - -// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool -// to manage allocation and reuse of *bufio.Writer structures. 
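A hedged sketch of plugging the pooled bufio-backed provider from this file into a Downloader; per the removed default_writer_read_from.go above, the non-Windows default provider is nil, and the 5 MiB size chosen here is purely illustrative.

    package example

    import (
        "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // newBufferedDownloader routes each part's response body through a pooled
    // *bufio.Writer before it is flushed to the destination io.WriterAt.
    func newBufferedDownloader(client *s3.Client) *manager.Downloader {
        return manager.NewDownloader(client, func(d *manager.Downloader) {
            d.BufferProvider = manager.NewPooledBufferedWriterReadFromProvider(5 * 1024 * 1024)
        })
    }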
-type PooledBufferedReadFromProvider struct { - pool sync.Pool -} - -// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider -// Size is used to control the size of the underlying *bufio.Writer created for -// calls to GetReadFrom. -func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider { - if size < int(32*sdkio.KibiByte) { - size = int(64 * sdkio.KibiByte) - } - - return &PooledBufferedReadFromProvider{ - pool: sync.Pool{ - New: func() interface{} { - return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)} - }, - }, - } -} - -// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom -// interface/ Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom -// has been completed in order to allow the reuse of the *bufio.Writer -func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) { - buffer := p.pool.Get().(*bufferedReadFrom) - buffer.Reset(writer) - r = buffer - cleanup = func() { - buffer.Reset(nil) // Reset to nil writer to release reference - p.pool.Put(buffer) - } - return r, cleanup -} - -type suppressWriterAt struct { - suppressed io.Reader -} - -func (s *suppressWriterAt) Read(p []byte) (n int, err error) { - return s.suppressed.Read(p) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go deleted file mode 100644 index 0b81db548..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go +++ /dev/null @@ -1,45 +0,0 @@ -package auth - -import ( - "github.com/aws/smithy-go/auth" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// HTTPAuthScheme is the SDK's internal implementation of smithyhttp.AuthScheme -// for pre-existing implementations where the signer was added to client -// config. SDK clients will key off of this type and ensure per-operation -// updates to those signers persist on the scheme itself. -type HTTPAuthScheme struct { - schemeID string - signer smithyhttp.Signer -} - -var _ smithyhttp.AuthScheme = (*HTTPAuthScheme)(nil) - -// NewHTTPAuthScheme returns an auth scheme instance with the given config. -func NewHTTPAuthScheme(schemeID string, signer smithyhttp.Signer) *HTTPAuthScheme { - return &HTTPAuthScheme{ - schemeID: schemeID, - signer: signer, - } -} - -// SchemeID identifies the auth scheme. -func (s *HTTPAuthScheme) SchemeID() string { - return s.schemeID -} - -// IdentityResolver gets the identity resolver for the auth scheme. -func (s *HTTPAuthScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { - return o.GetIdentityResolver(s.schemeID) -} - -// Signer gets the signer for the auth scheme. -func (s *HTTPAuthScheme) Signer() smithyhttp.Signer { - return s.signer -} - -// WithSigner returns a new instance of the auth scheme with the updated signer. 
-func (s *HTTPAuthScheme) WithSigner(signer smithyhttp.Signer) *HTTPAuthScheme { - return NewHTTPAuthScheme(s.schemeID, signer) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go deleted file mode 100644 index bbc2ec06e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go +++ /dev/null @@ -1,191 +0,0 @@ -package auth - -import ( - "context" - "fmt" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -// SigV4 is a constant representing -// Authentication Scheme Signature Version 4 -const SigV4 = "sigv4" - -// SigV4A is a constant representing -// Authentication Scheme Signature Version 4A -const SigV4A = "sigv4a" - -// SigV4S3Express identifies the S3 S3Express auth scheme. -const SigV4S3Express = "sigv4-s3express" - -// None is a constant representing the -// None Authentication Scheme -const None = "none" - -// SupportedSchemes is a data structure -// that indicates the list of supported AWS -// authentication schemes -var SupportedSchemes = map[string]bool{ - SigV4: true, - SigV4A: true, - SigV4S3Express: true, - None: true, -} - -// AuthenticationScheme is a representation of -// AWS authentication schemes -type AuthenticationScheme interface { - isAuthenticationScheme() -} - -// AuthenticationSchemeV4 is a AWS SigV4 representation -type AuthenticationSchemeV4 struct { - Name string - SigningName *string - SigningRegion *string - DisableDoubleEncoding *bool -} - -func (a *AuthenticationSchemeV4) isAuthenticationScheme() {} - -// AuthenticationSchemeV4A is a AWS SigV4A representation -type AuthenticationSchemeV4A struct { - Name string - SigningName *string - SigningRegionSet []string - DisableDoubleEncoding *bool -} - -func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {} - -// AuthenticationSchemeNone is a representation for the none auth scheme -type AuthenticationSchemeNone struct{} - -func (a *AuthenticationSchemeNone) isAuthenticationScheme() {} - -// NoAuthenticationSchemesFoundError is used in signaling -// that no authentication schemes have been specified. -type NoAuthenticationSchemesFoundError struct{} - -func (e *NoAuthenticationSchemesFoundError) Error() string { - return fmt.Sprint("No authentication schemes specified.") -} - -// UnSupportedAuthenticationSchemeSpecifiedError is used in -// signaling that only unsupported authentication schemes -// were specified. -type UnSupportedAuthenticationSchemeSpecifiedError struct { - UnsupportedSchemes []string -} - -func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string { - return fmt.Sprint("Unsupported authentication scheme specified.") -} - -// GetAuthenticationSchemes extracts the relevant authentication scheme data -// into a custom strongly typed Go data structure. 
-func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) { - var result []AuthenticationScheme - if !p.Has("authSchemes") { - return nil, &NoAuthenticationSchemesFoundError{} - } - - authSchemes, _ := p.Get("authSchemes").([]interface{}) - - var unsupportedSchemes []string - for _, scheme := range authSchemes { - authScheme, _ := scheme.(map[string]interface{}) - - version := authScheme["name"].(string) - switch version { - case SigV4, SigV4S3Express: - v4Scheme := AuthenticationSchemeV4{ - Name: version, - SigningName: getSigningName(authScheme), - SigningRegion: getSigningRegion(authScheme), - DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), - } - result = append(result, AuthenticationScheme(&v4Scheme)) - case SigV4A: - v4aScheme := AuthenticationSchemeV4A{ - Name: SigV4A, - SigningName: getSigningName(authScheme), - SigningRegionSet: getSigningRegionSet(authScheme), - DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), - } - result = append(result, AuthenticationScheme(&v4aScheme)) - case None: - noneScheme := AuthenticationSchemeNone{} - result = append(result, AuthenticationScheme(&noneScheme)) - default: - unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string)) - continue - } - } - - if len(result) == 0 { - return nil, &UnSupportedAuthenticationSchemeSpecifiedError{ - UnsupportedSchemes: unsupportedSchemes, - } - } - - return result, nil -} - -type disableDoubleEncoding struct{} - -// SetDisableDoubleEncoding sets or modifies the disable double encoding option -// on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value) -} - -// GetDisableDoubleEncoding retrieves the disable double encoding option -// from the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. 
-func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) { - value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool) - return value, ok -} - -func getSigningName(authScheme map[string]interface{}) *string { - signingName, ok := authScheme["signingName"].(string) - if !ok || signingName == "" { - return nil - } - return &signingName -} - -func getSigningRegionSet(authScheme map[string]interface{}) []string { - untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{}) - if !ok { - return nil - } - signingRegionSet := []string{} - for _, item := range untypedSigningRegionSet { - signingRegionSet = append(signingRegionSet, item.(string)) - } - return signingRegionSet -} - -func getSigningRegion(authScheme map[string]interface{}) *string { - signingRegion, ok := authScheme["signingRegion"].(string) - if !ok || signingRegion == "" { - return nil - } - return &signingRegion -} - -func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool { - disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool) - if !ok { - return nil - } - return &disableDoubleEncoding -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go deleted file mode 100644 index f059b5d39..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go +++ /dev/null @@ -1,43 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - "time" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/auth/bearer" -) - -// BearerTokenAdapter adapts smithy bearer.Token to smithy auth.Identity. -type BearerTokenAdapter struct { - Token bearer.Token -} - -var _ auth.Identity = (*BearerTokenAdapter)(nil) - -// Expiration returns the time of expiration for the token. -func (v *BearerTokenAdapter) Expiration() time.Time { - return v.Token.Expires -} - -// BearerTokenProviderAdapter adapts smithy bearer.TokenProvider to smithy -// auth.IdentityResolver. -type BearerTokenProviderAdapter struct { - Provider bearer.TokenProvider -} - -var _ (auth.IdentityResolver) = (*BearerTokenProviderAdapter)(nil) - -// GetIdentity retrieves a bearer token using the underlying provider. -func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( - auth.Identity, error, -) { - token, err := v.Provider.RetrieveBearerToken(ctx) - if err != nil { - return nil, fmt.Errorf("get token: %w", err) - } - - return &BearerTokenAdapter{Token: token}, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go deleted file mode 100644 index a88281527..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go +++ /dev/null @@ -1,35 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/auth/bearer" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// BearerTokenSignerAdapter adapts smithy bearer.Signer to smithy http -// auth.Signer. -type BearerTokenSignerAdapter struct { - Signer bearer.Signer -} - -var _ (smithyhttp.Signer) = (*BearerTokenSignerAdapter)(nil) - -// SignRequest signs the request with the provided bearer token. 
-func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, _ smithy.Properties) error { - ca, ok := identity.(*BearerTokenAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r) - if err != nil { - return fmt.Errorf("sign request: %w", err) - } - - *r = *signed.(*smithyhttp.Request) - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go deleted file mode 100644 index f926c4aaa..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go +++ /dev/null @@ -1,46 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// CredentialsAdapter adapts aws.Credentials to auth.Identity. -type CredentialsAdapter struct { - Credentials aws.Credentials -} - -var _ auth.Identity = (*CredentialsAdapter)(nil) - -// Expiration returns the time of expiration for the credentials. -func (v *CredentialsAdapter) Expiration() time.Time { - return v.Credentials.Expires -} - -// CredentialsProviderAdapter adapts aws.CredentialsProvider to auth.IdentityResolver. -type CredentialsProviderAdapter struct { - Provider aws.CredentialsProvider -} - -var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) - -// GetIdentity retrieves AWS credentials using the underlying provider. -func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( - auth.Identity, error, -) { - if v.Provider == nil { - return &CredentialsAdapter{Credentials: aws.Credentials{}}, nil - } - - creds, err := v.Provider.Retrieve(ctx) - if err != nil { - return nil, fmt.Errorf("get credentials: %w", err) - } - - return &CredentialsAdapter{Credentials: creds}, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go deleted file mode 100644 index 42b458673..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package smithy adapts concrete AWS auth and signing types to the generic smithy versions. -package smithy diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go deleted file mode 100644 index 24db8e144..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go +++ /dev/null @@ -1,57 +0,0 @@ -package smithy - -import ( - "context" - "fmt" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// V4SignerAdapter adapts v4.HTTPSigner to smithy http.Signer. -type V4SignerAdapter struct { - Signer v4.HTTPSigner - Logger logging.Logger - LogSigning bool -} - -var _ (smithyhttp.Signer) = (*V4SignerAdapter)(nil) - -// SignRequest signs the request with the provided identity. 
-func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { - ca, ok := identity.(*CredentialsAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - name, ok := smithyhttp.GetSigV4SigningName(&props) - if !ok { - return fmt.Errorf("sigv4 signing name is required") - } - - region, ok := smithyhttp.GetSigV4SigningRegion(&props) - if !ok { - return fmt.Errorf("sigv4 signing region is required") - } - - hash := v4.GetPayloadHash(ctx) - signingTime := sdk.NowTime() - skew := internalcontext.GetAttemptSkewContext(ctx) - signingTime = signingTime.Add(skew) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o *v4.SignerOptions) { - o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) - - o.Logger = v.Logger - o.LogSigning = v.LogSigning - }) - if err != nil { - return fmt.Errorf("sign http: %w", err) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go deleted file mode 100644 index 938cd14c1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go +++ /dev/null @@ -1,112 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" - "time" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. 
-func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - if _, ok := src.Interface().(*time.Time); !ok { - if dst.Kind() == reflect.String { - dst.SetString(e.String()) - } else { - dst.Set(reflect.New(e)) - } - } else { - tempValue := reflect.New(e) - tempValue.Elem().Set(src.Elem()) - // Sets time.Time's unexported values - dst.Set(tempValue) - } - } - if dst.Kind() != reflect.String && src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go deleted file mode 100644 index bcfe51a2b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go +++ /dev/null @@ -1,33 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - // Special casing for strings as typed enumerations are string aliases - // but are not deep equal. 
- if ra.Kind() == reflect.String && rb.Kind() == reflect.String { - return ra.String() == rb.String() - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go deleted file mode 100644 index 1adecae6b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go +++ /dev/null @@ -1,131 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. -func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { - isPtr := false - for v.Kind() == reflect.Ptr { - isPtr = true - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - strtype := v.Type().String() - if strtype == "time.Time" { - fmt.Fprintf(buf, "%s", v.Interface()) - break - } else if strings.HasPrefix(strtype, "io.") { - buf.WriteString("") - break - } - - if isPtr { - buf.WriteRune('&') - } - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - prettify(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - strtype := v.Type().String() - if strtype == "[]uint8" { - fmt.Fprintf(buf, " len %d", v.Len()) - break - } - - nl, id, id2 := "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - if isPtr { - buf.WriteRune('&') - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - prettify(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - if isPtr { - buf.WriteRune('&') - } - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - prettify(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - if !v.IsValid() { - fmt.Fprint(buf, "") - return - } - - for v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - - if v.Kind() == reflect.Ptr || v.Kind() == reflect.Struct || v.Kind() == reflect.Map || v.Kind() == reflect.Slice { - prettify(v, indent, buf) - return - } - - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - case io.ReadSeeker, io.Reader: - format = "buffer(%p)" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go deleted file mode 100644 index 645df2450..000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go +++ /dev/null @@ -1,88 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "reflect" - "strings" -) - -// StringValue returns the string representation of a value. -func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - for i := 0; i < v.Type().NumField(); i++ { - ft := v.Type().Field(i) - fv := v.Field(i) - - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { - continue // ignore unexported fields - } - if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { - continue // ignore unset fields - } - - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(ft.Name + ": ") - - if tag := ft.Tag.Get("sensitive"); tag == "true" { - buf.WriteString("") - } else { - stringValue(fv, indent+2, buf) - } - - buf.WriteString(",\n") - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md deleted file mode 100644 index ec84287b5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ /dev/null @@ -1,374 +0,0 @@ -# v1.3.28 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.27 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.26 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.25 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.24 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.23 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.22 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.21 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2024-03-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.43 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.42 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.41 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.40 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.39 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.38 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.37 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.36 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.35 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.34 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.33 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.32 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.31 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.30 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.29 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.28 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.27 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.26 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.25 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.24 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.23 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.22 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.21 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# 
v1.1.20 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.19 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.18 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.17 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.16 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.15 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.14 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.13 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.12 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.11 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.10 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.9 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.8 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.7 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.6 (2022-03-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.5 (2022-02-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.4 (2022-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.3 (2022-01-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.7 (2021-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.6 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.5 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.4 (2021-08-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.2 (2021-08-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.1 (2021-07-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2021-06-25) - -* **Release**: Release new modules -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
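For reference, the awsutil.DeepEqual helper removed a few files above dereferences both arguments with reflect.Indirect and special-cases string kinds, so string-based enum types compare equal to plain strings, before falling back to reflect.DeepEqual. A minimal, self-contained sketch of that comparison follows; deepEqual and bucketType are illustrative names for this sketch, not SDK identifiers.

package main

import (
	"fmt"
	"reflect"
)

// deepEqual mirrors the idea in the removed awsutil.DeepEqual: dereference
// pointers, compare string kinds (including string-based enums) as plain
// strings, then fall back to reflect.DeepEqual.
func deepEqual(a, b interface{}) bool {
	ra := reflect.Indirect(reflect.ValueOf(a))
	rb := reflect.Indirect(reflect.ValueOf(b))

	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
		// Both nil: equal only when they are the same (possibly nil) type.
		return reflect.TypeOf(a) == reflect.TypeOf(b)
	} else if raValid != rbValid {
		return false
	}

	if ra.Kind() == reflect.String && rb.Kind() == reflect.String {
		return ra.String() == rb.String()
	}

	return reflect.DeepEqual(ra.Interface(), rb.Interface())
}

type bucketType string // stand-in for a string-based enum

func main() {
	name := "my-bucket"
	fmt.Println(deepEqual(&name, "my-bucket"))                 // true: the pointer is dereferenced first
	fmt.Println(deepEqual(bucketType("STANDARD"), "STANDARD")) // true: string kinds compared as strings
	fmt.Println(deepEqual(&name, 42))                          // false
}

The string special case is what lets a typed enum compare equal to its literal value, which plain reflect.DeepEqual would reject because the dynamic types differ.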
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go deleted file mode 100644 index cd4d19b89..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go +++ /dev/null @@ -1,65 +0,0 @@ -package configsources - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" -) - -// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value -// for Enable Endpoint Discovery -type EnableEndpointDiscoveryProvider interface { - GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) -} - -// ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice. -// Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs, -// and error if one is encountered. -func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok { - value, found, err = p.GetEnableEndpointDiscovery(ctx) - if err != nil || found { - break - } - } - } - return -} - -// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint -type UseDualStackEndpointProvider interface { - GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error) -} - -// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpoint from the config slice. -// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(UseDualStackEndpointProvider); ok { - value, found, err = p.GetUseDualStackEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} - -// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint -type UseFIPSEndpointProvider interface { - GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error) -} - -// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice. -// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. 
-func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(UseFIPSEndpointProvider); ok { - value, found, err = p.GetUseFIPSEndpoint(ctx) - if err != nil || found { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go deleted file mode 100644 index e7835f852..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go +++ /dev/null @@ -1,57 +0,0 @@ -package configsources - -import ( - "context" -) - -// ServiceBaseEndpointProvider is needed to search for all providers -// that provide a configured service endpoint -type ServiceBaseEndpointProvider interface { - GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) -} - -// IgnoreConfiguredEndpointsProvider is needed to search for all providers -// that provide a flag to disable configured endpoints. -// -// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because -// service packages cannot import github.com/aws/aws-sdk-go-v2/config -// due to result import cycle error. -type IgnoreConfiguredEndpointsProvider interface { - GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) -} - -// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured -// endpoints feature. -// -// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because -// service packages cannot import github.com/aws/aws-sdk-go-v2/config -// due to result import cycle error. -func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { - value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources -// while allowing for configured endpoints to be disabled -func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) { - if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val { - return "", false, nil - } - - for _, cs := range configs { - if p, ok := cs.(ServiceBaseEndpointProvider); ok { - value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID) - if err != nil || found { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go deleted file mode 100644 index 7ad6eaf9d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
- -package configsources - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.28" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go deleted file mode 100644 index f0c283d39..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go +++ /dev/null @@ -1,52 +0,0 @@ -package context - -import ( - "context" - "time" - - "github.com/aws/smithy-go/middleware" -) - -type s3BackendKey struct{} -type checksumInputAlgorithmKey struct{} -type clockSkew struct{} - -const ( - // S3BackendS3Express identifies the S3Express backend - S3BackendS3Express = "S3Express" -) - -// SetS3Backend stores the resolved endpoint backend within the request -// context, which is required for a variety of custom S3 behaviors. -func SetS3Backend(ctx context.Context, typ string) context.Context { - return middleware.WithStackValue(ctx, s3BackendKey{}, typ) -} - -// GetS3Backend retrieves the stored endpoint backend within the context. -func GetS3Backend(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string) - return v -} - -// SetChecksumInputAlgorithm sets the request checksum algorithm on the -// context. -func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value) -} - -// GetChecksumInputAlgorithm returns the checksum algorithm from the context. -func GetChecksumInputAlgorithm(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string) - return v -} - -// SetAttemptSkewContext sets the clock skew value on the context -func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context { - return middleware.WithStackValue(ctx, clockSkew{}, v) -} - -// GetAttemptSkewContext gets the clock skew value from the context -func GetAttemptSkewContext(ctx context.Context) time.Duration { - x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration) - return x -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go deleted file mode 100644 index e6223dd3b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go +++ /dev/null @@ -1,94 +0,0 @@ -package awsrulesfn - -import ( - "strings" -) - -// ARN provides AWS ARN components broken out into a data structure. -type ARN struct { - Partition string - Service string - Region string - AccountId string - ResourceId OptionalStringSlice -} - -const ( - arnDelimiters = ":" - resourceDelimiters = "/:" - arnSections = 6 - arnPrefix = "arn:" - - // zero-indexed - sectionPartition = 1 - sectionService = 2 - sectionRegion = 3 - sectionAccountID = 4 - sectionResource = 5 -) - -// ParseARN returns an [ARN] value parsed from the input string provided. If -// the ARN cannot be parsed nil will be returned, and error added to -// [ErrorCollector]. 
-func ParseARN(input string) *ARN { - if !strings.HasPrefix(input, arnPrefix) { - return nil - } - - sections := strings.SplitN(input, arnDelimiters, arnSections) - if numSections := len(sections); numSections != arnSections { - return nil - } - - if sections[sectionPartition] == "" { - return nil - } - if sections[sectionService] == "" { - return nil - } - if sections[sectionResource] == "" { - return nil - } - - return &ARN{ - Partition: sections[sectionPartition], - Service: sections[sectionService], - Region: sections[sectionRegion], - AccountId: sections[sectionAccountID], - ResourceId: splitResource(sections[sectionResource]), - } -} - -// splitResource splits the resource components by the ARN resource delimiters. -func splitResource(v string) []string { - var parts []string - var offset int - - for offset <= len(v) { - idx := strings.IndexAny(v[offset:], "/:") - if idx < 0 { - parts = append(parts, v[offset:]) - break - } - parts = append(parts, v[offset:idx+offset]) - offset += idx + 1 - } - - return parts -} - -// OptionalStringSlice provides a helper to safely get the index of a string -// slice that may be out of bounds. Returns pointer to string if index is -// valid. Otherwise returns nil. -type OptionalStringSlice []string - -// Get returns a string pointer of the string at index i if the index is valid. -// Otherwise returns nil. -func (s OptionalStringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go deleted file mode 100644 index d5a365853..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package awsrulesfn provides AWS focused endpoint rule functions for -// evaluating endpoint resolution rules. -package awsrulesfn diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go deleted file mode 100644 index df72da97c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build codegen -// +build codegen - -package awsrulesfn - -//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go deleted file mode 100644 index 637e5fc18..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go +++ /dev/null @@ -1,51 +0,0 @@ -package awsrulesfn - -import ( - "net" - "strings" - - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// IsVirtualHostableS3Bucket returns if the input is a DNS compatible bucket -// name and can be used with Amazon S3 virtual hosted style addressing. Similar -// to [rulesfn.IsValidHostLabel] with the added restriction that the length of label -// must be [3:63] characters long, all lowercase, and not formatted as an IP -// address. 
-func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool { - // input should not be formatted as an IP address - // NOTE: this will technically trip up on IPv6 hosts with zone IDs, but - // validation further down will catch that anyway (it's guaranteed to have - // unfriendly characters % and : if that's the case) - if net.ParseIP(input) != nil { - return false - } - - var labels []string - if allowSubDomains { - labels = strings.Split(input, ".") - } else { - labels = []string{input} - } - - for _, label := range labels { - // validate special length constraints - if l := len(label); l < 3 || l > 63 { - return false - } - - // Validate no capital letters - for _, r := range label { - if r >= 'A' && r <= 'Z' { - return false - } - } - - // Validate valid host label - if !smithyhttp.ValidHostLabel(label) { - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go deleted file mode 100644 index 91414afe8..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go +++ /dev/null @@ -1,76 +0,0 @@ -package awsrulesfn - -import "regexp" - -// Partition provides the metadata describing an AWS partition. -type Partition struct { - ID string `json:"id"` - Regions map[string]RegionOverrides `json:"regions"` - RegionRegex string `json:"regionRegex"` - DefaultConfig PartitionConfig `json:"outputs"` -} - -// PartitionConfig provides the endpoint metadata for an AWS region or partition. -type PartitionConfig struct { - Name string `json:"name"` - DnsSuffix string `json:"dnsSuffix"` - DualStackDnsSuffix string `json:"dualStackDnsSuffix"` - SupportsFIPS bool `json:"supportsFIPS"` - SupportsDualStack bool `json:"supportsDualStack"` - ImplicitGlobalRegion string `json:"implicitGlobalRegion"` -} - -type RegionOverrides struct { - Name *string `json:"name"` - DnsSuffix *string `json:"dnsSuffix"` - DualStackDnsSuffix *string `json:"dualStackDnsSuffix"` - SupportsFIPS *bool `json:"supportsFIPS"` - SupportsDualStack *bool `json:"supportsDualStack"` -} - -const defaultPartition = "aws" - -func getPartition(partitions []Partition, region string) *PartitionConfig { - for _, partition := range partitions { - if v, ok := partition.Regions[region]; ok { - p := mergeOverrides(partition.DefaultConfig, v) - return &p - } - } - - for _, partition := range partitions { - regionRegex := regexp.MustCompile(partition.RegionRegex) - if regionRegex.MatchString(region) { - v := partition.DefaultConfig - return &v - } - } - - for _, partition := range partitions { - if partition.ID == defaultPartition { - v := partition.DefaultConfig - return &v - } - } - - return nil -} - -func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig { - if from.Name != nil { - into.Name = *from.Name - } - if from.DnsSuffix != nil { - into.DnsSuffix = *from.DnsSuffix - } - if from.DualStackDnsSuffix != nil { - into.DualStackDnsSuffix = *from.DualStackDnsSuffix - } - if from.SupportsFIPS != nil { - into.SupportsFIPS = *from.SupportsFIPS - } - if from.SupportsDualStack != nil { - into.SupportsDualStack = *from.SupportsDualStack - } - return into -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go deleted file mode 100644 index 5f0779997..000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ /dev/null @@ -1,403 +0,0 @@ -// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT. - -package awsrulesfn - -// GetPartition returns an AWS [Partition] for the region provided. If the -// partition cannot be determined nil will be returned. -func GetPartition(region string) *PartitionConfig { - return getPartition(partitions, region) -} - -var partitions = []Partition{ - { - ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, - ImplicitGlobalRegion: "us-east-1", - }, - Regions: map[string]RegionOverrides{ - "af-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-northeast-3": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-south-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-3": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ap-southeast-4": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "aws-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ca-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "ca-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-central-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-north-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-south-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "eu-west-3": { - Name: nil, - 
DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "il-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "me-central-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "me-south-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "sa-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-east-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-west-2": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-cn", - RegionRegex: "^cn\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-cn", - DnsSuffix: "amazonaws.com.cn", - DualStackDnsSuffix: "api.amazonwebservices.com.cn", - SupportsFIPS: true, - SupportsDualStack: true, - ImplicitGlobalRegion: "cn-northwest-1", - }, - Regions: map[string]RegionOverrides{ - "aws-cn-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "cn-north-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "cn-northwest-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-us-gov", - RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, - ImplicitGlobalRegion: "us-gov-west-1", - }, - Regions: map[string]RegionOverrides{ - "aws-us-gov-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso", - RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso", - DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "us-iso-east-1", - }, - Regions: map[string]RegionOverrides{ - "aws-iso-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-iso-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-iso-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso-b", - RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-b", - DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", - SupportsFIPS: true, - SupportsDualStack: false, - 
ImplicitGlobalRegion: "us-isob-east-1", - }, - Regions: map[string]RegionOverrides{ - "aws-iso-b-global": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-isob-east-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso-e", - RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-e", - DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", - SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "eu-isoe-west-1", - }, - Regions: map[string]RegionOverrides{ - "eu-isoe-west-1": { - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - }, - }, - { - ID: "aws-iso-f", - RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", - DefaultConfig: PartitionConfig{ - Name: "aws-iso-f", - DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "us-isof-south-1", - }, - Regions: map[string]RegionOverrides{}, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json deleted file mode 100644 index 0066ea164..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ /dev/null @@ -1,226 +0,0 @@ -{ - "partitions" : [ { - "id" : "aws", - "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-east-1", - "name" : "aws", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", - "regions" : { - "af-south-1" : { - "description" : "Africa (Cape Town)" - }, - "ap-east-1" : { - "description" : "Asia Pacific (Hong Kong)" - }, - "ap-northeast-1" : { - "description" : "Asia Pacific (Tokyo)" - }, - "ap-northeast-2" : { - "description" : "Asia Pacific (Seoul)" - }, - "ap-northeast-3" : { - "description" : "Asia Pacific (Osaka)" - }, - "ap-south-1" : { - "description" : "Asia Pacific (Mumbai)" - }, - "ap-south-2" : { - "description" : "Asia Pacific (Hyderabad)" - }, - "ap-southeast-1" : { - "description" : "Asia Pacific (Singapore)" - }, - "ap-southeast-2" : { - "description" : "Asia Pacific (Sydney)" - }, - "ap-southeast-3" : { - "description" : "Asia Pacific (Jakarta)" - }, - "ap-southeast-4" : { - "description" : "Asia Pacific (Melbourne)" - }, - "ap-southeast-5" : { - "description" : "Asia Pacific (Malaysia)" - }, - "ap-southeast-7" : { - "description" : "Asia Pacific (Thailand)" - }, - "aws-global" : { - "description" : "AWS Standard global region" - }, - "ca-central-1" : { - "description" : "Canada (Central)" - }, - "ca-west-1" : { - "description" : "Canada West (Calgary)" - }, - "eu-central-1" : { - "description" : "Europe (Frankfurt)" - }, - "eu-central-2" : { - "description" : "Europe (Zurich)" - }, - "eu-north-1" : { - "description" : "Europe (Stockholm)" - }, - "eu-south-1" : { - "description" : "Europe (Milan)" - }, - "eu-south-2" : { - "description" : "Europe (Spain)" - }, - "eu-west-1" : { - "description" : "Europe (Ireland)" - }, - "eu-west-2" : { - "description" : "Europe (London)" - }, - "eu-west-3" : { - "description" : "Europe (Paris)" - }, - "il-central-1" : { - "description" : "Israel (Tel Aviv)" - }, - "me-central-1" : { - "description" : "Middle East 
(UAE)" - }, - "me-south-1" : { - "description" : "Middle East (Bahrain)" - }, - "sa-east-1" : { - "description" : "South America (Sao Paulo)" - }, - "us-east-1" : { - "description" : "US East (N. Virginia)" - }, - "us-east-2" : { - "description" : "US East (Ohio)" - }, - "us-west-1" : { - "description" : "US West (N. California)" - }, - "us-west-2" : { - "description" : "US West (Oregon)" - } - } - }, { - "id" : "aws-cn", - "outputs" : { - "dnsSuffix" : "amazonaws.com.cn", - "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", - "implicitGlobalRegion" : "cn-northwest-1", - "name" : "aws-cn", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^cn\\-\\w+\\-\\d+$", - "regions" : { - "aws-cn-global" : { - "description" : "AWS China global region" - }, - "cn-north-1" : { - "description" : "China (Beijing)" - }, - "cn-northwest-1" : { - "description" : "China (Ningxia)" - } - } - }, { - "id" : "aws-us-gov", - "outputs" : { - "dnsSuffix" : "amazonaws.com", - "dualStackDnsSuffix" : "api.aws", - "implicitGlobalRegion" : "us-gov-west-1", - "name" : "aws-us-gov", - "supportsDualStack" : true, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", - "regions" : { - "aws-us-gov-global" : { - "description" : "AWS GovCloud (US) global region" - }, - "us-gov-east-1" : { - "description" : "AWS GovCloud (US-East)" - }, - "us-gov-west-1" : { - "description" : "AWS GovCloud (US-West)" - } - } - }, { - "id" : "aws-iso", - "outputs" : { - "dnsSuffix" : "c2s.ic.gov", - "dualStackDnsSuffix" : "c2s.ic.gov", - "implicitGlobalRegion" : "us-iso-east-1", - "name" : "aws-iso", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", - "regions" : { - "aws-iso-global" : { - "description" : "AWS ISO (US) global region" - }, - "us-iso-east-1" : { - "description" : "US ISO East" - }, - "us-iso-west-1" : { - "description" : "US ISO WEST" - } - } - }, { - "id" : "aws-iso-b", - "outputs" : { - "dnsSuffix" : "sc2s.sgov.gov", - "dualStackDnsSuffix" : "sc2s.sgov.gov", - "implicitGlobalRegion" : "us-isob-east-1", - "name" : "aws-iso-b", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", - "regions" : { - "aws-iso-b-global" : { - "description" : "AWS ISOB (US) global region" - }, - "us-isob-east-1" : { - "description" : "US ISOB East (Ohio)" - } - } - }, { - "id" : "aws-iso-e", - "outputs" : { - "dnsSuffix" : "cloud.adc-e.uk", - "dualStackDnsSuffix" : "cloud.adc-e.uk", - "implicitGlobalRegion" : "eu-isoe-west-1", - "name" : "aws-iso-e", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { - "eu-isoe-west-1" : { - "description" : "EU ISOE West" - } - } - }, { - "id" : "aws-iso-f", - "outputs" : { - "dnsSuffix" : "csp.hci.ic.gov", - "dualStackDnsSuffix" : "csp.hci.ic.gov", - "implicitGlobalRegion" : "us-isof-south-1", - "name" : "aws-iso-f", - "supportsDualStack" : false, - "supportsFIPS" : true - }, - "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", - "regions" : { } - } ], - "version" : "1.1" -} \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go deleted file mode 100644 index 67950ca36..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go +++ /dev/null @@ -1,201 +0,0 @@ -package endpoints - -import ( - "fmt" - "regexp" - "strings" - - 
"github.com/aws/aws-sdk-go-v2/aws" -) - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4"} -) - -// Options provide configuration needed to direct how endpoints are resolved. -type Options struct { - // Disable usage of HTTPS (TLS / SSL) - DisableHTTPS bool -} - -// Partitions is a slice of partition -type Partitions []Partition - -// ResolveEndpoint resolves a service endpoint for the given region and options. -func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { - if len(ps) == 0 { - return aws.Endpoint{}, fmt.Errorf("no partitions found") - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(region) { - continue - } - - return ps[i].ResolveEndpoint(region, opts) - } - - // fallback to first partition format to use when resolving the endpoint. - return ps[0].ResolveEndpoint(region, opts) -} - -// Partition is an AWS partition description for a service and its' region endpoints. -type Partition struct { - ID string - RegionRegex *regexp.Regexp - PartitionEndpoint string - IsRegionalized bool - Defaults Endpoint - Endpoints Endpoints -} - -func (p Partition) canResolveEndpoint(region string) bool { - _, ok := p.Endpoints[region] - return ok || p.RegionRegex.MatchString(region) -} - -// ResolveEndpoint resolves and service endpoint for the given region and options. -func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { - if len(region) == 0 && len(p.PartitionEndpoint) != 0 { - region = p.PartitionEndpoint - } - - e, _ := p.endpointForRegion(region) - - return e.resolve(p.ID, region, p.Defaults, options), nil -} - -func (p Partition) endpointForRegion(region string) (Endpoint, bool) { - if e, ok := p.Endpoints[region]; ok { - return e, true - } - - if !p.IsRegionalized { - return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return Endpoint{}, false -} - -// Endpoints is a map of service config regions to endpoints -type Endpoints map[string]Endpoint - -// CredentialScope is the credential scope of a region and service -type CredentialScope struct { - Region string - Service string -} - -// Endpoint is a service endpoint description -type Endpoint struct { - // True if the endpoint cannot be resolved for this partition/region/service - Unresolveable aws.Ternary - - Hostname string - Protocols []string - - CredentialScope CredentialScope - - SignatureVersions []string `json:"signatureVersions"` -} - -func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint { - var merged Endpoint - merged.mergeIn(def) - merged.mergeIn(e) - e = merged - - var u string - if e.Unresolveable != aws.TrueTernary { - // Only attempt to resolve the endpoint if it can be resolved. 
- hostname := strings.Replace(e.Hostname, "{region}", region, 1) - - scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) - u = scheme + "://" + hostname - } - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - signingName := e.CredentialScope.Service - - return aws.Endpoint{ - URL: u, - PartitionID: partition, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } -} - -func (e *Endpoint) mergeIn(other Endpoint) { - if other.Unresolveable != aws.UnknownTernary { - e.Unresolveable = other.Unresolveable - } - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } -} - -func getEndpointScheme(protocols []string, disableHTTPS bool) string { - if disableHTTPS { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -// MapFIPSRegion extracts the intrinsic AWS region from one that may have an -// embedded FIPS microformat. -func MapFIPSRegion(region string) string { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(region, fipsInfix) || - strings.Contains(region, fipsPrefix) || - strings.Contains(region, fipsSuffix) { - region = strings.ReplaceAll(region, fipsInfix, "-") - region = strings.ReplaceAll(region, fipsPrefix, "") - region = strings.ReplaceAll(region, fipsSuffix, "") - } - - return region -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md deleted file mode 100644 index f9eb3c6ad..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ /dev/null @@ -1,349 +0,0 @@ -# v2.6.28 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.27 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.26 (2024-12-19) - -* **Bug Fix**: Fix improper use of printf-style functions. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.25 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.24 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.23 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.22 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.21 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.20 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.19 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.18 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.16 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.15 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.14 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.13 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.12 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.11 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.10 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.9 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.8 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.7 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.6 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.5 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.3 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.6.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.5.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.37 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.36 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.29 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.28 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.27 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.26 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.25 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# 
v2.4.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.4.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.3.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.2.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.1.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v2.0.0 (2021-11-06) - -* **Release**: Endpoint Variant Model Support -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
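(Illustrative note, not part of the patch: the internal/endpoints/endpoints.go hunk removed a few files above includes a small FIPS pseudo-region helper. The following is a minimal, standalone sketch of that behavior under stated assumptions — the package main wrapper, the lower-cased hypothetical name mapFIPSRegion, and the example region strings are mine, not from the diff.)

package main

import (
	"fmt"
	"strings"
)

// mapFIPSRegion sketches the removed helper's behavior: it strips the FIPS
// microformat ("fips-" prefix, "-fips" suffix, or "-fips-" infix) from a
// region name so the intrinsic AWS region can be resolved normally.
func mapFIPSRegion(region string) string {
	const (
		fipsInfix  = "-fips-"
		fipsPrefix = "fips-"
		fipsSuffix = "-fips"
	)

	if strings.Contains(region, fipsInfix) ||
		strings.Contains(region, fipsPrefix) ||
		strings.Contains(region, fipsSuffix) {
		region = strings.ReplaceAll(region, fipsInfix, "-")
		region = strings.ReplaceAll(region, fipsPrefix, "")
		region = strings.ReplaceAll(region, fipsSuffix, "")
	}
	return region
}

func main() {
	// Example inputs (assumed): a prefixed, a suffixed, and a plain region.
	for _, r := range []string{"fips-us-gov-west-1", "us-east-1-fips", "us-west-2"} {
		fmt.Printf("%s -> %s\n", r, mapFIPSRegion(r))
	}
}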
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go deleted file mode 100644 index 32251a7e3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go +++ /dev/null @@ -1,302 +0,0 @@ -package endpoints - -import ( - "fmt" - "github.com/aws/smithy-go/logging" - "regexp" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// DefaultKey is a compound map key of a variant and other values. -type DefaultKey struct { - Variant EndpointVariant - ServiceVariant ServiceVariant -} - -// EndpointKey is a compound map key of a region and associated variant value. -type EndpointKey struct { - Region string - Variant EndpointVariant - ServiceVariant ServiceVariant -} - -// EndpointVariant is a bit field to describe the endpoints attributes. -type EndpointVariant uint64 - -const ( - // FIPSVariant indicates that the endpoint is FIPS capable. - FIPSVariant EndpointVariant = 1 << (64 - 1 - iota) - - // DualStackVariant indicates that the endpoint is DualStack capable. - DualStackVariant -) - -// ServiceVariant is a bit field to describe the service endpoint attributes. -type ServiceVariant uint64 - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "s3v4"} -) - -// Options provide configuration needed to direct how endpoints are resolved. -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the provided logger. - LogDeprecated bool - - // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority - // over the region name passed to the ResolveEndpoint call. - ResolvedRegion string - - // Disable usage of HTTPS (TLS / SSL) - DisableHTTPS bool - - // Instruct the resolver to use a service endpoint that supports dual-stack. - // If a service does not have a dual-stack endpoint an error will be returned by the resolver. - UseDualStackEndpoint aws.DualStackEndpointState - - // Instruct the resolver to use a service endpoint that supports FIPS. - // If a service does not have a FIPS endpoint an error will be returned by the resolver. - UseFIPSEndpoint aws.FIPSEndpointState - - // ServiceVariant is a bitfield of service specified endpoint variant data. - ServiceVariant ServiceVariant -} - -// GetEndpointVariant returns the EndpointVariant for the variant associated options. -func (o Options) GetEndpointVariant() (v EndpointVariant) { - if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - v |= DualStackVariant - } - if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - v |= FIPSVariant - } - return v -} - -// Partitions is a slice of partition -type Partitions []Partition - -// ResolveEndpoint resolves a service endpoint for the given region and options. -func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { - if len(ps) == 0 { - return aws.Endpoint{}, fmt.Errorf("no partitions found") - } - - if opts.Logger == nil { - opts.Logger = logging.Nop{} - } - - if len(opts.ResolvedRegion) > 0 { - region = opts.ResolvedRegion - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(region, opts) { - continue - } - - return ps[i].ResolveEndpoint(region, opts) - } - - // fallback to first partition format to use when resolving the endpoint. 
- return ps[0].ResolveEndpoint(region, opts) -} - -// Partition is an AWS partition description for a service and its' region endpoints. -type Partition struct { - ID string - RegionRegex *regexp.Regexp - PartitionEndpoint string - IsRegionalized bool - Defaults map[DefaultKey]Endpoint - Endpoints Endpoints -} - -func (p Partition) canResolveEndpoint(region string, opts Options) bool { - _, ok := p.Endpoints[EndpointKey{ - Region: region, - Variant: opts.GetEndpointVariant(), - }] - return ok || p.RegionRegex.MatchString(region) -} - -// ResolveEndpoint resolves and service endpoint for the given region and options. -func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { - if len(region) == 0 && len(p.PartitionEndpoint) != 0 { - region = p.PartitionEndpoint - } - - endpoints := p.Endpoints - - variant := options.GetEndpointVariant() - serviceVariant := options.ServiceVariant - - defaults := p.Defaults[DefaultKey{ - Variant: variant, - ServiceVariant: serviceVariant, - }] - - return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options) -} - -func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint { - key := EndpointKey{ - Region: region, - Variant: variant, - } - - if e, ok := endpoints[key]; ok { - return e - } - - if !p.IsRegionalized { - return endpoints[EndpointKey{ - Region: p.PartitionEndpoint, - Variant: variant, - ServiceVariant: serviceVariant, - }] - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return Endpoint{} -} - -// Endpoints is a map of service config regions to endpoints -type Endpoints map[EndpointKey]Endpoint - -// CredentialScope is the credential scope of a region and service -type CredentialScope struct { - Region string - Service string -} - -// Endpoint is a service endpoint description -type Endpoint struct { - // True if the endpoint cannot be resolved for this partition/region/service - Unresolveable aws.Ternary - - Hostname string - Protocols []string - - CredentialScope CredentialScope - - SignatureVersions []string - - // Indicates that this endpoint is deprecated. - Deprecated aws.Ternary -} - -// IsZero returns whether the endpoint structure is an empty (zero) value. -func (e Endpoint) IsZero() bool { - switch { - case e.Unresolveable != aws.UnknownTernary: - return false - case len(e.Hostname) != 0: - return false - case len(e.Protocols) != 0: - return false - case e.CredentialScope != (CredentialScope{}): - return false - case len(e.SignatureVersions) != 0: - return false - } - return true -} - -func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) { - var merged Endpoint - merged.mergeIn(def) - merged.mergeIn(e) - e = merged - - if e.IsZero() { - return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region) - } - - var u string - if e.Unresolveable != aws.TrueTernary { - // Only attempt to resolve the endpoint if it can be resolved. 
- hostname := strings.Replace(e.Hostname, "{region}", region, 1) - - scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) - u = scheme + "://" + hostname - } - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - signingName := e.CredentialScope.Service - - if e.Deprecated == aws.TrueTernary && options.LogDeprecated { - options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u) - } - - return aws.Endpoint{ - URL: u, - PartitionID: partition, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - }, nil -} - -func (e *Endpoint) mergeIn(other Endpoint) { - if other.Unresolveable != aws.UnknownTernary { - e.Unresolveable = other.Unresolveable - } - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if other.Deprecated != aws.UnknownTernary { - e.Deprecated = other.Deprecated - } -} - -func getEndpointScheme(protocols []string, disableHTTPS bool) string { - if disableHTTPS { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go deleted file mode 100644 index 44d8b5195..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package endpoints - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.28" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md deleted file mode 100644 index be61098b4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ /dev/null @@ -1,275 +0,0 @@ -# v1.8.1 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. - -# v1.8.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# v1.7.3 (2024-01-22) - -* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. - -# v1.7.2 (2023-12-08) - -* **Bug Fix**: Correct loading of [services *] sections into shared config. - -# v1.7.1 (2023-11-16) - -* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it. - -# v1.7.0 (2023-11-13) - -* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. 
Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. - -# v1.6.0 (2023-11-09.2) - -* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored - -# v1.5.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2023-11-07) - -* **Bug Fix**: Fix subproperty performance regression - -# v1.5.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.45 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.44 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.43 (2023-09-22) - -* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. -* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. - -# v1.3.42 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.41 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.40 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.39 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.38 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.37 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.36 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.35 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.34 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.33 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.32 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.31 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.30 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.29 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.28 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.27 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.26 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.25 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.24 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.23 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.22 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK 
module versions - -# v1.3.21 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2022-05-17) - -* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2022-03-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2022-02-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2022-01-28) - -* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. - -# v1.3.4 (2022-01-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2022-01-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2021-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2021-08-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-08-04) - -* **Feature**: adds error handling for defered close calls -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-07-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-07-01) - -* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. 
- -# v1.0.1 (2021-06-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2021-05-20) - -* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go deleted file mode 100644 index 0f278d55e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go +++ /dev/null @@ -1,22 +0,0 @@ -package ini - -import "fmt" - -// UnableToReadFile is an error indicating that a ini file could not be read -type UnableToReadFile struct { - Err error -} - -// Error returns an error message and the underlying error message if present -func (e *UnableToReadFile) Error() string { - base := "unable to read file" - if e.Err == nil { - return base - } - return fmt.Sprintf("%s: %v", base, e.Err) -} - -// Unwrap returns the underlying error -func (e *UnableToReadFile) Unwrap() error { - return e.Err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go deleted file mode 100644 index ef6a38110..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package ini - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.8.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go deleted file mode 100644 index cefcce91e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package ini implements parsing of the AWS shared config file. -// -// Example: -// sections, err := ini.OpenFile("/path/to/file") -// if err != nil { -// panic(err) -// } -// -// profile := "foo" -// section, ok := sections.GetSection(profile) -// if !ok { -// fmt.Printf("section %q could not be found", profile) -// } -package ini - -import ( - "fmt" - "io" - "os" - "strings" -) - -// OpenFile parses shared config from the given file path. 
-func OpenFile(path string) (sections Sections, err error) { - f, oerr := os.Open(path) - if oerr != nil { - return Sections{}, &UnableToReadFile{Err: oerr} - } - - defer func() { - closeErr := f.Close() - if err == nil { - err = closeErr - } else if closeErr != nil { - err = fmt.Errorf("close error: %v, original error: %w", closeErr, err) - } - }() - - return Parse(f, path) -} - -// Parse parses shared config from the given reader. -func Parse(r io.Reader, path string) (Sections, error) { - contents, err := io.ReadAll(r) - if err != nil { - return Sections{}, fmt.Errorf("read all: %v", err) - } - - lines := strings.Split(string(contents), "\n") - tokens, err := tokenize(lines) - if err != nil { - return Sections{}, fmt.Errorf("tokenize: %v", err) - } - - return parse(tokens, path), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go deleted file mode 100644 index 2422d9046..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go +++ /dev/null @@ -1,109 +0,0 @@ -package ini - -import ( - "fmt" - "strings" -) - -func parse(tokens []lineToken, path string) Sections { - parser := &parser{ - path: path, - sections: NewSections(), - } - parser.parse(tokens) - return parser.sections -} - -type parser struct { - csection, ckey string // current state - path string // source file path - sections Sections // parse result -} - -func (p *parser) parse(tokens []lineToken) { - for _, otok := range tokens { - switch tok := otok.(type) { - case *lineTokenProfile: - p.handleProfile(tok) - case *lineTokenProperty: - p.handleProperty(tok) - case *lineTokenSubProperty: - p.handleSubProperty(tok) - case *lineTokenContinuation: - p.handleContinuation(tok) - } - } -} - -func (p *parser) handleProfile(tok *lineTokenProfile) { - name := tok.Name - if tok.Type != "" { - name = fmt.Sprintf("%s %s", tok.Type, tok.Name) - } - p.ckey = "" - p.csection = name - if _, ok := p.sections.container[name]; !ok { - p.sections.container[name] = NewSection(name) - } -} - -func (p *parser) handleProperty(tok *lineTokenProperty) { - if p.csection == "" { - return // LEGACY: don't error on "global" properties - } - - p.ckey = tok.Key - if _, ok := p.sections.container[p.csection].values[tok.Key]; ok { - section := p.sections.container[p.csection] - section.Logs = append(p.sections.container[p.csection].Logs, - fmt.Sprintf( - "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n", - p.csection, tok.Key, tok.Key, p.path, - ), - ) - p.sections.container[p.csection] = section - } - - p.sections.container[p.csection].values[tok.Key] = Value{ - str: tok.Value, - } - p.sections.container[p.csection].SourceFile[tok.Key] = p.path -} - -func (p *parser) handleSubProperty(tok *lineTokenSubProperty) { - if p.csection == "" { - return // LEGACY: don't error on "global" properties - } - - if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" { - // This is an "orphaned" subproperty, either because it's at - // the beginning of a section or because the last property's - // value isn't empty. Either way we're lenient here and - // "promote" this to a normal property. 
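The removed `ini.OpenFile` above folds any failure from the deferred `f.Close()` into the function's named error return. A minimal, stdlib-only sketch of that idiom, using a hypothetical `readConfig` helper rather than anything from the SDK:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// readConfig reads a whole file and surfaces Close errors the same way the
// removed ini.OpenFile does: a close failure is reported on its own if the
// read succeeded, or wrapped together with the earlier error otherwise.
func readConfig(path string) (data []byte, err error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer func() {
		closeErr := f.Close()
		if err == nil {
			err = closeErr
		} else if closeErr != nil {
			err = fmt.Errorf("close error: %v, original error: %w", closeErr, err)
		}
	}()
	return io.ReadAll(f)
}

func main() {
	if _, err := readConfig("/etc/hosts"); err != nil {
		fmt.Println("read failed:", err)
	}
}
```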
- p.handleProperty(&lineTokenProperty{ - Key: tok.Key, - Value: strings.TrimSpace(trimPropertyComment(tok.Value)), - }) - return - } - - if p.sections.container[p.csection].values[p.ckey].mp == nil { - p.sections.container[p.csection].values[p.ckey] = Value{ - mp: map[string]string{}, - } - } - p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value -} - -func (p *parser) handleContinuation(tok *lineTokenContinuation) { - if p.ckey == "" { - return - } - - value, _ := p.sections.container[p.csection].values[p.ckey] - if value.str != "" && value.mp == nil { - value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value) - } - - p.sections.container[p.csection].values[p.ckey] = value -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go deleted file mode 100644 index dd89848e6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go +++ /dev/null @@ -1,157 +0,0 @@ -package ini - -import ( - "sort" -) - -// Sections is a map of Section structures that represent -// a configuration. -type Sections struct { - container map[string]Section -} - -// NewSections returns empty ini Sections -func NewSections() Sections { - return Sections{ - container: make(map[string]Section, 0), - } -} - -// GetSection will return section p. If section p does not exist, -// false will be returned in the second parameter. -func (t Sections) GetSection(p string) (Section, bool) { - v, ok := t.container[p] - return v, ok -} - -// HasSection denotes if Sections consist of a section with -// provided name. -func (t Sections) HasSection(p string) bool { - _, ok := t.container[p] - return ok -} - -// SetSection sets a section value for provided section name. -func (t Sections) SetSection(p string, v Section) Sections { - t.container[p] = v - return t -} - -// DeleteSection deletes a section entry/value for provided section name./ -func (t Sections) DeleteSection(p string) { - delete(t.container, p) -} - -// values represents a map of union values. -type values map[string]Value - -// List will return a list of all sections that were successfully -// parsed. -func (t Sections) List() []string { - keys := make([]string, len(t.container)) - i := 0 - for k := range t.container { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// Section contains a name and values. This represent -// a sectioned entry in a configuration file. -type Section struct { - // Name is the Section profile name - Name string - - // values are the values within parsed profile - values values - - // Errors is the list of errors - Errors []error - - // Logs is the list of logs - Logs []string - - // SourceFile is the INI Source file from where this section - // was retrieved. They key is the property, value is the - // source file the property was retrieved from. - SourceFile map[string]string -} - -// NewSection returns an initialize section for the name -func NewSection(name string) Section { - return Section{ - Name: name, - values: values{}, - SourceFile: map[string]string{}, - } -} - -// List will return a list of all -// services in values -func (t Section) List() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - - sort.Strings(keys) - return keys -} - -// UpdateSourceFile updates source file for a property to provided filepath. 
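The sub-property handling removed above turns indented `key = value` lines under a parent key into a nested map on the section's value. A rough, stdlib-only illustration of that shape; the `subProperties` helper and the sample lines are hypothetical, not SDK code:

```go
package main

import (
	"fmt"
	"strings"
)

// subProperties collects indented "key = value" lines (the layout found under
// an empty-valued parent key in an AWS shared config profile) into a map,
// loosely mirroring how the removed parser fills a map-typed Value.
func subProperties(lines []string) map[string]string {
	out := map[string]string{}
	for _, line := range lines {
		if !strings.HasPrefix(line, " ") && !strings.HasPrefix(line, "\t") {
			continue // only indented lines are sub-properties
		}
		k, v, ok := strings.Cut(strings.TrimSpace(line), "=")
		if !ok {
			continue
		}
		out[strings.TrimSpace(k)] = strings.TrimSpace(v)
	}
	return out
}

func main() {
	block := []string{
		"  use_accelerate_endpoint = true",
		"  addressing_style = path",
	}
	fmt.Println(subProperties(block))
	// map[addressing_style:path use_accelerate_endpoint:true]
}
```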
-func (t Section) UpdateSourceFile(property string, filepath string) { - t.SourceFile[property] = filepath -} - -// UpdateValue updates value for a provided key with provided value -func (t Section) UpdateValue(k string, v Value) error { - t.values[k] = v - return nil -} - -// Has will return whether or not an entry exists in a given section -func (t Section) Has(k string) bool { - _, ok := t.values[k] - return ok -} - -// ValueType will returned what type the union is set to. If -// k was not found, the NoneType will be returned. -func (t Section) ValueType(k string) (ValueType, bool) { - v, ok := t.values[k] - return v.Type, ok -} - -// Bool returns a bool value at k -func (t Section) Bool(k string) (bool, bool) { - return t.values[k].BoolValue() -} - -// Int returns an integer value at k -func (t Section) Int(k string) (int64, bool) { - return t.values[k].IntValue() -} - -// Map returns a map value at k -func (t Section) Map(k string) map[string]string { - return t.values[k].MapValue() -} - -// Float64 returns a float value at k -func (t Section) Float64(k string) (float64, bool) { - return t.values[k].FloatValue() -} - -// String returns the string value at k -func (t Section) String(k string) string { - _, ok := t.values[k] - if !ok { - return "" - } - return t.values[k].StringValue() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go deleted file mode 100644 index ed77d0835..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go +++ /dev/null @@ -1,89 +0,0 @@ -package ini - -import ( - "strings" -) - -func trimProfileComment(s string) string { - r, _, _ := strings.Cut(s, "#") - r, _, _ = strings.Cut(r, ";") - return r -} - -func trimPropertyComment(s string) string { - r, _, _ := strings.Cut(s, " #") - r, _, _ = strings.Cut(r, " ;") - r, _, _ = strings.Cut(r, "\t#") - r, _, _ = strings.Cut(r, "\t;") - return r -} - -// assumes no surrounding comment -func splitProperty(s string) (string, string, bool) { - equalsi := strings.Index(s, "=") - coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment - sep := "=" - if equalsi == -1 || coloni != -1 && coloni < equalsi { - sep = ":" - } - - k, v, ok := strings.Cut(s, sep) - if !ok { - return "", "", false - } - return strings.TrimSpace(k), strings.TrimSpace(v), true -} - -// assumes no surrounding comment, whitespace, or profile brackets -func splitProfile(s string) (string, string) { - var first int - for i, r := range s { - if isLineSpace(r) { - if first == 0 { - first = i - } - } else { - if first != 0 { - return s[:first], s[i:] - } - } - } - if first == 0 { - return "", s // type component is effectively blank - } - return "", "" -} - -func isLineSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func unquote(s string) string { - if isSingleQuoted(s) || isDoubleQuoted(s) { - return s[1 : len(s)-1] - } - return s -} - -// applies various legacy conversions to property values: -// - remote wrapping single/doublequotes -func legacyStrconv(s string) string { - s = unquote(s) - return s -} - -func isSingleQuoted(s string) bool { - return hasAffixes(s, "'", "'") -} - -func isDoubleQuoted(s string) bool { - return hasAffixes(s, `"`, `"`) -} - -func isBracketed(s string) bool { - return hasAffixes(s, "[", "]") -} - -func hasAffixes(s, left, right string) bool { - return strings.HasPrefix(s, left) && strings.HasSuffix(s, right) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go deleted file mode 100644 index 6e9a03744..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go +++ /dev/null @@ -1,32 +0,0 @@ -package ini - -type lineToken interface { - isLineToken() -} - -type lineTokenProfile struct { - Type string - Name string -} - -func (*lineTokenProfile) isLineToken() {} - -type lineTokenProperty struct { - Key string - Value string -} - -func (*lineTokenProperty) isLineToken() {} - -type lineTokenContinuation struct { - Value string -} - -func (*lineTokenContinuation) isLineToken() {} - -type lineTokenSubProperty struct { - Key string - Value string -} - -func (*lineTokenSubProperty) isLineToken() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go deleted file mode 100644 index 89a773684..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go +++ /dev/null @@ -1,92 +0,0 @@ -package ini - -import ( - "strings" -) - -func tokenize(lines []string) ([]lineToken, error) { - tokens := make([]lineToken, 0, len(lines)) - for _, line := range lines { - if len(strings.TrimSpace(line)) == 0 || isLineComment(line) { - continue - } - - if tok := asProfile(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asProperty(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asSubProperty(line); tok != nil { - tokens = append(tokens, tok) - } else if tok := asContinuation(line); tok != nil { - tokens = append(tokens, tok) - } // unrecognized tokens are effectively ignored - } - return tokens, nil -} - -func isLineComment(line string) bool { - trimmed := strings.TrimLeft(line, " \t") - return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";") -} - -func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment" - trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]" - if !isBracketed(trimmed) { - return nil - } - trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ") - trimmed = strings.TrimSpace(trimmed) // "type name" / "name" - typ, name := splitProfile(trimmed) - return &lineTokenProfile{ - Type: typ, - Name: name, - } -} - -func asProperty(line string) *lineTokenProperty { - if isLineSpace(rune(line[0])) { - return nil - } - - trimmed := trimPropertyComment(line) - trimmed = strings.TrimRight(trimmed, " \t") - k, v, ok := splitProperty(trimmed) - if !ok { - return nil - } - - return &lineTokenProperty{ - Key: strings.ToLower(k), // LEGACY: normalize key case - Value: legacyStrconv(v), // LEGACY: see func docs - } -} - -func asSubProperty(line string) *lineTokenSubProperty { - if !isLineSpace(rune(line[0])) { - return nil - } - - // comments on sub-properties are included in the value - trimmed := strings.TrimLeft(line, " \t") - k, v, ok := splitProperty(trimmed) - if !ok { - return nil - } - - return &lineTokenSubProperty{ // same LEGACY constraints as in normal property - Key: strings.ToLower(k), - Value: legacyStrconv(v), - } -} - -func asContinuation(line string) *lineTokenContinuation { - if !isLineSpace(rune(line[0])) { - return nil - } - - // includes comments like sub-properties - trimmed := strings.TrimLeft(line, " \t") - return &lineTokenContinuation{ - Value: trimmed, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go deleted file mode 100644 index e3706b3c3..000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go +++ /dev/null @@ -1,93 +0,0 @@ -package ini - -import ( - "fmt" - "strconv" - "strings" -) - -// ValueType is an enum that will signify what type -// the Value is -type ValueType int - -func (v ValueType) String() string { - switch v { - case NoneType: - return "NONE" - case StringType: - return "STRING" - } - - return "" -} - -// ValueType enums -const ( - NoneType = ValueType(iota) - StringType - QuotedStringType -) - -// Value is a union container -type Value struct { - Type ValueType - - str string - mp map[string]string -} - -// NewStringValue returns a Value type generated using a string input. -func NewStringValue(str string) (Value, error) { - return Value{str: str}, nil -} - -func (v Value) String() string { - switch v.Type { - case StringType: - return fmt.Sprintf("string: %s", string(v.str)) - case QuotedStringType: - return fmt.Sprintf("quoted string: %s", string(v.str)) - default: - return "union not set" - } -} - -// MapValue returns a map value for sub properties -func (v Value) MapValue() map[string]string { - return v.mp -} - -// IntValue returns an integer value -func (v Value) IntValue() (int64, bool) { - i, err := strconv.ParseInt(string(v.str), 0, 64) - if err != nil { - return 0, false - } - return i, true -} - -// FloatValue returns a float value -func (v Value) FloatValue() (float64, bool) { - f, err := strconv.ParseFloat(string(v.str), 64) - if err != nil { - return 0, false - } - return f, true -} - -// BoolValue returns a bool value -func (v Value) BoolValue() (bool, bool) { - // we don't use ParseBool as it recognizes more than what we've - // historically supported - if strings.EqualFold(v.str, "true") { - return true, true - } else if strings.EqualFold(v.str, "false") { - return false, true - } - return false, false -} - -// StringValue returns the string value -func (v Value) StringValue() string { - return v.str -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go deleted file mode 100644 index 8e24a3f0a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go +++ /dev/null @@ -1,42 +0,0 @@ -package middleware - -import ( - "context" - "sync/atomic" - "time" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go/middleware" -) - -// AddTimeOffsetMiddleware sets a value representing clock skew on the request context. -// This can be read by other operations (such as signing) to correct the date value they send -// on the request -type AddTimeOffsetMiddleware struct { - Offset *atomic.Int64 -} - -// ID the identifier for AddTimeOffsetMiddleware -func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" } - -// HandleBuild sets a value for attemptSkew on the request context if one is set on the client. 
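The `AddTimeOffsetMiddleware` being removed here carries an estimated clock-skew offset on the request context so later steps (such as signing) can correct the request time. As a rough illustration of where such an offset can come from, and not the SDK's actual wiring, skew can be estimated from a server's `Date` header:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// clockSkew estimates how far a server's clock is ahead of (positive) or
// behind (negative) the local clock, given an RFC 7231 Date header value.
// Illustrative sketch only; the function name and approach are assumptions.
func clockSkew(dateHeader string, localNow time.Time) (time.Duration, error) {
	serverTime, err := http.ParseTime(dateHeader)
	if err != nil {
		return 0, err
	}
	return serverTime.Sub(localNow), nil
}

func main() {
	skew, err := clockSkew("Tue, 15 Nov 1994 08:12:31 GMT", time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println("estimated skew:", skew)
}
```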
-func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - if m.Offset != nil { - offset := time.Duration(m.Offset.Load()) - ctx = internalcontext.SetAttemptSkewContext(ctx, offset) - } - return next.HandleBuild(ctx, in) -} - -// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer -// held by AddTimeOffsetMiddleware -func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 { - m.Offset.Store(v.Nanoseconds()) - } - return next.HandleDeserialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go deleted file mode 100644 index c8484dcd7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go +++ /dev/null @@ -1,33 +0,0 @@ -package rand - -import ( - "crypto/rand" - "fmt" - "io" - "math/big" -) - -func init() { - Reader = rand.Reader -} - -// Reader provides a random reader that can reset during testing. -var Reader io.Reader - -var floatMaxBigInt = big.NewInt(1 << 53) - -// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0). -func Float64(reader io.Reader) (float64, error) { - bi, err := rand.Int(reader, floatMaxBigInt) - if err != nil { - return 0, fmt.Errorf("failed to read random value, %v", err) - } - - return float64(bi.Int64()) / (1 << 53), nil -} - -// CryptoRandFloat64 returns a random float64 obtained from the crypto rand -// source. -func CryptoRandFloat64() (float64, error) { - return Float64(Reader) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go deleted file mode 100644 index 2b42cbe64..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go +++ /dev/null @@ -1,9 +0,0 @@ -package sdk - -// Invalidator provides access to a type's invalidate method to make it -// invalidate it cache. -// -// e.g aws.SafeCredentialsProvider's Invalidate method. -type Invalidator interface { - Invalidate() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go deleted file mode 100644 index 8e8dabad5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go +++ /dev/null @@ -1,74 +0,0 @@ -package sdk - -import ( - "context" - "time" -) - -func init() { - NowTime = time.Now - Sleep = time.Sleep - SleepWithContext = sleepWithContext -} - -// NowTime is a value for getting the current time. This value can be overridden -// for testing mocking out current time. -var NowTime func() time.Time - -// Sleep is a value for sleeping for a duration. This value can be overridden -// for testing and mocking out sleep duration. -var Sleep func(time.Duration) - -// SleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the Context's -// error will be returned. -// -// This value can be overridden for testing and mocking out sleep duration. 
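The removed `sdk` package exposes its clock and sleep functions as package variables precisely so tests can substitute deterministic versions. A self-contained sketch of the same pattern with hypothetical local names (`nowTime`, `expired`), placed in a `_test.go` file:

```go
package clock

import (
	"testing"
	"time"
)

// nowTime is an overridable clock, mirroring the pattern of the removed
// sdk.NowTime hook (this variable is illustrative, not the SDK's).
var nowTime = time.Now

func expired(deadline time.Time) bool {
	return !deadline.After(nowTime())
}

func TestExpiredWithFrozenClock(t *testing.T) {
	orig := nowTime
	nowTime = func() time.Time { return time.Date(2030, 1, 1, 0, 0, 0, 0, time.UTC) }
	defer func() { nowTime = orig }()

	if !expired(time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)) {
		t.Fatal("a deadline in the (frozen) past should be expired")
	}
}
```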
-var SleepWithContext func(context.Context, time.Duration) error - -// sleepWithContext will wait for the timer duration to expire, or the context -// is canceled. Which ever happens first. If the context is canceled the -// Context's error will be returned. -func sleepWithContext(ctx context.Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} - -// noOpSleepWithContext does nothing, returns immediately. -func noOpSleepWithContext(context.Context, time.Duration) error { - return nil -} - -func noOpSleep(time.Duration) {} - -// TestingUseNopSleep is a utility for disabling sleep across the SDK for -// testing. -func TestingUseNopSleep() func() { - SleepWithContext = noOpSleepWithContext - Sleep = noOpSleep - - return func() { - SleepWithContext = sleepWithContext - Sleep = time.Sleep - } -} - -// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time -// for testing purposes. -func TestingUseReferenceTime(referenceTime time.Time) func() { - NowTime = func() time.Time { - return referenceTime - } - return func() { - NowTime = time.Now - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go deleted file mode 100644 index 6c443988b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package sdkio - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go deleted file mode 100644 index c96b717e0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go +++ /dev/null @@ -1,47 +0,0 @@ -package shareddefaults - -import ( - "os" - "os/user" - "path/filepath" -) - -// SharedCredentialsFilename returns the SDK's default file path -// for the shared credentials file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/credentials -// - Windows: %USERPROFILE%\.aws\credentials -func SharedCredentialsFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "credentials") -} - -// SharedConfigFilename returns the SDK's default file path for -// the shared config file. -// -// Builds the shared config file path based on the OS's platform. -// -// - Linux/Unix: $HOME/.aws/config -// - Windows: %USERPROFILE%\.aws\config -func SharedConfigFilename() string { - return filepath.Join(UserHomeDir(), ".aws", "config") -} - -// UserHomeDir returns the home directory for the user the process is -// running under. -func UserHomeDir() string { - // Ignore errors since we only care about Windows and *nix. 
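The path helpers above only build the default `~/.aws/credentials` and `~/.aws/config` locations; environment overrides such as `AWS_CONFIG_FILE` are handled elsewhere in the SDK's config loading. A stdlib-only sketch of the same defaults:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Default shared-file locations, mirroring the removed shareddefaults
	// helpers: $HOME/.aws/{credentials,config} (or %USERPROFILE% on Windows).
	home, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot determine home directory:", err)
		os.Exit(1)
	}
	fmt.Println("credentials:", filepath.Join(home, ".aws", "credentials"))
	fmt.Println("config:     ", filepath.Join(home, ".aws", "config"))
}
```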
- home, _ := os.UserHomeDir() - - if len(home) > 0 { - return home - } - - currUser, _ := user.Current() - if currUser != nil { - home = currUser.HomeDir - } - - return home -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go deleted file mode 100644 index d008ae27c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go +++ /dev/null @@ -1,11 +0,0 @@ -package strings - -import ( - "strings" -) - -// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, -// under Unicode case-folding. -func HasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE deleted file mode 100644 index fe6a62006..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go deleted file mode 100644 index cb70616e8..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package singleflight provides a duplicate function call suppression -// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight -// package. The package is forked, because the package a part of the unstable -// and unversioned golang.org/x/sync module. 
-// -// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight -package singleflight diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go deleted file mode 100644 index e8a1b17d5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package singleflight - -import ( - "bytes" - "errors" - "fmt" - "runtime" - "runtime/debug" - "sync" -) - -// errGoexit indicates the runtime.Goexit was called in -// the user given function. -var errGoexit = errors.New("runtime.Goexit was called") - -// A panicError is an arbitrary value recovered from a panic -// with the stack trace during the execution of given function. -type panicError struct { - value interface{} - stack []byte -} - -// Error implements error interface. -func (p *panicError) Error() string { - return fmt.Sprintf("%v\n\n%s", p.value, p.stack) -} - -func newPanicError(v interface{}) error { - stack := debug.Stack() - - // The first line of the stack trace is of the form "goroutine N [status]:" - // but by the time the panic reaches Do the goroutine may no longer exist - // and its status will have changed. Trim out the misleading line. - if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { - stack = stack[line+1:] - } - return &panicError{value: v, stack: stack} -} - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - - if e, ok := c.err.(*panicError); ok { - panic(e) - } else if c.err == errGoexit { - runtime.Goexit() - } - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. 
-// -// The returned channel will not be closed. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - normalReturn := false - recovered := false - - // use double-defer to distinguish panic from runtime.Goexit, - // more details see https://golang.org/cl/134395 - defer func() { - // the given function invoked runtime.Goexit - if !normalReturn && !recovered { - c.err = errGoexit - } - - c.wg.Done() - g.mu.Lock() - defer g.mu.Unlock() - if !c.forgotten { - delete(g.m, key) - } - - if e, ok := c.err.(*panicError); ok { - // In order to prevent the waiting channels from being blocked forever, - // needs to ensure that this panic cannot be recovered. - if len(c.chans) > 0 { - go panic(e) - select {} // Keep this goroutine around so that it will appear in the crash dump. - } else { - panic(e) - } - } else if c.err == errGoexit { - // Already in the process of goexit, no need to call again - } else { - // Normal return - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - } - }() - - func() { - defer func() { - if !normalReturn { - // Ideally, we would wait to take a stack trace until we've determined - // whether this is a panic or a runtime.Goexit. - // - // Unfortunately, the only way we can distinguish the two is to see - // whether the recover stopped the goroutine from terminating, and by - // the time we know that, the part of the stack trace relevant to the - // panic has been discarded. - if r := recover(); r != nil { - c.err = newPanicError(r) - } - } - }() - - c.val, c.err = fn() - normalReturn = true - }() - - if !normalReturn { - recovered = true - } -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. -func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go deleted file mode 100644 index 5d69db5f2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go +++ /dev/null @@ -1,13 +0,0 @@ -package timeconv - -import "time" - -// FloatSecondsDur converts a fractional seconds to duration. -func FloatSecondsDur(v float64) time.Duration { - return time.Duration(v * float64(time.Second)) -} - -// DurSecondsFloat converts a duration into fractional seconds. 
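This removed file is the SDK's internal fork of `golang.org/x/sync/singleflight`, as its package comment above notes. A minimal usage sketch of the upstream package's duplicate suppression; the key and result values here are made up:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	// Concurrent calls for the same key are coalesced: while one call is in
	// flight, duplicate callers wait and receive the same result.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err, shared := g.Do("profile-lookup", func() (interface{}, error) {
				return "expensive result", nil
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}
```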
-func DurSecondsFloat(d time.Duration) float64 { - return float64(d) / float64(time.Second) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md deleted file mode 100644 index aab35c8d9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md +++ /dev/null @@ -1,311 +0,0 @@ -# v1.3.28 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.27 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.26 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.25 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.24 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.23 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.22 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.21 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2024-10-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.6 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.5 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.4 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.3 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.2 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.28 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.27 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.26 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.25 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.24 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.23 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.22 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.21 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.20 (2023-02-14) - -* No change notes available for this release. 
- -# v1.0.19 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.18 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.17 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.16 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.15 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.14 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.13 (2022-09-14) - -* **Bug Fix**: Fixes an issues where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.12 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.11 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.10 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.9 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.8 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.7 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.6 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.5 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.4 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.3 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.2 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.1 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2022-04-07) - -* **Release**: New internal v4a signing module location. - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go deleted file mode 100644 index 3ae3a019e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go +++ /dev/null @@ -1,141 +0,0 @@ -package v4a - -import ( - "context" - "crypto/ecdsa" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" -) - -// Credentials is Context, ECDSA, and Optional Session Token that can be used -// to sign requests using SigV4a -type Credentials struct { - Context string - PrivateKey *ecdsa.PrivateKey - SessionToken string - - // Time the credentials will expire. - CanExpire bool - Expires time.Time -} - -// Expired returns if the credentials have expired. -func (v Credentials) Expired() bool { - if v.CanExpire { - return !v.Expires.After(sdk.NowTime()) - } - - return false -} - -// HasKeys returns if the credentials keys are set. -func (v Credentials) HasKeys() bool { - return len(v.Context) > 0 && v.PrivateKey != nil -} - -// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials -// to a ECDSA PrivateKey for signing with SiV4a -type SymmetricCredentialAdaptor struct { - SymmetricProvider aws.CredentialsProvider - - asymmetric atomic.Value - m sync.Mutex -} - -// Retrieve retrieves symmetric credentials from the underlying provider. -func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) { - symCreds, err := s.retrieveFromSymmetricProvider(ctx) - if err != nil { - return aws.Credentials{}, err - } - - if asymCreds := s.getCreds(); asymCreds == nil { - return symCreds, nil - } - - s.m.Lock() - defer s.m.Unlock() - - asymCreds := s.getCreds() - if asymCreds == nil { - return symCreds, nil - } - - // if the context does not match the access key id clear it - if asymCreds.Context != symCreds.AccessKeyID { - s.asymmetric.Store((*Credentials)(nil)) - } - - return symCreds, nil -} - -// RetrievePrivateKey returns credentials suitable for SigV4a signing -func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) { - if asymCreds := s.getCreds(); asymCreds != nil { - return *asymCreds, nil - } - - s.m.Lock() - defer s.m.Unlock() - - if asymCreds := s.getCreds(); asymCreds != nil { - return *asymCreds, nil - } - - symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx) - if err != nil { - return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err) - } - - privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey) - if err != nil { - return Credentials{}, fmt.Errorf("failed to derive assymetric key from credentials") - } - - creds := Credentials{ - Context: symmetricCreds.AccessKeyID, - PrivateKey: privateKey, - SessionToken: symmetricCreds.SessionToken, - CanExpire: symmetricCreds.CanExpire, - Expires: symmetricCreds.Expires, - } - - s.asymmetric.Store(&creds) - - return creds, nil -} - -func (s *SymmetricCredentialAdaptor) getCreds() *Credentials { - v := s.asymmetric.Load() - - if v == nil { - return nil - } - - c := v.(*Credentials) - if c != nil && c.HasKeys() && !c.Expired() { - return c - } - - return nil -} - -func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) { - credentials, err := s.SymmetricProvider.Retrieve(ctx) - if err != nil { - return aws.Credentials{}, err - } - - 
return credentials, nil -} - -// CredentialsProvider is the interface for a provider to retrieve credentials -// to sign requests with. -type CredentialsProvider interface { - RetrievePrivateKey(context.Context) (Credentials, error) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go deleted file mode 100644 index 380d17427..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package v4a - -import "fmt" - -// SigningError indicates an error condition occurred while performing SigV4a signing -type SigningError struct { - Err error -} - -func (e *SigningError) Error() string { - return fmt.Sprintf("failed to sign request: %v", e.Err) -} - -// Unwrap returns the underlying error cause -func (e *SigningError) Unwrap() error { - return e.Err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go deleted file mode 100644 index 46d009b75..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package v4a - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.28" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go deleted file mode 100644 index 1d0f25f8c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go +++ /dev/null @@ -1,30 +0,0 @@ -package crypto - -import "fmt" - -// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison -// if the two byte slices assuming they represent a big-endian number. -// -// error if len(x) != len(y) -// -1 if x < y -// 0 if x == y -// +1 if x > y -func ConstantTimeByteCompare(x, y []byte) (int, error) { - if len(x) != len(y) { - return 0, fmt.Errorf("slice lengths do not match") - } - - xLarger, yLarger := 0, 0 - - for i := 0; i < len(x); i++ { - xByte, yByte := int(x[i]), int(y[i]) - - x := ((yByte - xByte) >> 8) & 1 - y := ((xByte - yByte) >> 8) & 1 - - xLarger |= x &^ yLarger - yLarger |= y &^ xLarger - } - - return xLarger - yLarger, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go deleted file mode 100644 index 758c73fcb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go +++ /dev/null @@ -1,113 +0,0 @@ -package crypto - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/hmac" - "encoding/asn1" - "encoding/binary" - "fmt" - "hash" - "math" - "math/big" -) - -type ecdsaSignature struct { - R, S *big.Int -} - -// ECDSAKey takes the given elliptic curve, and private key (d) byte slice -// and returns the private ECDSA key. 
-func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey { - return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d)) -} - -// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the -// private and public keypair -func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey { - pX, pY := curve.ScalarBaseMult(d.Bytes()) - - privKey := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: pX, - Y: pY, - }, - D: d, - } - - return privKey -} - -// ECDSAPublicKey takes the provide curve and (x, y) coordinates and returns -// *ecdsa.PublicKey. Returns an error if the given points are not on the curve. -func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) { - xPoint := (&big.Int{}).SetBytes(x) - yPoint := (&big.Int{}).SetBytes(y) - - if !curve.IsOnCurve(xPoint, yPoint) { - return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String()) - } - - return &ecdsa.PublicKey{ - Curve: curve, - X: xPoint, - Y: yPoint, - }, nil -} - -// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns -// whether the given signature is valid. -func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) { - var ecdsaSignature ecdsaSignature - - _, err := asn1.Unmarshal(signature, &ecdsaSignature) - if err != nil { - return false, err - } - - return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil -} - -// HMACKeyDerivation provides an implementation of a NIST-800-108 of a KDF (Key Derivation Function) in Counter Mode. -// For the purposes of this implantation HMAC is used as the PRF (Pseudorandom function), where the value of -// `r` is defined as a 4 byte counter. -func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) { - // verify that we won't overflow the counter - n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size()))) - if n > 0x7FFFFFFF { - return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen) - } - - // verify the requested bit length is not larger then the length encoding size - if int64(bitLen) > 0x7FFFFFFF { - return nil, fmt.Errorf("bitLen is greater than 32-bits") - } - - fixedInput := bytes.NewBuffer(nil) - fixedInput.Write(label) - fixedInput.WriteByte(0x00) - fixedInput.Write(context) - if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil { - return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err) - } - - var output []byte - - h := hmac.New(hash, key) - - for i := int64(1); i <= n; i++ { - h.Reset() - if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil { - return nil, err - } - _, err := h.Write(fixedInput.Bytes()) - if err != nil { - return nil, err - } - output = append(output, h.Sum(nil)...) 
- } - - return output[:bitLen/8], nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go deleted file mode 100644 index 89a76e2ea..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go +++ /dev/null @@ -1,36 +0,0 @@ -package v4 - -const ( - // EmptyStringSHA256 is the hex encoded sha256 value of an empty string - EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` - - // UnsignedPayload indicates that the request payload body is unsigned - UnsignedPayload = "UNSIGNED-PAYLOAD" - - // AmzAlgorithmKey indicates the signing algorithm - AmzAlgorithmKey = "X-Amz-Algorithm" - - // AmzSecurityTokenKey indicates the security token to be used with temporary credentials - AmzSecurityTokenKey = "X-Amz-Security-Token" - - // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' - AmzDateKey = "X-Amz-Date" - - // AmzCredentialKey is the access key ID and credential scope - AmzCredentialKey = "X-Amz-Credential" - - // AmzSignedHeadersKey is the set of headers signed for the request - AmzSignedHeadersKey = "X-Amz-SignedHeaders" - - // AmzSignatureKey is the query parameter to store the SigV4 signature - AmzSignatureKey = "X-Amz-Signature" - - // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter - TimeFormat = "20060102T150405Z" - - // ShortTimeFormat is the shorten time format used in the credential scope - ShortTimeFormat = "20060102" - - // ContentSHAKey is the SHA256 of request body - ContentSHAKey = "X-Amz-Content-Sha256" -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go deleted file mode 100644 index a15177e8f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go +++ /dev/null @@ -1,82 +0,0 @@ -package v4 - -import ( - sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings" -) - -// Rules houses a set of Rule needed for validation of a -// string value -type Rules []Rule - -// Rule interface allows for more flexible rules and just simply -// checks whether or not a value adheres to that Rule -type Rule interface { - IsValid(value string) bool -} - -// IsValid will iterate through all rules and see if any rules -// apply to the value and supports nested rules -func (r Rules) IsValid(value string) bool { - for _, rule := range r { - if rule.IsValid(value) { - return true - } - } - return false -} - -// MapRule generic Rule for maps -type MapRule map[string]struct{} - -// IsValid for the map Rule satisfies whether it exists in the map -func (m MapRule) IsValid(value string) bool { - _, ok := m[value] - return ok -} - -// AllowList is a generic Rule for whitelisting -type AllowList struct { - Rule -} - -// IsValid for AllowList checks if the value is within the AllowList -func (w AllowList) IsValid(value string) bool { - return w.Rule.IsValid(value) -} - -// DenyList is a generic Rule for blacklisting -type DenyList struct { - Rule -} - -// IsValid for AllowList checks if the value is within the AllowList -func (b DenyList) IsValid(value string) bool { - return !b.Rule.IsValid(value) -} - -// Patterns is a list of strings to match against -type Patterns []string - -// IsValid for Patterns checks each pattern and returns if a match has -// been found -func (p Patterns) IsValid(value string) bool { - for _, 
pattern := range p { - if sdkstrings.HasPrefixFold(value, pattern) { - return true - } - } - return false -} - -// InclusiveRules rules allow for rules to depend on one another -type InclusiveRules []Rule - -// IsValid will return true if all rules are true -func (r InclusiveRules) IsValid(value string) bool { - for _, rule := range r { - if !rule.IsValid(value) { - return false - } - } - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go deleted file mode 100644 index 3487dc335..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go +++ /dev/null @@ -1,67 +0,0 @@ -package v4 - -// IgnoredHeaders is a list of headers that are ignored during signing -var IgnoredHeaders = Rules{ - DenyList{ - MapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - }, - }, -} - -// RequiredSignedHeaders is a whitelist for Build canonical headers. -var RequiredSignedHeaders = Rules{ - AllowList{ - MapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - "X-Amz-Content-Sha256": struct{}{}, - "X-Amz-Tagging": struct{}{}, - }, - }, - Patterns{"X-Amz-Meta-"}, -} - -// AllowedQueryHoisting is a whitelist for Build query headers. The boolean value -// represents whether or not it is a pattern. -var AllowedQueryHoisting = InclusiveRules{ - DenyList{RequiredSignedHeaders}, - Patterns{"X-Amz-"}, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go deleted file mode 100644 index e7fa7a1b1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go +++ /dev/null @@ -1,13 +0,0 @@ -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" -) - -// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. 
-func HMACSHA256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go deleted file mode 100644 index bf93659a4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go +++ /dev/null @@ -1,75 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" -) - -// SanitizeHostForHeader removes default port from host and updates request.Host -func SanitizeHostForHeader(r *http.Request) { - host := getHost(r) - port := portOnly(host) - if port != "" && isDefaultPort(r.URL.Scheme, port) { - r.Host = stripPort(host) - } -} - -// Returns host from request -func getHost(r *http.Request) string { - if r.Host != "" { - return r.Host - } - - return r.URL.Host -} - -// Hostname returns u.Host, without any port number. -// -// If Host is an IPv6 literal with a port number, Hostname returns the -// IPv6 literal without the square brackets. IPv6 literals may include -// a zone identifier. -// -// Copied from the Go 1.8 standard library (net/url) -func stripPort(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return hostport - } - if i := strings.IndexByte(hostport, ']'); i != -1 { - return strings.TrimPrefix(hostport[:i], "[") - } - return hostport[:colon] -} - -// Port returns the port part of u.Host, without the leading colon. -// If u.Host doesn't contain a port, Port returns an empty string. -// -// Copied from the Go 1.8 standard library (net/url) -func portOnly(hostport string) string { - colon := strings.IndexByte(hostport, ':') - if colon == -1 { - return "" - } - if i := strings.Index(hostport, "]:"); i != -1 { - return hostport[i+len("]:"):] - } - if strings.Contains(hostport, "]") { - return "" - } - return hostport[colon+len(":"):] -} - -// Returns true if the specified URI is using the standard port -// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) -func isDefaultPort(scheme, port string) bool { - if port == "" { - return true - } - - lowerCaseScheme := strings.ToLower(scheme) - if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { - return true - } - - return false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go deleted file mode 100644 index 1de06a765..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go +++ /dev/null @@ -1,36 +0,0 @@ -package v4 - -import "time" - -// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. -type SigningTime struct { - time.Time - timeFormat string - shortTimeFormat string -} - -// NewSigningTime creates a new SigningTime given a time.Time -func NewSigningTime(t time.Time) SigningTime { - return SigningTime{ - Time: t, - } -} - -// TimeFormat provides a time formatted in the X-Amz-Date format. -func (m *SigningTime) TimeFormat() string { - return m.format(&m.timeFormat, TimeFormat) -} - -// ShortTimeFormat provides a time formatted of 20060102. 
-func (m *SigningTime) ShortTimeFormat() string { - return m.format(&m.shortTimeFormat, ShortTimeFormat) -} - -func (m *SigningTime) format(target *string, format string) string { - if len(*target) > 0 { - return *target - } - v := m.Time.Format(format) - *target = v - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go deleted file mode 100644 index 741019b5f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go +++ /dev/null @@ -1,64 +0,0 @@ -package v4 - -import ( - "net/url" - "strings" -) - -const doubleSpace = " " - -// StripExcessSpaces will rewrite the passed in slice's string values to not -// contain muliple side-by-side spaces. -func StripExcessSpaces(str string) string { - var j, k, l, m, spaces int - // Trim trailing spaces - for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { - } - - // Trim leading spaces - for k = 0; k < j && str[k] == ' '; k++ { - } - str = str[k : j+1] - - // Strip multiple spaces. - j = strings.Index(str, doubleSpace) - if j < 0 { - return str - } - - buf := []byte(str) - for k, m, l = j, j, len(buf); k < l; k++ { - if buf[k] == ' ' { - if spaces == 0 { - // First space. - buf[m] = buf[k] - m++ - } - spaces++ - } else { - // End of multiple spaces. - spaces = 0 - buf[m] = buf[k] - m++ - } - } - - return string(buf[:m]) -} - -// GetURIPath returns the escaped URI component from the provided URL -func GetURIPath(u *url.URL) string { - var uri string - - if len(u.Opaque) > 0 { - uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") - } else { - uri = u.EscapedPath() - } - - if len(uri) == 0 { - uri = "/" - } - - return uri -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go deleted file mode 100644 index 64b8b4e33..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go +++ /dev/null @@ -1,118 +0,0 @@ -package v4a - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "time" -) - -// HTTPSigner is SigV4a HTTP signer implementation -type HTTPSigner interface { - SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optfns ...func(*SignerOptions)) error -} - -// SignHTTPRequestMiddlewareOptions is the middleware options for constructing a SignHTTPRequestMiddleware. -type SignHTTPRequestMiddlewareOptions struct { - Credentials CredentialsProvider - Signer HTTPSigner - LogSigning bool -} - -// SignHTTPRequestMiddleware is a middleware for signing an HTTP request using SigV4a. -type SignHTTPRequestMiddleware struct { - credentials CredentialsProvider - signer HTTPSigner - logSigning bool -} - -// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given SignHTTPRequestMiddlewareOptions. -func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { - return &SignHTTPRequestMiddleware{ - credentials: options.Credentials, - signer: options.Signer, - logSigning: options.LogSigning, - } -} - -// ID the middleware identifier. 
-func (s *SignHTTPRequestMiddleware) ID() string { - return "Signing" -} - -// HandleFinalize signs an HTTP request using SigV4a. -func (s *SignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if !hasCredentialProvider(s.credentials) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected request middleware type %T", in.Request) - } - - signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) - payloadHash := v4.GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} - } - - credentials, err := s.credentials.RetrievePrivateKey(ctx) - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} - } - - signerOptions := []func(o *SignerOptions){ - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }, - } - - // existing DisableURIPathEscaping is equivalent in purpose - // to authentication scheme property DisableDoubleEncoding - disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) - if overridden { - signerOptions = append(signerOptions, func(o *SignerOptions) { - o.DisableURIPathEscaping = disableDoubleEncoding - }) - } - - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, []string{signingRegion}, time.Now().UTC(), signerOptions...) - if err != nil { - return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} - } - - return next.HandleFinalize(ctx, in) -} - -func hasCredentialProvider(p CredentialsProvider) bool { - if p == nil { - return false - } - - return true -} - -// RegisterSigningMiddleware registers the SigV4a signing middleware to the stack. If a signing middleware is already -// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the -// finalize step. -func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) { - const signedID = "Signing" - _, present := stack.Finalize.Get(signedID) - if present { - _, err = stack.Finalize.Swap(signedID, signingMiddleware) - } else { - err = stack.Finalize.Add(signingMiddleware, middleware.After) - } - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go deleted file mode 100644 index 951fc415d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go +++ /dev/null @@ -1,117 +0,0 @@ -package v4a - -import ( - "context" - "fmt" - "net/http" - "time" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go/middleware" - smithyHTTP "github.com/aws/smithy-go/transport/http" -) - -// HTTPPresigner is an interface to a SigV4a signer that can sign create a -// presigned URL for a HTTP requests. 
-type HTTPPresigner interface { - PresignHTTP( - ctx context.Context, credentials Credentials, r *http.Request, - payloadHash string, service string, regionSet []string, signingTime time.Time, - optFns ...func(*SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. -type PresignHTTPRequestMiddlewareOptions struct { - CredentialsProvider CredentialsProvider - Presigner HTTPPresigner - LogSigning bool -} - -// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a -// presigned URL for an HTTP request. -// -// Will short circuit the middleware stack and not forward onto the next -// Finalize handler. -type PresignHTTPRequestMiddleware struct { - credentialsProvider CredentialsProvider - presigner HTTPPresigner - logSigning bool -} - -// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware -// initialized with the presigner. -func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { - return &PresignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - presigner: options.Presigner, - logSigning: options.LogSigning, - } -} - -// ID provides the middleware ID. -func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 presign authentication scheme. -func (s *PresignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyHTTP.Request) - if !ok { - return out, metadata, &SigningError{ - Err: fmt.Errorf("unexpected request middleware type %T", in.Request), - } - } - - httpReq := req.Build(ctx) - if !hasCredentialProvider(s.credentialsProvider) { - out.Result = &v4.PresignedHTTPRequest{ - URL: httpReq.URL.String(), - Method: httpReq.Method, - SignedHeader: http.Header{}, - } - - return out, metadata, nil - } - - signingName := awsmiddleware.GetSigningName(ctx) - signingRegion := awsmiddleware.GetSigningRegion(ctx) - payloadHash := v4.GetPayloadHash(ctx) - if len(payloadHash) == 0 { - return out, metadata, &SigningError{ - Err: fmt.Errorf("computed payload hash missing from context"), - } - } - - credentials, err := s.credentialsProvider.RetrievePrivateKey(ctx) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to retrieve credentials: %w", err), - } - } - - u, h, err := s.presigner.PresignHTTP(ctx, credentials, - httpReq, payloadHash, signingName, []string{signingRegion}, sdk.NowTime(), - func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }) - if err != nil { - return out, metadata, &SigningError{ - Err: fmt.Errorf("failed to sign http request, %w", err), - } - } - - out.Result = &v4.PresignedHTTPRequest{ - URL: u, - Method: httpReq.Method, - SignedHeader: h, - } - - return out, metadata, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go deleted file mode 100644 index af4f6abcf..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go +++ /dev/null @@ -1,92 +0,0 @@ -package v4a - -import ( - "context" - "fmt" - "time" - - internalcontext 
"github.com/aws/aws-sdk-go-v2/internal/context" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// CredentialsAdapter adapts v4a.Credentials to smithy auth.Identity. -type CredentialsAdapter struct { - Credentials Credentials -} - -var _ auth.Identity = (*CredentialsAdapter)(nil) - -// Expiration returns the time of expiration for the credentials. -func (v *CredentialsAdapter) Expiration() time.Time { - return v.Credentials.Expires -} - -// CredentialsProviderAdapter adapts v4a.CredentialsProvider to -// auth.IdentityResolver. -type CredentialsProviderAdapter struct { - Provider CredentialsProvider -} - -var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) - -// GetIdentity retrieves v4a credentials using the underlying provider. -func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( - auth.Identity, error, -) { - creds, err := v.Provider.RetrievePrivateKey(ctx) - if err != nil { - return nil, fmt.Errorf("get credentials: %w", err) - } - - return &CredentialsAdapter{Credentials: creds}, nil -} - -// SignerAdapter adapts v4a.HTTPSigner to smithy http.Signer. -type SignerAdapter struct { - Signer HTTPSigner - Logger logging.Logger - LogSigning bool -} - -var _ (smithyhttp.Signer) = (*SignerAdapter)(nil) - -// SignRequest signs the request with the provided identity. -func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { - ca, ok := identity.(*CredentialsAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - name, ok := smithyhttp.GetSigV4SigningName(&props) - if !ok { - return fmt.Errorf("sigv4a signing name is required") - } - - regions, ok := smithyhttp.GetSigV4ASigningRegions(&props) - if !ok { - return fmt.Errorf("sigv4a signing region is required") - } - - hash := v4.GetPayloadHash(ctx) - signingTime := sdk.NowTime() - if skew := internalcontext.GetAttemptSkewContext(ctx); skew != 0 { - signingTime.Add(skew) - } - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, signingTime, func(o *SignerOptions) { - o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) - - o.Logger = v.Logger - o.LogSigning = v.LogSigning - }) - if err != nil { - return fmt.Errorf("sign http: %w", err) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go deleted file mode 100644 index f1f6ecc37..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go +++ /dev/null @@ -1,520 +0,0 @@ -package v4a - -import ( - "bytes" - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "fmt" - "hash" - "math/big" - "net/http" - "net/textproto" - "net/url" - "sort" - "strconv" - "strings" - "time" - - signerCrypto "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto" - v4Internal "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/logging" -) - -const ( - // AmzRegionSetKey represents the region set header used for sigv4a - AmzRegionSetKey = "X-Amz-Region-Set" - amzAlgorithmKey = v4Internal.AmzAlgorithmKey - amzSecurityTokenKey = 
v4Internal.AmzSecurityTokenKey - amzDateKey = v4Internal.AmzDateKey - amzCredentialKey = v4Internal.AmzCredentialKey - amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey - authorizationHeader = "Authorization" - - signingAlgorithm = "AWS4-ECDSA-P256-SHA256" - - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" - - // EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string - EmptyStringSHA256 = v4Internal.EmptyStringSHA256 - - // Version of signing v4a - Version = "SigV4A" -) - -var ( - p256 elliptic.Curve - nMinusTwoP256 *big.Int - - one = new(big.Int).SetInt64(1) -) - -func init() { - // Ensure the elliptic curve parameters are initialized on package import rather then on first usage - p256 = elliptic.P256() - - nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes()) - nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2)) -} - -// SignerOptions is the SigV4a signing options for constructing a Signer. -type SignerOptions struct { - Logger logging.Logger - LogSigning bool - - // Disables the Signer's moving HTTP header key/value pairs from the HTTP - // request header to the request's query string. This is most commonly used - // with pre-signed requests preventing headers from being added to the - // request's query string. - DisableHeaderHoisting bool - - // Disables the automatic escaping of the URI path of the request for the - // siganture's canonical string's path. For services that do not need additional - // escaping then use this to disable the signer escaping the path. - // - // S3 is an example of a service that does not need additional escaping. - // - // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - DisableURIPathEscaping bool -} - -// Signer is a SigV4a HTTP signing implementation -type Signer struct { - options SignerOptions -} - -// NewSigner constructs a SigV4a Signer. -func NewSigner(optFns ...func(*SignerOptions)) *Signer { - options := SignerOptions{} - - for _, fn := range optFns { - fn(&options) - } - - return &Signer{options: options} -} - -// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given -// IAM AccessKey and SecretKey pair. -// -// Based on FIPS.186-4 Appendix B.4.2 -func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) { - params := p256.Params() - bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits - counter := 0x01 - - buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey) - kdfContext := bytes.NewBuffer(buffer) - - inputKey := append([]byte("AWS4A"), []byte(secretKey)...) - - d := new(big.Int) - for { - kdfContext.Reset() - kdfContext.WriteString(accessKey) - kdfContext.WriteByte(byte(counter)) - - key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes()) - if err != nil { - return nil, err - } - - // Check key first before calling SetBytes if key key is in fact a valid candidate. 
- // This ensures the byte slice is the correct length (32-bytes) to compare in constant-time - cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes()) - if err != nil { - return nil, err - } - if cmp == -1 { - d.SetBytes(key) - break - } - - counter++ - if counter > 0xFF { - return nil, fmt.Errorf("exhausted single byte external counter") - } - } - d = d.Add(d, one) - - priv := new(ecdsa.PrivateKey) - priv.PublicKey.Curve = p256 - priv.D = d - priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes()) - - return priv, nil -} - -type httpSigner struct { - Request *http.Request - ServiceName string - RegionSet []string - Time time.Time - Credentials Credentials - IsPreSign bool - - Logger logging.Logger - Debug bool - - // PayloadHash is the hex encoded SHA-256 hash of the request payload - // If len(PayloadHash) == 0 the signer will attempt to send the request - // as an unsigned payload. Note: Unsigned payloads only work for a subset of services. - PayloadHash string - - DisableHeaderHoisting bool - DisableURIPathEscaping bool -} - -// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a. -// The passed in request will be modified in place. -func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error { - options := s.options - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r, - PayloadHash: payloadHash, - ServiceName: service, - RegionSet: regionSet, - Credentials: credentials, - Time: signingTime.UTC(), - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - } - - signedRequest, err := signer.Build() - if err != nil { - return err - } - - logHTTPSigningInfo(ctx, options, signedRequest) - - return nil -} - -// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a -// Returns the presigned URL along with the headers that were signed with the request. -// -// PresignHTTP will not set the expires time of the presigned request -// automatically. To specify the expire duration for a request add the -// "X-Amz-Expires" query parameter on the request with the value as the -// duration in seconds the presigned URL should be considered valid for. This -// parameter is not used by all AWS services, and is most notable used by -// Amazon S3 APIs. -func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) { - options := s.options - for _, fn := range optFns { - fn(&options) - } - - signer := &httpSigner{ - Request: r, - PayloadHash: payloadHash, - ServiceName: service, - RegionSet: regionSet, - Credentials: credentials, - Time: signingTime.UTC(), - IsPreSign: true, - DisableHeaderHoisting: options.DisableHeaderHoisting, - DisableURIPathEscaping: options.DisableURIPathEscaping, - } - - signedRequest, err := signer.Build() - if err != nil { - return "", nil, err - } - - logHTTPSigningInfo(ctx, options, signedRequest) - - signedHeaders = make(http.Header) - - // For the signed headers we canonicalize the header keys in the returned map. - // This avoids situations where can standard library double headers like host header. 
For example the standard - // library will set the Host header, even if it is present in lower-case form. - for k, v := range signedRequest.SignedHeaders { - key := textproto.CanonicalMIMEHeaderKey(k) - signedHeaders[key] = append(signedHeaders[key], v...) - } - - return signedRequest.Request.URL.String(), signedHeaders, nil -} - -func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { - amzDate := s.Time.Format(timeFormat) - - if s.IsPreSign { - query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) - query.Set(amzDateKey, amzDate) - query.Set(amzAlgorithmKey, signingAlgorithm) - if len(s.Credentials.SessionToken) > 0 { - query.Set(amzSecurityTokenKey, s.Credentials.SessionToken) - } - return - } - - headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ",")) - headers.Set(amzDateKey, amzDate) - if len(s.Credentials.SessionToken) > 0 { - headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken) - } -} - -func (s *httpSigner) Build() (signedRequest, error) { - req := s.Request - - query := req.URL.Query() - headers := req.Header - - s.setRequiredSigningFields(headers, query) - - // Sort Each Query Key's Values - for key := range query { - sort.Strings(query[key]) - } - - v4Internal.SanitizeHostForHeader(req) - - credentialScope := s.buildCredentialScope() - credentialStr := s.Credentials.Context + "/" + credentialScope - if s.IsPreSign { - query.Set(amzCredentialKey, credentialStr) - } - - unsignedHeaders := headers - if s.IsPreSign && !s.DisableHeaderHoisting { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders) - for k := range urlValues { - query[k] = urlValues[k] - } - } - - host := req.URL.Host - if len(req.Host) > 0 { - host = req.Host - } - - signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) - - if s.IsPreSign { - query.Set(amzSignedHeadersKey, signedHeadersStr) - } - - rawQuery := strings.Replace(query.Encode(), "+", "%20", -1) - - canonicalURI := v4Internal.GetURIPath(req.URL) - if !s.DisableURIPathEscaping { - canonicalURI = httpbinding.EscapePath(canonicalURI, false) - } - - canonicalString := s.buildCanonicalString( - req.Method, - canonicalURI, - rawQuery, - signedHeadersStr, - canonicalHeaderStr, - ) - - strToSign := s.buildStringToSign(credentialScope, canonicalString) - signingSignature, err := s.buildSignature(strToSign) - if err != nil { - return signedRequest{}, err - } - - if s.IsPreSign { - rawQuery += "&X-Amz-Signature=" + signingSignature - } else { - headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) - } - - req.URL.RawQuery = rawQuery - - return signedRequest{ - Request: req, - SignedHeaders: signedHeaders, - CanonicalString: canonicalString, - StringToSign: strToSign, - PreSigned: s.IsPreSign, - }, nil -} - -func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { - const credential = "Credential=" - const signedHeaders = "SignedHeaders=" - const signature = "Signature=" - const commaSpace = ", " - - var parts strings.Builder - parts.Grow(len(signingAlgorithm) + 1 + - len(credential) + len(credentialStr) + len(commaSpace) + - len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) + - len(signature) + len(signingSignature), - ) - parts.WriteString(signingAlgorithm) - parts.WriteRune(' ') - 
parts.WriteString(credential) - parts.WriteString(credentialStr) - parts.WriteString(commaSpace) - parts.WriteString(signedHeaders) - parts.WriteString(signedHeadersStr) - parts.WriteString(commaSpace) - parts.WriteString(signature) - parts.WriteString(signingSignature) - return parts.String() -} - -func (s *httpSigner) buildCredentialScope() string { - return strings.Join([]string{ - s.Time.Format(shortTimeFormat), - s.ServiceName, - "aws4_request", - }, "/") - -} - -func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} - -func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { - signed = make(http.Header) - - var headers []string - const hostHeader = "host" - headers = append(headers, hostHeader) - signed[hostHeader] = append(signed[hostHeader], host) - - if length > 0 { - const contentLengthHeader = "content-length" - headers = append(headers, contentLengthHeader) - signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) - } - - for k, v := range header { - if !rule.IsValid(k) { - continue // ignored header - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := signed[lowerCaseKey]; ok { - // include additional values - signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - signed[lowerCaseKey] = v - } - sort.Strings(headers) - - signedHeaders = strings.Join(headers, ";") - - var canonicalHeaders strings.Builder - n := len(headers) - const colon = ':' - for i := 0; i < n; i++ { - if headers[i] == hostHeader { - canonicalHeaders.WriteString(hostHeader) - canonicalHeaders.WriteRune(colon) - canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) - } else { - canonicalHeaders.WriteString(headers[i]) - canonicalHeaders.WriteRune(colon) - // Trim out leading, trailing, and dedup inner spaces from signed header values. 
- values := signed[headers[i]] - for j, v := range values { - cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) - canonicalHeaders.WriteString(cleanedValue) - if j < len(values)-1 { - canonicalHeaders.WriteRune(',') - } - } - } - canonicalHeaders.WriteRune('\n') - } - canonicalHeadersStr = canonicalHeaders.String() - - return signed, signedHeaders, canonicalHeadersStr -} - -func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { - return strings.Join([]string{ - method, - uri, - query, - canonicalHeaders, - signedHeaders, - s.PayloadHash, - }, "\n") -} - -func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { - return strings.Join([]string{ - signingAlgorithm, - s.Time.Format(timeFormat), - credentialScope, - hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), - }, "\n") -} - -func makeHash(hash hash.Hash, b []byte) []byte { - hash.Reset() - hash.Write(b) - return hash.Sum(nil) -} - -func (s *httpSigner) buildSignature(strToSign string) (string, error) { - sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256) - if err != nil { - return "", err - } - return hex.EncodeToString(sig), nil -} - -const logSignInfoMsg = `Request Signature: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func logHTTPSigningInfo(ctx context.Context, options SignerOptions, r signedRequest) { - if !options.LogSigning { - return - } - signedURLMsg := "" - if r.PreSigned { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String()) - } - logger := logging.WithContext(ctx, options.Logger) - logger.Logf(logging.Debug, logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg) -} - -type signedRequest struct { - Request *http.Request - SignedHeaders http.Header - CanonicalString string - StringToSign string - PreSigned bool -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md deleted file mode 100644 index 8ab28d3a9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ /dev/null @@ -1,156 +0,0 @@ -# v1.12.1 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. - -# v1.12.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. - -# v1.11.5 (2024-09-20) - -* No change notes available for this release. - -# v1.11.4 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. - -# v1.11.3 (2024-06-28) - -* No change notes available for this release. - -# v1.11.2 (2024-03-29) - -* No change notes available for this release. - -# v1.11.1 (2024-02-21) - -* No change notes available for this release. - -# v1.11.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# v1.10.4 (2023-12-07) - -* No change notes available for this release. - -# v1.10.3 (2023-11-30) - -* No change notes available for this release. - -# v1.10.2 (2023-11-29) - -* No change notes available for this release. - -# v1.10.1 (2023-11-15) - -* No change notes available for this release. 
- -# v1.10.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). - -# v1.9.15 (2023-10-06) - -* No change notes available for this release. - -# v1.9.14 (2023-08-18) - -* No change notes available for this release. - -# v1.9.13 (2023-08-07) - -* No change notes available for this release. - -# v1.9.12 (2023-07-31) - -* No change notes available for this release. - -# v1.9.11 (2022-12-02) - -* No change notes available for this release. - -# v1.9.10 (2022-10-24) - -* No change notes available for this release. - -# v1.9.9 (2022-09-14) - -* No change notes available for this release. - -# v1.9.8 (2022-09-02) - -* No change notes available for this release. - -# v1.9.7 (2022-08-31) - -* No change notes available for this release. - -# v1.9.6 (2022-08-29) - -* No change notes available for this release. - -# v1.9.5 (2022-08-11) - -* No change notes available for this release. - -# v1.9.4 (2022-08-09) - -* No change notes available for this release. - -# v1.9.3 (2022-06-29) - -* No change notes available for this release. - -# v1.9.2 (2022-06-07) - -* No change notes available for this release. - -# v1.9.1 (2022-03-24) - -* No change notes available for this release. - -# v1.9.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.8.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.7.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.6.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.5.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.4.0 (2021-10-21) - -* **Feature**: Updated to latest version - -# v1.3.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.2.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. - -# v1.2.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go deleted file mode 100644 index 3f451fc9b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go +++ /dev/null @@ -1,176 +0,0 @@ -package acceptencoding - -import ( - "compress/gzip" - "context" - "fmt" - "io" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const acceptEncodingHeaderKey = "Accept-Encoding" -const contentEncodingHeaderKey = "Content-Encoding" - -// AddAcceptEncodingGzipOptions provides the options for the -// AddAcceptEncodingGzip middleware setup. -type AddAcceptEncodingGzipOptions struct { - Enable bool -} - -// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP -// middleware to the operation stack. This allows checksums to be correctly -// computed without disabling GZIP support. -func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error { - if options.Enable { - if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil { - return err - } - if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil { - return err - } - return nil - } - - return stack.Finalize.Add(&DisableGzip{}, middleware.Before) -} - -// DisableGzip provides the middleware that will -// disable the underlying http client automatically enabling for gzip -// decompress content-encoding support. -type DisableGzip struct{} - -// ID returns the id for the middleware. -func (*DisableGzip) ID() string { - return "DisableAcceptEncodingGzip" -} - -// HandleFinalize implements the FinalizeMiddleware interface. -func (*DisableGzip) HandleFinalize( - ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - output middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, &smithy.SerializationError{ - Err: fmt.Errorf("unknown request type %T", input.Request), - } - } - - // Explicitly enable gzip support, this will prevent the http client from - // auto extracting the zipped content. - req.Header.Set(acceptEncodingHeaderKey, "identity") - - return next.HandleFinalize(ctx, input) -} - -// EnableGzip provides a middleware to enable support for -// gzip responses, with manual decompression. This prevents the underlying HTTP -// client from performing the gzip decompression automatically. -type EnableGzip struct{} - -// ID returns the id for the middleware. -func (*EnableGzip) ID() string { - return "AcceptEncodingGzip" -} - -// HandleFinalize implements the FinalizeMiddleware interface. -func (*EnableGzip) HandleFinalize( - ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - output middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, &smithy.SerializationError{ - Err: fmt.Errorf("unknown request type %T", input.Request), - } - } - - // Explicitly enable gzip support, this will prevent the http client from - // auto extracting the zipped content. 
- req.Header.Set(acceptEncodingHeaderKey, "gzip") - - return next.HandleFinalize(ctx, input) -} - -// DecompressGzip provides the middleware for decompressing a gzip -// response from the service. -type DecompressGzip struct{} - -// ID returns the id for the middleware. -func (*DecompressGzip) ID() string { - return "DecompressGzip" -} - -// HandleDeserialize implements the DeserializeMiddlware interface. -func (*DecompressGzip) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - output, metadata, err = next.HandleDeserialize(ctx, input) - if err != nil { - return output, metadata, err - } - - resp, ok := output.RawResponse.(*smithyhttp.Response) - if !ok { - return output, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("unknown response type %T", output.RawResponse), - } - } - if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" { - return output, metadata, err - } - - // Clear content length since it will no longer be valid once the response - // body is decompressed. - resp.Header.Del("Content-Length") - resp.ContentLength = -1 - - resp.Body = wrapGzipReader(resp.Body) - - return output, metadata, err -} - -type gzipReader struct { - reader io.ReadCloser - gzip *gzip.Reader -} - -func wrapGzipReader(reader io.ReadCloser) *gzipReader { - return &gzipReader{ - reader: reader, - } -} - -// Read wraps the gzip reader around the underlying io.Reader to extract the -// response bytes on the fly. -func (g *gzipReader) Read(b []byte) (n int, err error) { - if g.gzip == nil { - g.gzip, err = gzip.NewReader(g.reader) - if err != nil { - g.gzip = nil // ensure uninitialized gzip value isn't used in close. - return 0, fmt.Errorf("failed to decompress gzip response, %w", err) - } - } - - return g.gzip.Read(b) -} - -func (g *gzipReader) Close() error { - if g.gzip == nil { - return nil - } - - if err := g.gzip.Close(); err != nil { - g.reader.Close() - return fmt.Errorf("failed to decompress gzip response, %w", err) - } - - return g.reader.Close() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go deleted file mode 100644 index 7056d9bf6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Package acceptencoding provides customizations associated with Accept Encoding Header. - -# Accept encoding gzip - -The Go HTTP client automatically supports accept-encoding and content-encoding -gzip by default. This default behavior is not desired by the SDK, and prevents -validating the response body's checksum. To prevent this the SDK must manually -control usage of content-encoding gzip. - -To control content-encoding, the SDK must always set the `Accept-Encoding` -header to a value. This prevents the HTTP client from using gzip automatically. -When gzip is enabled on the API client, the SDK's customization will control -decompressing the gzip data in order to not break the checksum validation. When -gzip is disabled, the API client will disable gzip, preventing the HTTP -client's default behavior. - -An `EnableAcceptEncodingGzip` option may or may not be present depending on the client using -the below middleware. The option if present can be used to enable auto decompressing -gzip by the SDK. 
-*/ -package acceptencoding diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go deleted file mode 100644 index 1514acbe3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package acceptencoding - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md deleted file mode 100644 index 56b54e699..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ /dev/null @@ -1,339 +0,0 @@ -# v1.5.0 (2025-01-15) - -* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.8 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.7 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.20 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.19 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.18 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.17 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.16 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.15 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.14 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.13 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.12 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.11 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.10 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.9 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.8 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.7 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.6 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.5 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.4 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2024-03-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.38 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.37 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.36 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.35 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.34 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.33 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.32 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.31 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.30 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.29 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.28 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.27 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.26 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.25 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.24 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.23 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.22 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.21 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.20 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.19 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.18 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.17 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.16 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.15 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.14 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.13 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.12 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.11 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.10 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.9 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.8 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.7 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.6 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.5 (2022-04-27) - -* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors. 
- -# v1.1.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2022-03-08) - -* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always preforming output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606) -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.0.0 (2022-02-24) - -* **Release**: New module for computing checksums -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go deleted file mode 100644 index d241bf59f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go +++ /dev/null @@ -1,326 +0,0 @@ -package checksum - -import ( - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "fmt" - "hash" - "hash/crc32" - "io" - "strings" - "sync" -) - -// Algorithm represents the checksum algorithms supported -type Algorithm string - -// Enumeration values for supported checksum Algorithms. 
-const ( - // AlgorithmCRC32C represents CRC32C hash algorithm - AlgorithmCRC32C Algorithm = "CRC32C" - - // AlgorithmCRC32 represents CRC32 hash algorithm - AlgorithmCRC32 Algorithm = "CRC32" - - // AlgorithmSHA1 represents SHA1 hash algorithm - AlgorithmSHA1 Algorithm = "SHA1" - - // AlgorithmSHA256 represents SHA256 hash algorithm - AlgorithmSHA256 Algorithm = "SHA256" - - // AlgorithmCRC64NVME represents CRC64NVME hash algorithm - AlgorithmCRC64NVME Algorithm = "CRC64NVME" -) - -var supportedAlgorithms = []Algorithm{ - AlgorithmCRC32C, - AlgorithmCRC32, - AlgorithmSHA1, - AlgorithmSHA256, -} - -func (a Algorithm) String() string { return string(a) } - -// ParseAlgorithm attempts to parse the provided value into a checksum -// algorithm, matching without case. Returns the algorithm matched, or an error -// if the algorithm wasn't matched. -func ParseAlgorithm(v string) (Algorithm, error) { - for _, a := range supportedAlgorithms { - if strings.EqualFold(string(a), v) { - return a, nil - } - } - return "", fmt.Errorf("unknown checksum algorithm, %v", v) -} - -// FilterSupportedAlgorithms filters the set of algorithms, returning a slice -// of algorithms that are supported. -func FilterSupportedAlgorithms(vs []string) []Algorithm { - found := map[Algorithm]struct{}{} - - supported := make([]Algorithm, 0, len(supportedAlgorithms)) - for _, v := range vs { - for _, a := range supportedAlgorithms { - // Only consider algorithms that are supported - if !strings.EqualFold(v, string(a)) { - continue - } - // Ignore duplicate algorithms in list. - if _, ok := found[a]; ok { - continue - } - - supported = append(supported, a) - found[a] = struct{}{} - } - } - return supported -} - -// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. Error is -// returned if the algorithm is unknown. -func NewAlgorithmHash(v Algorithm) (hash.Hash, error) { - switch v { - case AlgorithmSHA1: - return sha1.New(), nil - case AlgorithmSHA256: - return sha256.New(), nil - case AlgorithmCRC32: - return crc32.NewIEEE(), nil - case AlgorithmCRC32C: - return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil - default: - return nil, fmt.Errorf("unknown checksum algorithm, %v", v) - } -} - -// AlgorithmChecksumLength returns the length of the algorithm's checksum in -// bytes. If the algorithm is not known, an error is returned. -func AlgorithmChecksumLength(v Algorithm) (int, error) { - switch v { - case AlgorithmSHA1: - return sha1.Size, nil - case AlgorithmSHA256: - return sha256.Size, nil - case AlgorithmCRC32: - return crc32.Size, nil - case AlgorithmCRC32C: - return crc32.Size, nil - default: - return 0, fmt.Errorf("unknown checksum algorithm, %v", v) - } -} - -const awsChecksumHeaderPrefix = "x-amz-checksum-" - -// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash. -func AlgorithmHTTPHeader(v Algorithm) string { - return awsChecksumHeaderPrefix + strings.ToLower(string(v)) -} - -// base64EncodeHashSum computes base64 encoded checksum of a given running -// hash. The running hash must already have content written to it. Returns the -// byte slice of checksum and an error -func base64EncodeHashSum(h hash.Hash) []byte { - sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - return sum64 -} - -// hexEncodeHashSum computes hex encoded checksum of a given running hash. The -// running hash must already have content written to it. 
Returns the byte slice -// of checksum and an error -func hexEncodeHashSum(h hash.Hash) []byte { - sum := h.Sum(nil) - sumHex := make([]byte, hex.EncodedLen(len(sum))) - hex.Encode(sumHex, sum) - return sumHex -} - -// computeMD5Checksum computes base64 MD5 checksum of an io.Reader's contents. -// Returns the byte slice of MD5 checksum and an error. -func computeMD5Checksum(r io.Reader) ([]byte, error) { - h := md5.New() - - // Copy errors may be assumed to be from the body. - if _, err := io.Copy(h, r); err != nil { - return nil, fmt.Errorf("failed compute MD5 hash of reader, %w", err) - } - - // Encode the MD5 checksum in base64. - return base64EncodeHashSum(h), nil -} - -// computeChecksumReader provides a reader wrapping an underlying io.Reader to -// compute the checksum of the stream's bytes. -type computeChecksumReader struct { - stream io.Reader - algorithm Algorithm - hasher hash.Hash - base64ChecksumLen int - - mux sync.RWMutex - lockedChecksum string - lockedErr error -} - -// newComputeChecksumReader returns a computeChecksumReader for the stream and -// algorithm specified. Returns error if unable to create the reader, or -// algorithm is unknown. -func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) { - hasher, err := NewAlgorithmHash(algorithm) - if err != nil { - return nil, err - } - - checksumLength, err := AlgorithmChecksumLength(algorithm) - if err != nil { - return nil, err - } - - return &computeChecksumReader{ - stream: io.TeeReader(stream, hasher), - algorithm: algorithm, - hasher: hasher, - base64ChecksumLen: base64.StdEncoding.EncodedLen(checksumLength), - }, nil -} - -// Read wraps the underlying reader. When the underlying reader returns EOF, -// the checksum of the reader will be computed, and can be retrieved with -// ChecksumBase64String. -func (r *computeChecksumReader) Read(p []byte) (int, error) { - n, err := r.stream.Read(p) - if err == nil { - return n, nil - } else if err != io.EOF { - r.mux.Lock() - defer r.mux.Unlock() - - r.lockedErr = err - return n, err - } - - b := base64EncodeHashSum(r.hasher) - - r.mux.Lock() - defer r.mux.Unlock() - - r.lockedChecksum = string(b) - - return n, err -} - -func (r *computeChecksumReader) Algorithm() Algorithm { - return r.algorithm -} - -// Base64ChecksumLength returns the base64 encoded length of the checksum for -// algorithm. -func (r *computeChecksumReader) Base64ChecksumLength() int { - return r.base64ChecksumLen -} - -// Base64Checksum returns the base64 checksum for the algorithm, or error if -// the underlying reader returned a non-EOF error. -// -// Safe to be called concurrently, but will return an error until after the -// underlying reader is returns EOF. -func (r *computeChecksumReader) Base64Checksum() (string, error) { - r.mux.RLock() - defer r.mux.RUnlock() - - if r.lockedErr != nil { - return "", r.lockedErr - } - - if r.lockedChecksum == "" { - return "", fmt.Errorf( - "checksum not available yet, called before reader returns EOF", - ) - } - - return r.lockedChecksum, nil -} - -// validateChecksumReader implements io.ReadCloser interface. The wrapper -// performs checksum validation when the underlying reader has been fully read. -type validateChecksumReader struct { - originalBody io.ReadCloser - body io.Reader - hasher hash.Hash - algorithm Algorithm - expectChecksum string -} - -// newValidateChecksumReader returns a configured io.ReadCloser that performs -// checksum validation when the underlying reader has been fully read. 
-func newValidateChecksumReader( - body io.ReadCloser, - algorithm Algorithm, - expectChecksum string, -) (*validateChecksumReader, error) { - hasher, err := NewAlgorithmHash(algorithm) - if err != nil { - return nil, err - } - - return &validateChecksumReader{ - originalBody: body, - body: io.TeeReader(body, hasher), - hasher: hasher, - algorithm: algorithm, - expectChecksum: expectChecksum, - }, nil -} - -// Read attempts to read from the underlying stream while also updating the -// running hash. If the underlying stream returns with an EOF error, the -// checksum of the stream will be collected, and compared against the expected -// checksum. If the checksums do not match, an error will be returned. -// -// If a non-EOF error occurs when reading the underlying stream, that error -// will be returned and the checksum for the stream will be discarded. -func (c *validateChecksumReader) Read(p []byte) (n int, err error) { - n, err = c.body.Read(p) - if err == io.EOF { - if checksumErr := c.validateChecksum(); checksumErr != nil { - return n, checksumErr - } - } - - return n, err -} - -// Close closes the underlying reader, returning any error that occurred in the -// underlying reader. -func (c *validateChecksumReader) Close() (err error) { - return c.originalBody.Close() -} - -func (c *validateChecksumReader) validateChecksum() error { - // Compute base64 encoded checksum hash of the payload's read bytes. - v := base64EncodeHashSum(c.hasher) - if e, a := c.expectChecksum, string(v); !strings.EqualFold(e, a) { - return validationError{ - Algorithm: c.algorithm, Expect: e, Actual: a, - } - } - - return nil -} - -type validationError struct { - Algorithm Algorithm - Expect string - Actual string -} - -func (v validationError) Error() string { - return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v", - v.Algorithm, v.Expect, v.Actual) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go deleted file mode 100644 index 3bd320c43..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go +++ /dev/null @@ -1,389 +0,0 @@ -package checksum - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" -) - -const ( - crlf = "\r\n" - - // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html - defaultChunkLength = 1024 * 64 - - awsTrailerHeaderName = "x-amz-trailer" - decodedContentLengthHeaderName = "x-amz-decoded-content-length" - - contentEncodingHeaderName = "content-encoding" - awsChunkedContentEncodingHeaderValue = "aws-chunked" - - trailerKeyValueSeparator = ":" -) - -var ( - crlfBytes = []byte(crlf) - finalChunkBytes = []byte("0" + crlf) -) - -type awsChunkedEncodingOptions struct { - // The total size of the stream. For unsigned encoding this implies that - // there will only be a single chunk containing the underlying payload, - // unless ChunkLength is also specified. - StreamLength int64 - - // Set of trailer key:value pairs that will be appended to the end of the - // payload after the end chunk has been written. - Trailers map[string]awsChunkedTrailerValue - - // The maximum size of each chunk to be sent. Default value of -1, signals - // that optimal chunk length will be used automatically. ChunkSize must be - // at least 8KB. - // - // If ChunkLength and StreamLength are both specified, the stream will be - // broken up into ChunkLength chunks. 
The encoded length of the aws-chunked - // encoding can still be determined as long as all trailers, if any, have a - // fixed length. - ChunkLength int -} - -type awsChunkedTrailerValue struct { - // Function to retrieve the value of the trailer. Will only be called after - // the underlying stream returns EOF error. - Get func() (string, error) - - // If the length of the value can be pre-determined, and is constant - // specify the length. A value of -1 means the length is unknown, or - // cannot be pre-determined. - Length int -} - -// awsChunkedEncoding provides a reader that wraps the payload such that -// payload is read as a single aws-chunk payload. This reader can only be used -// if the content length of payload is known. Content-Length is used as size of -// the single payload chunk. The final chunk and trailing checksum is appended -// at the end. -// -// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition -// -// Here is the aws-chunked payload stream as read from the awsChunkedEncoding -// if original request stream is "Hello world", and checksum hash used is SHA256 -// -// \r\n -// Hello world\r\n -// 0\r\n -// x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n -// \r\n -type awsChunkedEncoding struct { - options awsChunkedEncodingOptions - - encodedStream io.Reader - trailerEncodedLength int -} - -// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured -// for unsigned aws-chunked content encoding. Any additional trailers that need -// to be appended after the end chunk must be included as via Trailer -// callbacks. -func newUnsignedAWSChunkedEncoding( - stream io.Reader, - optFns ...func(*awsChunkedEncodingOptions), -) *awsChunkedEncoding { - options := awsChunkedEncodingOptions{ - Trailers: map[string]awsChunkedTrailerValue{}, - StreamLength: -1, - ChunkLength: -1, - } - for _, fn := range optFns { - fn(&options) - } - - var chunkReader io.Reader - if options.ChunkLength != -1 || options.StreamLength == -1 { - if options.ChunkLength == -1 { - options.ChunkLength = defaultChunkLength - } - chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength) - } else { - chunkReader = newUnsignedChunkReader(stream, options.StreamLength) - } - - trailerReader := newAWSChunkedTrailerReader(options.Trailers) - - return &awsChunkedEncoding{ - options: options, - encodedStream: io.MultiReader(chunkReader, - trailerReader, - bytes.NewBuffer(crlfBytes), - ), - trailerEncodedLength: trailerReader.EncodedLength(), - } -} - -// EncodedLength returns the final length of the aws-chunked content encoded -// stream if it can be determined without reading the underlying stream or lazy -// header values, otherwise -1 is returned. -func (e *awsChunkedEncoding) EncodedLength() int64 { - var length int64 - if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 { - return -1 - } - - if e.options.StreamLength != 0 { - // If the stream length is known, and there is no chunk length specified, - // only a single chunk will be used. Otherwise the stream length needs to - // include the multiple chunk padding content. 
- if e.options.ChunkLength == -1 { - length += getUnsignedChunkBytesLength(e.options.StreamLength) - - } else { - // Compute chunk header and payload length - numChunks := e.options.StreamLength / int64(e.options.ChunkLength) - length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength)) - if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 { - length += getUnsignedChunkBytesLength(remainder) - } - } - } - - // End chunk - length += int64(len(finalChunkBytes)) - - // Trailers - length += int64(e.trailerEncodedLength) - - // Encoding terminator - length += int64(len(crlf)) - - return length -} - -func getUnsignedChunkBytesLength(payloadLength int64) int64 { - payloadLengthStr := strconv.FormatInt(payloadLength, 16) - return int64(len(payloadLengthStr)) + int64(len(crlf)) + payloadLength + int64(len(crlf)) -} - -// HTTPHeaders returns the set of headers that must be included the request for -// aws-chunked to work. This includes the content-encoding: aws-chunked header. -// -// If there are multiple layered content encoding, the aws-chunked encoding -// must be appended to the previous layers the stream's encoding. The best way -// to do this is to append all header values returned to the HTTP request's set -// of headers. -func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string { - headers := map[string][]string{ - contentEncodingHeaderName: { - awsChunkedContentEncodingHeaderValue, - }, - } - - if len(e.options.Trailers) != 0 { - trailers := make([]string, 0, len(e.options.Trailers)) - for name := range e.options.Trailers { - trailers = append(trailers, strings.ToLower(name)) - } - headers[awsTrailerHeaderName] = trailers - } - - return headers -} - -func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) { - return e.encodedStream.Read(b) -} - -// awsChunkedTrailerReader provides a lazy reader for reading of aws-chunked -// content encoded trailers. The trailer values will not be retrieved until the -// reader is read from. -type awsChunkedTrailerReader struct { - reader *bytes.Buffer - trailers map[string]awsChunkedTrailerValue - trailerEncodedLength int -} - -// newAWSChunkedTrailerReader returns an initialized awsChunkedTrailerReader to -// lazy reading aws-chunk content encoded trailers. -func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader { - return &awsChunkedTrailerReader{ - trailers: trailers, - trailerEncodedLength: trailerEncodedLength(trailers), - } -} - -func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) { - for name, trailer := range trailers { - length += len(name) + len(trailerKeyValueSeparator) - l := trailer.Length - if l == -1 { - return -1 - } - length += l + len(crlf) - } - - return length -} - -// EncodedLength returns the length of the encoded trailers if the length could -// be determined without retrieving the header values. Returns -1 if length is -// unknown. -func (r *awsChunkedTrailerReader) EncodedLength() (length int) { - return r.trailerEncodedLength -} - -// Read populates the passed in byte slice with bytes from the encoded -// trailers. Will lazy read header values first time Read is called. 
-func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) { - if r.trailerEncodedLength == 0 { - return 0, io.EOF - } - - if r.reader == nil { - trailerLen := r.trailerEncodedLength - if r.trailerEncodedLength == -1 { - trailerLen = 0 - } - r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen)) - for name, trailer := range r.trailers { - r.reader.WriteString(name) - r.reader.WriteString(trailerKeyValueSeparator) - v, err := trailer.Get() - if err != nil { - return 0, fmt.Errorf("failed to get trailer value, %w", err) - } - r.reader.WriteString(v) - r.reader.WriteString(crlf) - } - } - - return r.reader.Read(p) -} - -// newUnsignedChunkReader returns an io.Reader encoding the underlying reader -// as unsigned aws-chunked chunks. The returned reader will also include the -// end chunk, but not the aws-chunked final `crlf` segment so trailers can be -// added. -// -// If the payload size is -1 for unknown length the content will be buffered in -// defaultChunkLength chunks before wrapped in aws-chunked chunk encoding. -func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader { - if payloadSize == -1 { - return newBufferedAWSChunkReader(reader, defaultChunkLength) - } - - var endChunk bytes.Buffer - if payloadSize == 0 { - endChunk.Write(finalChunkBytes) - return &endChunk - } - - endChunk.WriteString(crlf) - endChunk.Write(finalChunkBytes) - - var header bytes.Buffer - header.WriteString(strconv.FormatInt(payloadSize, 16)) - header.WriteString(crlf) - return io.MultiReader( - &header, - reader, - &endChunk, - ) -} - -// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader. -// Will include end chunk, but not the aws-chunked final `crlf` segment so -// trailers can be added. -// -// Note does not implement support for chunk extensions, e.g. chunk signing. -type bufferedAWSChunkReader struct { - reader io.Reader - chunkSize int - chunkSizeStr string - - headerBuffer *bytes.Buffer - chunkBuffer *bytes.Buffer - - multiReader io.Reader - multiReaderLen int - endChunkDone bool -} - -// newBufferedAWSChunkReader returns an bufferedAWSChunkReader for reading -// aws-chunked encoded chunks. -func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader { - return &bufferedAWSChunkReader{ - reader: reader, - chunkSize: chunkSize, - chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16), - - headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)), - chunkBuffer: bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))), - } -} - -// Read attempts to read from the underlying io.Reader writing aws-chunked -// chunk encoded bytes to p. When the underlying io.Reader has been completed -// read the end chunk will be available. Once the end chunk is read, the reader -// will return EOF. -func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) { - if r.multiReaderLen == 0 && r.endChunkDone { - return 0, io.EOF - } - if r.multiReader == nil || r.multiReaderLen == 0 { - r.multiReader, r.multiReaderLen, err = r.newMultiReader() - if err != nil { - return 0, err - } - } - - n, err = r.multiReader.Read(p) - r.multiReaderLen -= n - - if err == io.EOF && !r.endChunkDone { - // Edge case handling when the multi-reader has been completely read, - // and returned an EOF, make sure that EOF only gets returned if the - // end chunk was included in the multi-reader. Otherwise, the next call - // to read will initialize the next chunk's multi-reader. 
- err = nil - } - return n, err -} - -// newMultiReader returns a new io.Reader for wrapping the next chunk. Will -// return an error if the underlying reader can not be read from. Will never -// return io.EOF. -func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) { - // io.Copy eats the io.EOF returned by io.LimitReader. Any error that - // occurs here is due to an actual read error. - n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize))) - if err != nil { - return nil, 0, err - } - if n == 0 { - // Early exit writing out only the end chunk. This does not include - // aws-chunk's final `crlf` so that trailers can still be added by - // upstream reader. - r.headerBuffer.Reset() - r.headerBuffer.WriteString("0") - r.headerBuffer.WriteString(crlf) - r.endChunkDone = true - - return r.headerBuffer, r.headerBuffer.Len(), nil - } - r.chunkBuffer.WriteString(crlf) - - chunkSizeStr := r.chunkSizeStr - if int(n) != r.chunkSize { - chunkSizeStr = strconv.FormatInt(n, 16) - } - - r.headerBuffer.Reset() - r.headerBuffer.WriteString(chunkSizeStr) - r.headerBuffer.WriteString(crlf) - - return io.MultiReader( - r.headerBuffer, - r.chunkBuffer, - ), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go deleted file mode 100644 index d520eabf3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package checksum - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.5.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go deleted file mode 100644 index 11243a804..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go +++ /dev/null @@ -1,190 +0,0 @@ -package checksum - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/smithy-go/middleware" -) - -// InputMiddlewareOptions provides the options for the request -// checksum middleware setup. -type InputMiddlewareOptions struct { - // GetAlgorithm is a function to get the checksum algorithm of the - // input payload from the input parameters. - // - // Given the input parameter value, the function must return the algorithm - // and true, or false if no algorithm is specified. - GetAlgorithm func(interface{}) (string, bool) - - // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. - // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, - // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated - RequireChecksum bool - - // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. 
If RequireChecksum is - // set to true, checksum will be calculated and this field will be ignored, otherwise - // RequestChecksumCalculation will be used to indicate if checksum will be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation - - // Enables support for wrapping the serialized input payload with a - // content-encoding: aws-check wrapper, and including a trailer for the - // algorithm's checksum value. - // - // The checksum will not be computed, nor added as trailing checksum, if - // the Algorithm's header is already set on the request. - EnableTrailingChecksum bool - - // Enables support for computing the SHA256 checksum of input payloads - // along with the algorithm specified checksum. Prevents downstream - // middleware handlers (computePayloadSHA256) re-reading the payload. - // - // The SHA256 payload checksum will only be used for computed for requests - // that are not TLS, or do not enable trailing checksums. - // - // The SHA256 payload hash will not be computed, if the Algorithm's header - // is already set on the request. - EnableComputeSHA256PayloadHash bool - - // Enables support for setting the aws-chunked decoded content length - // header for the decoded length of the underlying stream. Will only be set - // when used with trailing checksums, and aws-chunked content-encoding. - EnableDecodedContentLengthHeader bool -} - -// AddInputMiddleware adds the middleware for performing checksum computing -// of request payloads, and checksum validation of response payloads. -func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) { - // TODO ensure this works correctly with presigned URLs - - // Middleware stack: - // * (OK)(Initialize) --none-- - // * (OK)(Serialize) EndpointResolver - // * (OK)(Build) ComputeContentLength - // * (AD)(Build) Header ComputeInputPayloadChecksum - // * SIGNED Payload - If HTTP && not support trailing checksum - // * UNSIGNED Payload - If HTTPS && not support trailing checksum - // * (RM)(Build) ContentChecksum - OK to remove - // * (OK)(Build) ComputePayloadHash - // * v4.dynamicPayloadSigningMiddleware - // * v4.computePayloadSHA256 - // * v4.unsignedPayload - // (OK)(Build) Set computedPayloadHash header - // * (OK)(Finalize) Retry - // * (AD)(Finalize) Trailer ComputeInputPayloadChecksum, - // * Requires HTTPS && support trailing checksum - // * UNSIGNED Payload - // * Finalize run if HTTPS && support trailing checksum - // * (OK)(Finalize) Signing - // * (OK)(Deserialize) --none-- - - // Initial checksum configuration look up middleware - err = stack.Initialize.Add(&setupInputContext{ - GetAlgorithm: options.GetAlgorithm, - RequireChecksum: options.RequireChecksum, - RequestChecksumCalculation: options.RequestChecksumCalculation, - }, middleware.Before) - if err != nil { - return err - } - - stack.Build.Remove("ContentChecksum") - - inputChecksum := &computeInputPayloadChecksum{ - EnableTrailingChecksum: options.EnableTrailingChecksum, - EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, - EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, - } - if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil { - return err - } - - // If trailing checksum is not supported no need for finalize handler to be added. 
- if options.EnableTrailingChecksum { - trailerMiddleware := &addInputChecksumTrailer{ - EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, - EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, - EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, - } - if err := stack.Finalize.Insert(trailerMiddleware, "Retry", middleware.After); err != nil { - return err - } - } - - return nil -} - -// RemoveInputMiddleware Removes the compute input payload checksum middleware -// handlers from the stack. -func RemoveInputMiddleware(stack *middleware.Stack) { - id := (*setupInputContext)(nil).ID() - stack.Initialize.Remove(id) - - id = (*computeInputPayloadChecksum)(nil).ID() - stack.Finalize.Remove(id) -} - -// OutputMiddlewareOptions provides options for configuring output checksum -// validation middleware. -type OutputMiddlewareOptions struct { - // GetValidationMode is a function to get the checksum validation - // mode of the output payload from the input parameters. - // - // Given the input parameter value, the function must return the validation - // mode and true, or false if no mode is specified. - GetValidationMode func(interface{}) (string, bool) - - // ResponseChecksumValidation is the user config to opt-in/out response checksum validation - ResponseChecksumValidation aws.ResponseChecksumValidation - - // The set of checksum algorithms that should be used for response payload - // checksum validation. The algorithm(s) used will be a union of the - // output's returned algorithms and this set. - // - // Only the first algorithm in the union is currently used. - ValidationAlgorithms []string - - // If set the middleware will ignore output multipart checksums. Otherwise - // a checksum format error will be returned by the middleware. - IgnoreMultipartValidation bool - - // When set the middleware will log when output does not have checksum or - // algorithm to validate. - LogValidationSkipped bool - - // When set the middleware will log when the output contains a multipart - // checksum that was, skipped and not validated. - LogMultipartValidationSkipped bool -} - -// AddOutputMiddleware adds the middleware for validating response payload's -// checksum. -func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error { - err := stack.Initialize.Add(&setupOutputContext{ - GetValidationMode: options.GetValidationMode, - ResponseChecksumValidation: options.ResponseChecksumValidation, - }, middleware.Before) - if err != nil { - return err - } - - // Resolve a supported priority order list of algorithms to validate. - algorithms := FilterSupportedAlgorithms(options.ValidationAlgorithms) - - m := &validateOutputPayloadChecksum{ - Algorithms: algorithms, - IgnoreMultipartValidation: options.IgnoreMultipartValidation, - LogMultipartValidationSkipped: options.LogMultipartValidationSkipped, - LogValidationSkipped: options.LogValidationSkipped, - } - - return stack.Deserialize.Add(m, middleware.After) -} - -// RemoveOutputMiddleware Removes the compute input payload checksum middleware -// handlers from the stack. 
-func RemoveOutputMiddleware(stack *middleware.Stack) { - id := (*setupOutputContext)(nil).ID() - stack.Initialize.Remove(id) - - id = (*validateOutputPayloadChecksum)(nil).ID() - stack.Deserialize.Remove(id) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go deleted file mode 100644 index 861a44293..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go +++ /dev/null @@ -1,90 +0,0 @@ -package checksum - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -var supportedChecksumFeatures = map[Algorithm]awsmiddleware.UserAgentFeature{ - AlgorithmCRC32: awsmiddleware.UserAgentFeatureRequestChecksumCRC32, - AlgorithmCRC32C: awsmiddleware.UserAgentFeatureRequestChecksumCRC32C, - AlgorithmSHA1: awsmiddleware.UserAgentFeatureRequestChecksumSHA1, - AlgorithmSHA256: awsmiddleware.UserAgentFeatureRequestChecksumSHA256, - AlgorithmCRC64NVME: awsmiddleware.UserAgentFeatureRequestChecksumCRC64, -} - -// RequestChecksumMetricsTracking is the middleware to track operation request's checksum usage -type RequestChecksumMetricsTracking struct { - RequestChecksumCalculation aws.RequestChecksumCalculation - UserAgent *awsmiddleware.RequestUserAgent -} - -// ID provides the middleware identifier -func (m *RequestChecksumMetricsTracking) ID() string { - return "AWSChecksum:RequestMetricsTracking" -} - -// HandleBuild checks request checksum config and checksum value sent -// and sends corresponding feature id to user agent -func (m *RequestChecksumMetricsTracking) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - switch m.RequestChecksumCalculation { - case aws.RequestChecksumCalculationWhenSupported: - m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenSupported) - case aws.RequestChecksumCalculationWhenRequired: - m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenRequired) - } - - for algo, feat := range supportedChecksumFeatures { - checksumHeader := AlgorithmHTTPHeader(algo) - if checksum := req.Header.Get(checksumHeader); checksum != "" { - m.UserAgent.AddUserAgentFeature(feat) - } - } - - return next.HandleBuild(ctx, in) -} - -// ResponseChecksumMetricsTracking is the middleware to track operation response's checksum usage -type ResponseChecksumMetricsTracking struct { - ResponseChecksumValidation aws.ResponseChecksumValidation - UserAgent *awsmiddleware.RequestUserAgent -} - -// ID provides the middleware identifier -func (m *ResponseChecksumMetricsTracking) ID() string { - return "AWSChecksum:ResponseMetricsTracking" -} - -// HandleBuild checks the response checksum config and sends corresponding feature id to user agent -func (m *ResponseChecksumMetricsTracking) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - 
return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - switch m.ResponseChecksumValidation { - case aws.ResponseChecksumValidationWhenSupported: - m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenSupported) - case aws.ResponseChecksumValidationWhenRequired: - m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenRequired) - } - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go deleted file mode 100644 index 71a51bf74..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go +++ /dev/null @@ -1,428 +0,0 @@ -package checksum - -import ( - "context" - "crypto/sha256" - "fmt" - "hash" - "io" - "strconv" - "strings" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const ( - streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" -) - -// computedInputChecksumsKey is the metadata key for recording the algorithm the -// checksum was computed for and the checksum value. -type computedInputChecksumsKey struct{} - -// GetComputedInputChecksums returns the map of checksum algorithm to their -// computed value stored in the middleware Metadata. Returns false if no values -// were stored in the Metadata. -func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) { - vs, ok := m.Get(computedInputChecksumsKey{}).(map[string]string) - return vs, ok -} - -// SetComputedInputChecksums stores the map of checksum algorithm to their -// computed value in the middleware Metadata. Overwrites any values that -// currently exist in the metadata. -func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) { - m.Set(computedInputChecksumsKey{}, vs) -} - -// computeInputPayloadChecksum middleware computes payload checksum -type computeInputPayloadChecksum struct { - // Enables support for wrapping the serialized input payload with a - // content-encoding: aws-check wrapper, and including a trailer for the - // algorithm's checksum value. - // - // The checksum will not be computed, nor added as trailing checksum, if - // the Algorithm's header is already set on the request. - EnableTrailingChecksum bool - - // Enables support for computing the SHA256 checksum of input payloads - // along with the algorithm specified checksum. Prevents downstream - // middleware handlers (computePayloadSHA256) re-reading the payload. - // - // The SHA256 payload hash will only be used for computed for requests - // that are not TLS, or do not enable trailing checksums. - // - // The SHA256 payload hash will not be computed, if the Algorithm's header - // is already set on the request. - EnableComputePayloadHash bool - - // Enables support for setting the aws-chunked decoded content length - // header for the decoded length of the underlying stream. Will only be set - // when used with trailing checksums, and aws-chunked content-encoding. - EnableDecodedContentLengthHeader bool - - useTrailer bool -} - -type useTrailer struct{} - -// ID provides the middleware's identifier. 
-func (m *computeInputPayloadChecksum) ID() string { - return "AWSChecksum:ComputeInputPayloadChecksum" -} - -type computeInputHeaderChecksumError struct { - Msg string - Err error -} - -func (e computeInputHeaderChecksumError) Error() string { - const intro = "compute input header checksum failed" - - if e.Err != nil { - return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err) - } - - return fmt.Sprintf("%s, %s", intro, e.Msg) -} -func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err } - -// HandleBuild handles computing the payload's checksum, in the following cases: -// - Is HTTP, not HTTPS -// - RequireChecksum is true, and no checksums were specified via the Input -// - Trailing checksums are not supported -// -// The build handler must be inserted in the stack before ContentPayloadHash -// and after ComputeContentLength. -func (m *computeInputPayloadChecksum) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - var checksum string - algorithm, ok, err := getInputAlgorithm(ctx) - if err != nil { - return out, metadata, err - } - if !ok { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, computeInputHeaderChecksumError{ - Msg: fmt.Sprintf("unknown request type %T", req), - } - } - - defer func() { - if algorithm == "" || checksum == "" || err != nil { - return - } - - // Record the checksum and algorithm that was computed - SetComputedInputChecksums(&metadata, map[string]string{ - string(algorithm): checksum, - }) - }() - - // If any checksum header is already set nothing to do. - for header := range req.Header { - h := strings.ToUpper(header) - if strings.HasPrefix(h, "X-AMZ-CHECKSUM-") { - algorithm = Algorithm(strings.TrimPrefix(h, "X-AMZ-CHECKSUM-")) - checksum = req.Header.Get(header) - return next.HandleFinalize(ctx, in) - } - } - - computePayloadHash := m.EnableComputePayloadHash - if v := v4.GetPayloadHash(ctx); v != "" { - computePayloadHash = false - } - - stream := req.GetStream() - streamLength, err := getRequestStreamLength(req) - if err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to determine stream length", - Err: err, - } - } - - // If trailing checksums are supported, the request is HTTPS, and the - // stream is not nil or empty, instead switch to a trailing checksum. - // - // Nil and empty streams will always be handled as a request header, - // regardless if the operation supports trailing checksums or not. - if req.IsHTTPS() && !presignedurlcust.GetIsPresigning(ctx) { - if stream != nil && streamLength != 0 && m.EnableTrailingChecksum { - if m.EnableComputePayloadHash { - // ContentSHA256Header middleware handles the header - ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash) - } - m.useTrailer = true - ctx = middleware.WithStackValue(ctx, useTrailer{}, true) - return next.HandleFinalize(ctx, in) - } - - // If trailing checksums are not enabled but protocol is still HTTPS - // disabling computing the payload hash. Downstream middleware handler - // (ComputetPayloadHash) will set the payload hash to unsigned payload, - // if signing was used. - computePayloadHash = false - } - - // Only seekable streams are supported for non-trailing checksums, because - // the stream needs to be rewound before the handler can continue. 
- if stream != nil && !req.IsStreamSeekable() { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "unseekable stream is not supported without TLS and trailing checksum", - } - } - - var sha256Checksum string - checksum, sha256Checksum, err = computeStreamChecksum( - algorithm, stream, computePayloadHash) - if err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to compute stream checksum", - Err: err, - } - } - - if err := req.RewindStream(); err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to rewind stream", - Err: err, - } - } - - checksumHeader := AlgorithmHTTPHeader(algorithm) - req.Header.Set(checksumHeader, checksum) - - if computePayloadHash { - ctx = v4.SetPayloadHash(ctx, sha256Checksum) - } - - return next.HandleFinalize(ctx, in) -} - -type computeInputTrailingChecksumError struct { - Msg string - Err error -} - -func (e computeInputTrailingChecksumError) Error() string { - const intro = "compute input trailing checksum failed" - - if e.Err != nil { - return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err) - } - - return fmt.Sprintf("%s, %s", intro, e.Msg) -} -func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err } - -// addInputChecksumTrailer -// - Is HTTPS, not HTTP -// - A checksum was specified via the Input -// - Trailing checksums are supported. -type addInputChecksumTrailer struct { - EnableTrailingChecksum bool - EnableComputePayloadHash bool - EnableDecodedContentLengthHeader bool -} - -// ID identifies this middleware. -func (*addInputChecksumTrailer) ID() string { - return "addInputChecksumTrailer" -} - -// HandleFinalize wraps the request body to write the trailing checksum. -func (m *addInputChecksumTrailer) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - algorithm, ok, err := getInputAlgorithm(ctx) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to get algorithm", - Err: err, - } - } else if !ok { - return next.HandleFinalize(ctx, in) - } - - if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled { - return next.HandleFinalize(ctx, in) - } - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, computeInputTrailingChecksumError{ - Msg: fmt.Sprintf("unknown request type %T", req), - } - } - - // Trailing checksums are only supported when TLS is enabled. - if !req.IsHTTPS() { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "HTTPS required", - } - } - - // If any checksum header is already set nothing to do. - for header := range req.Header { - if strings.HasPrefix(strings.ToLower(header), "x-amz-checksum-") { - return next.HandleFinalize(ctx, in) - } - } - - stream := req.GetStream() - streamLength, err := getRequestStreamLength(req) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to determine stream length", - Err: err, - } - } - - if stream == nil || streamLength == 0 { - // Nil and empty streams are handled by the Build handler. They are not - // supported by the trailing checksums finalize handler. There is no - // benefit to sending them as trailers compared to headers. 
- return out, metadata, computeInputTrailingChecksumError{ - Msg: "nil or empty streams are not supported", - } - } - - checksumReader, err := newComputeChecksumReader(stream, algorithm) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to created checksum reader", - Err: err, - } - } - - awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader, - func(o *awsChunkedEncodingOptions) { - o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{ - Get: checksumReader.Base64Checksum, - Length: checksumReader.Base64ChecksumLength(), - } - o.StreamLength = streamLength - }) - - for key, values := range awsChunkedReader.HTTPHeaders() { - for _, value := range values { - req.Header.Add(key, value) - } - } - - // Setting the stream on the request will create a copy. The content length - // is not updated until after the request is copied to prevent impacting - // upstream middleware. - req, err = req.SetStream(awsChunkedReader) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed updating request to trailing checksum wrapped stream", - Err: err, - } - } - req.ContentLength = awsChunkedReader.EncodedLength() - in.Request = req - - // Add decoded content length header if original stream's content length is known. - if streamLength != -1 && m.EnableDecodedContentLengthHeader { - req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10)) - } - - out, metadata, err = next.HandleFinalize(ctx, in) - if err == nil { - checksum, err := checksumReader.Base64Checksum() - if err != nil { - return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err) - } - - // Record the checksum and algorithm that was computed - SetComputedInputChecksums(&metadata, map[string]string{ - string(algorithm): checksum, - }) - } - - return out, metadata, err -} - -func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) { - ctxAlgorithm := internalcontext.GetChecksumInputAlgorithm(ctx) - if ctxAlgorithm == "" { - return "", false, nil - } - - algorithm, err := ParseAlgorithm(ctxAlgorithm) - if err != nil { - return "", false, fmt.Errorf( - "failed to parse algorithm, %w", err) - } - - return algorithm, true, nil -} - -func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) ( - checksum string, sha256Checksum string, err error, -) { - hasher, err := NewAlgorithmHash(algorithm) - if err != nil { - return "", "", fmt.Errorf( - "failed to get hasher for checksum algorithm, %w", err) - } - - var sha256Hasher hash.Hash - var batchHasher io.Writer = hasher - - // Compute payload hash for the protocol. To prevent another handler - // (computePayloadSHA256) re-reading body also compute the SHA256 for - // request signing. If configured checksum algorithm is SHA256, don't - // double wrap stream with another SHA256 hasher. 
- if computePayloadHash && algorithm != AlgorithmSHA256 { - sha256Hasher = sha256.New() - batchHasher = io.MultiWriter(hasher, sha256Hasher) - } - - if stream != nil { - if _, err = io.Copy(batchHasher, stream); err != nil { - return "", "", fmt.Errorf( - "failed to read stream to compute hash, %w", err) - } - } - - checksum = string(base64EncodeHashSum(hasher)) - if computePayloadHash { - if algorithm != AlgorithmSHA256 { - sha256Checksum = string(hexEncodeHashSum(sha256Hasher)) - } else { - sha256Checksum = string(hexEncodeHashSum(hasher)) - } - } - - return checksum, sha256Checksum, nil -} - -func getRequestStreamLength(req *smithyhttp.Request) (int64, error) { - if v := req.ContentLength; v > 0 { - return v, nil - } - - if length, ok, err := req.StreamLength(); err != nil { - return 0, fmt.Errorf("failed getting request stream's length, %w", err) - } else if ok { - return length, nil - } - - return -1, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go deleted file mode 100644 index 7fcd29a38..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go +++ /dev/null @@ -1,114 +0,0 @@ -package checksum - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go/middleware" -) - -const ( - checksumValidationModeEnabled = "ENABLED" -) - -// setupChecksumContext is the initial middleware that looks up the input -// used to configure checksum behavior. This middleware must be executed before -// input validation step or any other checksum middleware. -type setupInputContext struct { - // GetAlgorithm is a function to get the checksum algorithm of the - // input payload from the input parameters. - // - // Given the input parameter value, the function must return the algorithm - // and true, or false if no algorithm is specified. - GetAlgorithm func(interface{}) (string, bool) - - // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. - // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, - // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated - RequireChecksum bool - - // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is - // set to true, checksum will be calculated and this field will be ignored, otherwise - // RequestChecksumCalculation will be used to indicate if checksum will be calculated - RequestChecksumCalculation aws.RequestChecksumCalculation -} - -// ID for the middleware -func (m *setupInputContext) ID() string { - return "AWSChecksum:SetupInputContext" -} - -// HandleInitialize initialization middleware that setups up the checksum -// context based on the input parameters provided in the stack. 
-func (m *setupInputContext) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if algorithm, ok := m.GetAlgorithm(in.Parameters); ok { - ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm) - return next.HandleInitialize(ctx, in) - } - - if m.RequireChecksum || m.RequestChecksumCalculation == aws.RequestChecksumCalculationWhenSupported { - ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) - } - - return next.HandleInitialize(ctx, in) -} - -type setupOutputContext struct { - // GetValidationMode is a function to get the checksum validation - // mode of the output payload from the input parameters. - // - // Given the input parameter value, the function must return the validation - // mode and true, or false if no mode is specified. - GetValidationMode func(interface{}) (string, bool) - - // ResponseChecksumValidation states user config to opt-in/out checksum validation - ResponseChecksumValidation aws.ResponseChecksumValidation -} - -// ID for the middleware -func (m *setupOutputContext) ID() string { - return "AWSChecksum:SetupOutputContext" -} - -// HandleInitialize initialization middleware that setups up the checksum -// context based on the input parameters provided in the stack. -func (m *setupOutputContext) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - - mode, _ := m.GetValidationMode(in.Parameters) - - if m.ResponseChecksumValidation == aws.ResponseChecksumValidationWhenSupported || mode == checksumValidationModeEnabled { - ctx = setContextOutputValidationMode(ctx, checksumValidationModeEnabled) - } - - return next.HandleInitialize(ctx, in) -} - -// outputValidationModeKey is the key set on context used to identify if -// output checksum validation is enabled. -type outputValidationModeKey struct{} - -// setContextOutputValidationMode sets the request checksum -// algorithm on the context. -// -// Scoped to stack values. -func setContextOutputValidationMode(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, outputValidationModeKey{}, value) -} - -// getContextOutputValidationMode returns response checksum validation state, -// if one was specified. Empty string is returned if one is not specified. -// -// Scoped to stack values. -func getContextOutputValidationMode(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, outputValidationModeKey{}).(string) - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go deleted file mode 100644 index 14096a1ce..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go +++ /dev/null @@ -1,128 +0,0 @@ -package checksum - -import ( - "context" - "fmt" - "strings" - - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// outputValidationAlgorithmsUsedKey is the metadata key for indexing the algorithms -// that were used, by the middleware's validation. 
-type outputValidationAlgorithmsUsedKey struct{} - -// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used -// stored in the middleware Metadata. Returns false if no algorithms were -// stored in the Metadata. -func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) { - vs, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string) - return vs, ok -} - -// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the -// middleware Metadata. -func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, vs []string) { - m.Set(outputValidationAlgorithmsUsedKey{}, vs) -} - -// validateOutputPayloadChecksum middleware computes payload checksum of the -// received response and validates with checksum returned by the service. -type validateOutputPayloadChecksum struct { - // Algorithms represents a priority-ordered list of valid checksum - // algorithm that should be validated when present in HTTP response - // headers. - Algorithms []Algorithm - - // IgnoreMultipartValidation indicates multipart checksums ending with "-#" - // will be ignored. - IgnoreMultipartValidation bool - - // When set the middleware will log when output does not have checksum or - // algorithm to validate. - LogValidationSkipped bool - - // When set the middleware will log when the output contains a multipart - // checksum that was, skipped and not validated. - LogMultipartValidationSkipped bool -} - -func (m *validateOutputPayloadChecksum) ID() string { - return "AWSChecksum:ValidateOutputPayloadChecksum" -} - -// HandleDeserialize is a Deserialize middleware that wraps the HTTP response -// body with an io.ReadCloser that will validate its checksum. -func (m *validateOutputPayloadChecksum) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - if mode := getContextOutputValidationMode(ctx); mode != checksumValidationModeEnabled { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("unknown transport type %T", out.RawResponse), - } - } - - var expectedChecksum string - var algorithmToUse Algorithm - for _, algorithm := range m.Algorithms { - value := response.Header.Get(AlgorithmHTTPHeader(algorithm)) - if len(value) == 0 { - continue - } - - expectedChecksum = value - algorithmToUse = algorithm - } - - logger := middleware.GetLogger(ctx) - - // Skip validation if no checksum algorithm or checksum is available. - if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 { - if m.LogValidationSkipped { - // TODO this probably should have more information about the - // operation output that won't be validated. - logger.Logf(logging.Warn, - "Response has no supported checksum. Not validating response payload.") - } - return out, metadata, nil - } - - // Ignore multipart validation - if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") { - if m.LogMultipartValidationSkipped { - // TODO this probably should have more information about the - // operation output that won't be validated. 
- logger.Logf(logging.Warn, "Skipped validation of multipart checksum.") - } - return out, metadata, nil - } - - body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum) - if err != nil { - return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err) - } - response.Body = body - - // Update the metadata to include the set of the checksum algorithms that - // will be validated. - SetOutputValidationAlgorithmsUsed(&metadata, []string{ - string(algorithmToUse), - }) - - return out, metadata, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md deleted file mode 100644 index 234a1efe2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ /dev/null @@ -1,401 +0,0 @@ -# v1.12.9 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.20 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.19 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.18 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.17 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.16 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.15 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.14 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.13 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.12 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.11 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.10 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.9 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.8 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.7 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.6 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.5 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.4 (2024-03-05) - -* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility. - -# v1.11.3 (2024-03-04) - -* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API. - -# v1.11.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.37 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.36 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.35 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.34 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.33 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.32 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.31 (2023-07-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.30 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.29 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.28 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.27 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.26 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.25 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.1 (2022-03-23) - -* 
**Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-11-06) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.1.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
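The checksum middleware removed above wraps request payloads in unsigned aws-chunked content encoding and appends the computed checksum as an HTTP trailer. A minimal standalone sketch of that wire format follows; it is illustrative only, not part of the SDK or of this change, and it assumes a ":" key/value separator, the x-amz-checksum-crc32 trailer name, and a base64-encoded big-endian CRC32 sum.

package main

import (
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"strconv"
)

// encodeAWSChunked frames payload as unsigned aws-chunked data and appends a
// CRC32 checksum trailer, mirroring the shape that the deleted
// bufferedAWSChunkReader and awsChunkedTrailerReader emit at runtime.
func encodeAWSChunked(payload []byte, chunkSize int) []byte {
	const crlf = "\r\n"
	var out bytes.Buffer

	// Body: one "<hex-size>\r\n<bytes>\r\n" frame per chunk.
	for off := 0; off < len(payload); off += chunkSize {
		end := off + chunkSize
		if end > len(payload) {
			end = len(payload)
		}
		out.WriteString(strconv.FormatInt(int64(end-off), 16))
		out.WriteString(crlf)
		out.Write(payload[off:end])
		out.WriteString(crlf)
	}

	// Zero-length end chunk, then the checksum as a trailer line. The value
	// here is the base64 of the big-endian CRC32 of the whole payload (an
	// assumption for this sketch; the SDK base64-encodes the configured
	// algorithm's hash sum).
	out.WriteString("0" + crlf)
	sum := make([]byte, 4)
	binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE(payload))
	out.WriteString("x-amz-checksum-crc32:" + base64.StdEncoding.EncodeToString(sum) + crlf)
	out.WriteString(crlf) // final CRLF closing the trailer section

	return out.Bytes()
}

func main() {
	fmt.Printf("%q\n", encodeAWSChunked([]byte("hello aws-chunked world"), 8))
}

With an 8-byte chunk size and a 23-byte payload this produces three data frames, the zero-length end chunk, and one trailer line, which is the overall shape the deleted middleware_compute_input_checksum.go builds via newUnsignedAWSChunkedEncoding before the request is sent.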
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go deleted file mode 100644 index 5d5286f92..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go +++ /dev/null @@ -1,56 +0,0 @@ -package presignedurl - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -// WithIsPresigning adds the isPresigning sentinel value to a context to signal -// that the middleware stack is using the presign flow. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func WithIsPresigning(ctx context.Context) context.Context { - return middleware.WithStackValue(ctx, isPresigningKey{}, true) -} - -// GetIsPresigning returns if the context contains the isPresigning sentinel -// value for presigning flows. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetIsPresigning(ctx context.Context) bool { - v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool) - return v -} - -type isPresigningKey struct{} - -// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that -// will update the stack's context to be flagged as being invoked for the -// purpose of presigning. -func AddAsIsPresigningMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) -} - -// AddAsIsPresigingMiddleware is an alias for backwards compatibility. -// -// Deprecated: This API was released with a typo. Use -// [AddAsIsPresigningMiddleware] instead. -func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { - return AddAsIsPresigningMiddleware(stack) -} - -type asIsPresigningMiddleware struct{} - -func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } - -func (asIsPresigningMiddleware) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - ctx = WithIsPresigning(ctx) - return next.HandleInitialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go deleted file mode 100644 index 1b85375cf..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package presignedurl provides the customizations for API clients to fill in -// presigned URLs into input parameters. -package presignedurl diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go deleted file mode 100644 index 543343408..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
- -package presignedurl - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go deleted file mode 100644 index 1e2f5c812..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go +++ /dev/null @@ -1,110 +0,0 @@ -package presignedurl - -import ( - "context" - "fmt" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - - "github.com/aws/smithy-go/middleware" -) - -// URLPresigner provides the interface to presign the input parameters in to a -// presigned URL. -type URLPresigner interface { - // PresignURL presigns a URL. - PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error) -} - -// ParameterAccessor provides an collection of accessor to for retrieving and -// setting the values needed to PresignedURL generation -type ParameterAccessor struct { - // GetPresignedURL accessor points to a function that retrieves a presigned url if present - GetPresignedURL func(interface{}) (string, bool, error) - - // GetSourceRegion accessor points to a function that retrieves source region for presigned url - GetSourceRegion func(interface{}) (string, bool, error) - - // CopyInput accessor points to a function that takes in an input, and returns a copy. - CopyInput func(interface{}) (interface{}, error) - - // SetDestinationRegion accessor points to a function that sets destination region on api input struct - SetDestinationRegion func(interface{}, string) error - - // SetPresignedURL accessor points to a function that sets presigned url on api input struct - SetPresignedURL func(interface{}, string) error -} - -// Options provides the set of options needed by the presigned URL middleware. -type Options struct { - // Accessor are the parameter accessors used by this middleware - Accessor ParameterAccessor - - // Presigner is the URLPresigner used by the middleware - Presigner URLPresigner -} - -// AddMiddleware adds the Presign URL middleware to the middleware stack. -func AddMiddleware(stack *middleware.Stack, opts Options) error { - return stack.Initialize.Add(&presign{options: opts}, middleware.Before) -} - -// RemoveMiddleware removes the Presign URL middleware from the stack. -func RemoveMiddleware(stack *middleware.Stack) error { - _, err := stack.Initialize.Remove((*presign)(nil).ID()) - return err -} - -type presign struct { - options Options -} - -func (m *presign) ID() string { return "Presign" } - -func (m *presign) HandleInitialize( - ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler, -) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - // If PresignedURL is already set ignore middleware. - if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } else if ok { - return next.HandleInitialize(ctx, input) - } - - // If have source region is not set ignore middleware. 
- srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } else if !ok || len(srcRegion) == 0 { - return next.HandleInitialize(ctx, input) - } - - // Create a copy of the original input so the destination region value can - // be added. This ensures that value does not leak into the original - // request parameters. - paramCpy, err := m.options.Accessor.CopyInput(input.Parameters) - if err != nil { - return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) - } - - // Destination region is the API client's configured region. - dstRegion := awsmiddleware.GetRegion(ctx) - if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } - - presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy) - if err != nil { - return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) - } - - // Update the original input with the presigned URL value. - if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil { - return out, metadata, fmt.Errorf("presign middleware failed, %w", err) - } - - return next.HandleInitialize(ctx, input) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md deleted file mode 100644 index bdb9bdcd1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ /dev/null @@ -1,403 +0,0 @@ -# v1.18.9 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.8 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.18 (2024-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.17 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.16 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.15 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.14 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.13 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.12 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.11 (2024-06-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.10 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.9 (2024-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.8 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.7 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2024-03-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2024-02-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2023-12-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2023-11-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2023-11-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.6 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.5 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.4 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.5 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.3 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.1 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-03-21) - -* **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.24 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.23 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.22 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.21 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.20 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.19 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.18 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.17 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.16 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.15 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.14 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.13 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.12 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.11 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.10 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-06-29) - -* **Dependency Update**: Updated to the latest 
SDK module versions - -# v1.13.6 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-02-24) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.2 (2021-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-09-02) - -* **Feature**: Add support for S3 Multi-Region Access Point ARNs. - -# v1.6.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-06-04) - -* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. - -# v1.3.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. 
-* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go deleted file mode 100644 index ec290b213..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go +++ /dev/null @@ -1,53 +0,0 @@ -package arn - -import ( - "strings" - - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -// AccessPointARN provides representation -type AccessPointARN struct { - arn.ARN - AccessPointName string -} - -// GetARN returns the base ARN for the Access Point resource -func (a AccessPointARN) GetARN() arn.ARN { - return a.ARN -} - -// ParseAccessPointResource attempts to parse the ARN's resource as an -// AccessPoint resource. -// -// Supported Access point resource format: -// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName} -// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint -func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { - if isFIPS(a.Region) { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} - } - if len(a.AccountID) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"} - } - if len(resParts) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} - } - if len(resParts) > 1 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"} - } - - resID := resParts[0] - if len(strings.TrimSpace(resID)) == 0 { - return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} - } - - return AccessPointARN{ - ARN: a, - AccessPointName: resID, - }, nil -} - -func isFIPS(region string) bool { - return strings.HasPrefix(region, "fips-") || strings.HasSuffix(region, "-fips") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go deleted file mode 100644 index 06e1a3add..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go +++ /dev/null @@ -1,85 +0,0 @@ -package arn - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -var supportedServiceARN = []string{ - "s3", - "s3-outposts", - "s3-object-lambda", -} - -func isSupportedServiceARN(service string) bool { - for _, name := range supportedServiceARN { - if name == service { - return true - } - } - return false -} - -// Resource provides the interfaces abstracting ARNs of specific resource -// types. 
-type Resource interface { - GetARN() arn.ARN - String() string -} - -// ResourceParser provides the function for parsing an ARN's resource -// component into a typed resource. -type ResourceParser func(arn.ARN) (Resource, error) - -// ParseResource parses an AWS ARN into a typed resource for the S3 API. -func ParseResource(a arn.ARN, resParser ResourceParser) (resARN Resource, err error) { - if len(a.Partition) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "partition not set"} - } - - if !isSupportedServiceARN(a.Service) { - return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} - } - - if len(a.Resource) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "resource not set"} - } - - return resParser(a) -} - -// SplitResource splits the resource components by the ARN resource delimiters. -func SplitResource(v string) []string { - var parts []string - var offset int - - for offset <= len(v) { - idx := strings.IndexAny(v[offset:], "/:") - if idx < 0 { - parts = append(parts, v[offset:]) - break - } - parts = append(parts, v[offset:idx+offset]) - offset += idx + 1 - } - - return parts -} - -// IsARN returns whether the given string is an ARN -func IsARN(s string) bool { - return arn.IsARN(s) -} - -// InvalidARNError provides the error for an invalid ARN error. -type InvalidARNError struct { - ARN arn.ARN - Reason string -} - -// Error returns a string denoting the occurred InvalidARNError -func (e InvalidARNError) Error() string { - return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go deleted file mode 100644 index 9a3258e15..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go +++ /dev/null @@ -1,32 +0,0 @@ -package arn - -import "fmt" - -// arnable is implemented by the relevant S3/S3Control -// operations which have members that may need ARN -// processing. -type arnable interface { - SetARNMember(string) error - GetARNMember() (*string, bool) -} - -// GetARNField would be called during middleware execution -// to retrieve a member value that is an ARN in need of -// processing. -func GetARNField(input interface{}) (*string, bool) { - v, ok := input.(arnable) - if !ok { - return nil, false - } - return v.GetARNMember() -} - -// SetARNField would called during middleware exeuction -// to set a member value that required ARN processing. 
-func SetARNField(input interface{}, v string) error { - params, ok := input.(arnable) - if !ok { - return fmt.Errorf("Params does not contain arn field member") - } - return params.SetARNMember(v) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go deleted file mode 100644 index e06a30285..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go +++ /dev/null @@ -1,128 +0,0 @@ -package arn - -import ( - "strings" - - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -// OutpostARN interface that should be satisfied by outpost ARNs -type OutpostARN interface { - Resource - GetOutpostID() string -} - -// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format -// and return a specific OutpostARN type -// -// Currently supported outpost ARN formats: -// * Outpost AccessPoint ARN format: -// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} -// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint -// -// * Outpost Bucket ARN format: -// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} -// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket -// -// Other outpost ARN formats may be supported and added in the future. -func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { - if len(a.Region) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "region not set"} - } - - if isFIPS(a.Region) { - return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} - } - - if len(a.AccountID) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} - } - - // verify if outpost id is present and valid - if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { - return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - // verify possible resource type exists - if len(resParts) < 3 { - return nil, InvalidARNError{ - ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present", - } - } - - // Since we know this is a OutpostARN fetch outpostID - outpostID := strings.TrimSpace(resParts[0]) - - switch resParts[1] { - case "accesspoint": - accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) - if err != nil { - return OutpostAccessPointARN{}, err - } - return OutpostAccessPointARN{ - AccessPointARN: accesspointARN, - OutpostID: outpostID, - }, nil - - case "bucket": - bucketName, err := parseBucketResource(a, resParts[2:]) - if err != nil { - return nil, err - } - return OutpostBucketARN{ - ARN: a, - BucketName: bucketName, - OutpostID: outpostID, - }, nil - - default: - return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} - } -} - -// OutpostAccessPointARN represents outpost access point ARN. -type OutpostAccessPointARN struct { - AccessPointARN - OutpostID string -} - -// GetOutpostID returns the outpost id of outpost access point arn -func (o OutpostAccessPointARN) GetOutpostID() string { - return o.OutpostID -} - -// OutpostBucketARN represents the outpost bucket ARN. 
-type OutpostBucketARN struct { - arn.ARN - BucketName string - OutpostID string -} - -// GetOutpostID returns the outpost id of outpost bucket arn -func (o OutpostBucketARN) GetOutpostID() string { - return o.OutpostID -} - -// GetARN retrives the base ARN from outpost bucket ARN resource -func (o OutpostBucketARN) GetARN() arn.ARN { - return o.ARN -} - -// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the -// bucket resource id. -// -// parseBucketResource only parses the bucket resource id. -func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { - if len(resParts) == 0 { - return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} - } - if len(resParts) > 1 { - return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} - } - - bucketName = strings.TrimSpace(resParts[0]) - if len(bucketName) == 0 { - return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} - } - return bucketName, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go deleted file mode 100644 index 513154cc0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go +++ /dev/null @@ -1,15 +0,0 @@ -package arn - -// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service -type S3ObjectLambdaARN interface { - Resource - - isS3ObjectLambdasARN() -} - -// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type -type S3ObjectLambdaAccessPointARN struct { - AccessPointARN -} - -func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go deleted file mode 100644 index b51532085..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go +++ /dev/null @@ -1,73 +0,0 @@ -package s3shared - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" - - "github.com/aws/aws-sdk-go-v2/aws/arn" -) - -// ARNLookup is the initial middleware that looks up if an arn is provided. -// This middleware is responsible for fetching ARN from a arnable field, and registering the ARN on -// middleware context. This middleware must be executed before input validation step or any other -// arn processing middleware. 
-type ARNLookup struct { - - // GetARNValue takes in a input interface and returns a ptr to string and a bool - GetARNValue func(interface{}) (*string, bool) -} - -// ID for the middleware -func (m *ARNLookup) ID() string { - return "S3Shared:ARNLookup" -} - -// HandleInitialize handles the behavior of this initialize step -func (m *ARNLookup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - // check if GetARNValue is supported - if m.GetARNValue == nil { - return next.HandleInitialize(ctx, in) - } - - // check is input resource is an ARN; if not go to next - v, ok := m.GetARNValue(in.Parameters) - if !ok || v == nil || !arn.IsARN(*v) { - return next.HandleInitialize(ctx, in) - } - - // if ARN process ResourceRequest and put it on ctx - av, err := arn.Parse(*v) - if err != nil { - return out, metadata, fmt.Errorf("error parsing arn: %w", err) - } - // set parsed arn on context - ctx = setARNResourceOnContext(ctx, av) - - return next.HandleInitialize(ctx, in) -} - -// arnResourceKey is the key set on context used to identify, retrive an ARN resource -// if present on the context. -type arnResourceKey struct{} - -// SetARNResourceOnContext sets the S3 ARN on the context. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setARNResourceOnContext(ctx context.Context, value arn.ARN) context.Context { - return middleware.WithStackValue(ctx, arnResourceKey{}, value) -} - -// GetARNResourceFromContext returns an ARN from context and a bool indicating -// presence of ARN on ctx. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetARNResourceFromContext(ctx context.Context) (arn.ARN, bool) { - v, ok := middleware.GetStackValue(ctx, arnResourceKey{}).(arn.ARN) - return v, ok -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go deleted file mode 100644 index b5d31f5c5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go +++ /dev/null @@ -1,41 +0,0 @@ -package config - -import "context" - -// UseARNRegionProvider is an interface for retrieving external configuration value for UseARNRegion -type UseARNRegionProvider interface { - GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error) -} - -// DisableMultiRegionAccessPointsProvider is an interface for retrieving external configuration value for DisableMultiRegionAccessPoints -type DisableMultiRegionAccessPointsProvider interface { - GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value bool, found bool, err error) -} - -// ResolveUseARNRegion extracts the first instance of a UseARNRegion from the config slice. -// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(UseARNRegionProvider); ok { - value, found, err = p.GetS3UseARNRegion(ctx) - if err != nil || found { - break - } - } - } - return -} - -// ResolveDisableMultiRegionAccessPoints extracts the first instance of a DisableMultiRegionAccessPoints from the config slice. 
-// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. -func ResolveDisableMultiRegionAccessPoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { - for _, cfg := range configs { - if p, ok := cfg.(DisableMultiRegionAccessPointsProvider); ok { - value, found, err = p.GetS3DisableMultiRegionAccessPoints(ctx) - if err != nil || found { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go deleted file mode 100644 index aa0c3714e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go +++ /dev/null @@ -1,183 +0,0 @@ -package s3shared - -import ( - "fmt" - - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" -) - -// TODO: fix these error statements to be relevant to v2 sdk - -const ( - invalidARNErrorErrCode = "InvalidARNError" - configurationErrorErrCode = "ConfigurationError" -) - -// InvalidARNError denotes the error for Invalid ARN -type InvalidARNError struct { - message string - resource arn.Resource - origErr error -} - -// Error returns the InvalidARN error string -func (e InvalidARNError) Error() string { - var extra string - if e.resource != nil { - extra = "ARN: " + e.resource.String() - } - msg := invalidARNErrorErrCode + " : " + e.message - if extra != "" { - msg = msg + "\n\t" + extra - } - - return msg -} - -// OrigErr is the original error wrapped by Invalid ARN Error -func (e InvalidARNError) Unwrap() error { - return e.origErr -} - -// NewInvalidARNError denotes invalid arn error -func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "invalid ARN", - origErr: err, - resource: resource, - } -} - -// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition -func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "resource ARN not supported for the target ARN partition", - origErr: err, - resource: resource, - } -} - -// NewInvalidARNWithFIPSError ARN not supported for FIPS region -// -// Deprecated: FIPS will not appear in the ARN region component. 
-func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { - return InvalidARNError{ - message: "resource ARN not supported for FIPS region", - resource: resource, - origErr: err, - } -} - -// ConfigurationError is used to denote a client configuration error -type ConfigurationError struct { - message string - resource arn.Resource - clientPartitionID string - clientRegion string - origErr error -} - -// Error returns the Configuration error string -func (e ConfigurationError) Error() string { - extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", - e.resource, e.clientPartitionID, e.clientRegion) - - msg := configurationErrorErrCode + " : " + e.message - if extra != "" { - msg = msg + "\n\t" + extra - } - return msg -} - -// OrigErr is the original error wrapped by Configuration Error -func (e ConfigurationError) Unwrap() error { - return e.origErr -} - -// NewClientPartitionMismatchError stub -func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client partition does not match provided ARN partition", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientRegionMismatchError denotes cross region access error -func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client region does not match provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewFailedToResolveEndpointError denotes endpoint resolving error -func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "endpoint resolver failed to find an endpoint for the provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access -func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for fips but cross-region resource ARN provided", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS -func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "use of ARN is not supported when client or request is configured for FIPS", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate -func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for S3 Accelerate but is not supported with resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// 
NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request -func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack -func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { - return ConfigurationError{ - message: "client configured for S3 Dual-stack but is not supported with resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go deleted file mode 100644 index f1954485c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package s3shared - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go deleted file mode 100644 index 85b60d2a1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go +++ /dev/null @@ -1,29 +0,0 @@ -package s3shared - -import ( - "github.com/aws/smithy-go/middleware" -) - -// hostID is used to retrieve host id from response metadata -type hostID struct { -} - -// SetHostIDMetadata sets the provided host id over middleware metadata -func SetHostIDMetadata(metadata *middleware.Metadata, id string) { - metadata.Set(hostID{}, id) -} - -// GetHostIDMetadata retrieves the host id from middleware metadata -// returns host id as string along with a boolean indicating presence of -// hostId on middleware metadata. -func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) { - if !metadata.Has(hostID{}) { - return "", false - } - - v, ok := metadata.Get(hostID{}).(string) - if !ok { - return "", true - } - return v, true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go deleted file mode 100644 index f02604cb6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go +++ /dev/null @@ -1,28 +0,0 @@ -package s3shared - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -// clonedInputKey used to denote if request input was cloned. -type clonedInputKey struct{} - -// SetClonedInputKey sets a key on context to denote input was cloned previously. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetClonedInputKey(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, clonedInputKey{}, value) -} - -// IsClonedInput retrieves if context key for cloned input was set. 
-// If set, we can infer that the reuqest input was cloned previously. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func IsClonedInput(ctx context.Context) bool { - v, _ := middleware.GetStackValue(ctx, clonedInputKey{}).(bool) - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go deleted file mode 100644 index 7251588b0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go +++ /dev/null @@ -1,57 +0,0 @@ -package s3shared - -import ( - "context" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const metadataRetrieverID = "S3MetadataRetriever" - -// AddMetadataRetrieverMiddleware adds request id, host id retriever middleware -func AddMetadataRetrieverMiddleware(stack *middleware.Stack) error { - // add metadata retriever middleware before operation deserializers so that it can retrieve metadata such as - // host id, request id from response header returned by operation deserializers - return stack.Deserialize.Insert(&metadataRetriever{}, "OperationDeserializer", middleware.Before) -} - -type metadataRetriever struct { -} - -// ID returns the middleware identifier -func (m *metadataRetriever) ID() string { - return metadataRetrieverID -} - -func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - - span, _ := tracing.GetSpan(ctx) - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. - return out, metadata, err - } - - // check for header for Request id - if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 { - // set reqID on metadata for successful responses. - awsmiddleware.SetRequestIDMetadata(&metadata, v) - span.SetProperty("aws.request_id", v) - } - - // look up host-id - if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 { - // set reqID on metadata for successful responses. 
- SetHostIDMetadata(&metadata, v) - span.SetProperty("aws.extended_request_id", v) - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go deleted file mode 100644 index bee8da3fe..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go +++ /dev/null @@ -1,77 +0,0 @@ -package s3shared - -import ( - "fmt" - "strings" - - awsarn "github.com/aws/aws-sdk-go-v2/aws/arn" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" -) - -// ResourceRequest represents an ARN resource and api request metadata -type ResourceRequest struct { - Resource arn.Resource - // RequestRegion is the region configured on the request config - RequestRegion string - - // SigningRegion is the signing region resolved for the request - SigningRegion string - - // PartitionID is the resolved partition id for the provided request region - PartitionID string - - // UseARNRegion indicates if client should use the region provided in an ARN resource - UseARNRegion bool - - // UseFIPS indicates if te client is configured for FIPS - UseFIPS bool -} - -// ARN returns the resource ARN -func (r ResourceRequest) ARN() awsarn.ARN { - return r.Resource.GetARN() -} - -// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS -// -// Deprecated: FIPS will not be present in the ARN region -func (r ResourceRequest) ResourceConfiguredForFIPS() bool { - return IsFIPS(r.ARN().Region) -} - -// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set -func (r ResourceRequest) AllowCrossRegion() bool { - return r.UseARNRegion -} - -// IsCrossPartition returns true if request is configured for region of another partition, than -// the partition that resource ARN region resolves to. IsCrossPartition will not return an error, -// if request is not configured with a specific partition id. This might happen if customer provides -// custom endpoint url, but does not associate a partition id with it. -func (r ResourceRequest) IsCrossPartition() (bool, error) { - rv := r.PartitionID - if len(rv) == 0 { - return false, nil - } - - av := r.Resource.GetARN().Partition - if len(av) == 0 { - return false, fmt.Errorf("no partition id for provided ARN") - } - - return !strings.EqualFold(rv, av), nil -} - -// IsCrossRegion returns true if request signing region is not same as arn region -func (r ResourceRequest) IsCrossRegion() bool { - v := r.SigningRegion - return !strings.EqualFold(v, r.Resource.GetARN().Region) -} - -// IsFIPS returns true if region is a fips pseudo-region -// -// Deprecated: FIPS should be specified via EndpointOptions. -func IsFIPS(region string) bool { - return strings.HasPrefix(region, "fips-") || - strings.HasSuffix(region, "-fips") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go deleted file mode 100644 index 857336243..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go +++ /dev/null @@ -1,33 +0,0 @@ -package s3shared - -import ( - "errors" - "fmt" - - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" -) - -// ResponseError provides the HTTP centric error type wrapping the underlying error -// with the HTTP response value and the deserialized RequestID. 
-type ResponseError struct { - *awshttp.ResponseError - - // HostID associated with response error - HostID string -} - -// ServiceHostID returns the host id associated with Response Error -func (e *ResponseError) ServiceHostID() string { return e.HostID } - -// Error returns the formatted error -func (e *ResponseError) Error() string { - return fmt.Sprintf( - "https response error StatusCode: %d, RequestID: %s, HostID: %s, %v", - e.Response.StatusCode, e.RequestID, e.HostID, e.Err) -} - -// As populates target and returns true if the type of target is a error type that -// the ResponseError embeds, (e.g.S3 HTTP ResponseError) -func (e *ResponseError) As(target interface{}) bool { - return errors.As(e.ResponseError, target) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go deleted file mode 100644 index 543576245..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go +++ /dev/null @@ -1,60 +0,0 @@ -package s3shared - -import ( - "context" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddResponseErrorMiddleware adds response error wrapper middleware -func AddResponseErrorMiddleware(stack *middleware.Stack) error { - // add error wrapper middleware before request id retriever middleware so that it can wrap the error response - // returned by operation deserializers - return stack.Deserialize.Insert(&errorWrapper{}, metadataRetrieverID, middleware.Before) -} - -type errorWrapper struct { -} - -// ID returns the middleware identifier -func (m *errorWrapper) ID() string { - return "ResponseErrorWrapper" -} - -func (m *errorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err == nil { - // Nothing to do when there is no error. - return out, metadata, err - } - - resp, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - // No raw response to wrap with. 
- return out, metadata, err - } - - // look for request id in metadata - reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) - // look for host id in metadata - hostID, _ := GetHostIDMetadata(metadata) - - // Wrap the returned smithy error with the request id retrieved from the metadata - err = &ResponseError{ - ResponseError: &awshttp.ResponseError{ - ResponseError: &smithyhttp.ResponseError{ - Response: resp, - Err: err, - }, - RequestID: reqID, - }, - HostID: hostID, - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go deleted file mode 100644 index 0f43ec0d4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go +++ /dev/null @@ -1,54 +0,0 @@ -package s3shared - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const s3100ContinueID = "S3100Continue" -const default100ContinueThresholdBytes int64 = 1024 * 1024 * 2 - -// Add100Continue add middleware, which adds {Expect: 100-continue} header for s3 client HTTP PUT request larger than 2MB -// or with unknown size streaming bodies, during operation builder step -func Add100Continue(stack *middleware.Stack, continueHeaderThresholdBytes int64) error { - return stack.Build.Add(&s3100Continue{ - continueHeaderThresholdBytes: continueHeaderThresholdBytes, - }, middleware.After) -} - -type s3100Continue struct { - continueHeaderThresholdBytes int64 -} - -// ID returns the middleware identifier -func (m *s3100Continue) ID() string { - return s3100ContinueID -} - -func (m *s3100Continue) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - sizeLimit := default100ContinueThresholdBytes - switch { - case m.continueHeaderThresholdBytes == -1: - return next.HandleBuild(ctx, in) - case m.continueHeaderThresholdBytes > 0: - sizeLimit = m.continueHeaderThresholdBytes - default: - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - if req.ContentLength == -1 || (req.ContentLength == 0 && req.Body != nil) || req.ContentLength >= sizeLimit { - req.Header.Set("Expect", "100-continue") - } - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go deleted file mode 100644 index 22773199f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go +++ /dev/null @@ -1,78 +0,0 @@ -package s3shared - -import ( - "context" - "fmt" - "strings" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - - awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" -) - -// EnableDualstack represents middleware struct for enabling dualstack support -// -// Deprecated: See EndpointResolverOptions' UseDualStackEndpoint support -type EnableDualstack struct { - // UseDualstack indicates if dualstack endpoint resolving is to be enabled - UseDualstack bool - - // DefaultServiceID is the service id prefix used in endpoint resolving - // by default service-id is 's3' and 's3-control' for service s3, s3control. 
- DefaultServiceID string -} - -// ID returns the middleware ID. -func (*EnableDualstack) ID() string { - return "EnableDualstack" -} - -// HandleSerialize handles serializer middleware behavior when middleware is executed -func (u *EnableDualstack) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - - // check for host name immutable property - if smithyhttp.GetHostnameImmutable(ctx) { - return next.HandleSerialize(ctx, in) - } - - serviceID := awsmiddle.GetServiceID(ctx) - - // s3-control may be represented as `S3 Control` as in model - if serviceID == "S3 Control" { - serviceID = "s3-control" - } - - if len(serviceID) == 0 { - // default service id - serviceID = u.DefaultServiceID - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - if u.UseDualstack { - parts := strings.Split(req.URL.Host, ".") - if len(parts) < 3 { - return out, metadata, fmt.Errorf("unable to update endpoint host for dualstack, hostname invalid, %s", req.URL.Host) - } - - for i := 0; i+1 < len(parts); i++ { - if strings.EqualFold(parts[i], serviceID) { - parts[i] = parts[i] + ".dualstack" - break - } - } - - // construct the url host - req.URL.Host = strings.Join(parts, ".") - } - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go deleted file mode 100644 index 65fd07e00..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go +++ /dev/null @@ -1,89 +0,0 @@ -package s3shared - -import ( - "encoding/xml" - "fmt" - "io" - "net/http" - "strings" -) - -// ErrorComponents represents the error response fields -// that will be deserialized from an xml error response body -type ErrorComponents struct { - Code string `xml:"Code"` - Message string `xml:"Message"` - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` -} - -// GetUnwrappedErrorResponseComponents returns the error fields from an xml error response body -func GetUnwrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) { - var errComponents ErrorComponents - if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err) - } - return errComponents, nil -} - -// GetWrappedErrorResponseComponents returns the error fields from an xml error response body -// in which error code, and message are wrapped by a tag -func GetWrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) { - var errComponents struct { - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` - } - - if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF { - return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err) - } - - return ErrorComponents{ - Code: errComponents.Code, - Message: errComponents.Message, - RequestID: errComponents.RequestID, - HostID: errComponents.HostID, - }, nil -} - -// GetErrorResponseComponents retrieves error components according to passed in options -func GetErrorResponseComponents(r io.Reader, options ErrorResponseDeserializerOptions) (ErrorComponents, error) { - var 
errComponents ErrorComponents - var err error - - if options.IsWrappedWithErrorTag { - errComponents, err = GetWrappedErrorResponseComponents(r) - } else { - errComponents, err = GetUnwrappedErrorResponseComponents(r) - } - - if err != nil { - return ErrorComponents{}, err - } - - // If an error code or message is not retrieved, it is derived from the http status code - // eg, for S3 service, we derive err code and message, if none is found - if options.UseStatusCode && len(errComponents.Code) == 0 && - len(errComponents.Message) == 0 { - // derive code and message from status code - statusText := http.StatusText(options.StatusCode) - errComponents.Code = strings.Replace(statusText, " ", "", -1) - errComponents.Message = statusText - } - return errComponents, nil -} - -// ErrorResponseDeserializerOptions represents error response deserializer options for s3 and s3-control service -type ErrorResponseDeserializerOptions struct { - // UseStatusCode denotes if status code should be used to retrieve error code, msg - UseStatusCode bool - - // StatusCode is status code of error response - StatusCode int - - //IsWrappedWithErrorTag represents if error response's code, msg is wrapped within an - // additional tag - IsWrappedWithErrorTag bool -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md deleted file mode 100644 index 61a21a5d9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ /dev/null @@ -1,760 +0,0 @@ -# v1.73.0 (2025-01-15) - -* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. -* **Feature**: This change enhances integrity protections for new SDK requests to S3. S3 SDKs now support the CRC64NVME checksum algorithm, full object checksums for multipart S3 objects, and new default integrity protections for S3 requests. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.72.3 (2025-01-14) - -* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information. - -# v1.72.2 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.72.1 (2025-01-08) - -* No change notes available for this release. - -# v1.72.0 (2025-01-03) - -* **Feature**: This change is only for updating the model regexp of CopySource which is not for validation but only for documentation and user guide change. 
- -# v1.71.1 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.71.0 (2024-12-03.2) - -* **Feature**: Amazon S3 Metadata stores object metadata in read-only, fully managed Apache Iceberg metadata tables that you can query. You can create metadata table configurations for S3 general purpose buckets. - -# v1.70.0 (2024-12-02) - -* **Feature**: Amazon S3 introduces support for AWS Dedicated Local Zones -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.69.0 (2024-11-25) - -* **Feature**: Amazon Simple Storage Service / Features: Add support for ETag based conditional writes in PutObject and CompleteMultiPartUpload APIs to prevent unintended object modifications. - -# v1.68.0 (2024-11-21) - -* **Feature**: Add support for conditional deletes for the S3 DeleteObject and DeleteObjects APIs. Add support for write offset bytes option used to append to objects with the S3 PutObject API. - -# v1.67.1 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.67.0 (2024-11-14) - -* **Feature**: This release updates the ListBuckets API Reference documentation in support of the new 10,000 general purpose bucket default quota on all AWS accounts. To increase your bucket quota from 10,000 to up to 1 million buckets, simply request a quota increase via Service Quotas. - -# v1.66.3 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.66.2 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.66.1 (2024-10-25) - -* **Bug Fix**: Update presign post URL resolution to use the exact result from EndpointResolverV2 - -# v1.66.0 (2024-10-16) - -* **Feature**: Add support for the new optional bucket-region and prefix query parameters in the ListBuckets API. For ListBuckets requests that express pagination, Amazon S3 will now return both the bucket names and associated AWS regions in the response. - -# v1.65.3 (2024-10-11) - -* **Bug Fix**: **BREAKING CHANGE**: S3 ReplicationRuleFilter and LifecycleRuleFilter shapes are being changed from union to structure types - -# v1.65.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.65.1 (2024-10-07) - -* **Bug Fix**: **CHANGE IN BEHAVIOR**: Allow serialization of headers with empty string for prefix headers. We are deploying this fix because the behavior is actively preventing users from transmitting keys with empty values to the service. If you were setting metadata keys with empty values before this change, they will now actually be sent to the service. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.65.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.64.1 (2024-10-03) - -* No change notes available for this release. - -# v1.64.0 (2024-10-02) - -* **Feature**: This release introduces a header representing the minimum object size limit for Lifecycle transitions. - -# v1.63.3 (2024-09-27) - -* No change notes available for this release. - -# v1.63.2 (2024-09-25) - -* No change notes available for this release. - -# v1.63.1 (2024-09-23) - -* No change notes available for this release. - -# v1.63.0 (2024-09-20) - -* **Feature**: Add tracing and metrics support to service clients. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.62.0 (2024-09-18) - -* **Feature**: Added SSE-KMS support for directory buckets. - -# v1.61.3 (2024-09-17) - -* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. - -# v1.61.2 (2024-09-04) - -* No change notes available for this release. - -# v1.61.1 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.61.0 (2024-08-28) - -* **Feature**: Add presignPost for s3 PutObject - -# v1.60.1 (2024-08-22) - -* No change notes available for this release. - -# v1.60.0 (2024-08-20) - -* **Feature**: Amazon Simple Storage Service / Features : Add support for conditional writes for PutObject and CompleteMultipartUpload APIs. - -# v1.59.0 (2024-08-15) - -* **Feature**: Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API. -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.58.3 (2024-08-02) - -* **Bug Fix**: Add assurance tests for auth scheme selection logic. - -# v1.58.2 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.58.1 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.58.0 (2024-07-02) - -* **Feature**: Added response overrides to Head Object requests. - -# v1.57.1 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.57.0 (2024-06-26) - -* **Feature**: Support list-of-string endpoint parameter. - -# v1.56.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.56.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.55.2 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.55.1 (2024-06-07) - -* **Bug Fix**: Add clock skew correction on all service clients -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.55.0 (2024-06-05) - -* **Feature**: Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. These changes will not change any of the existing S3 API functionality. -* **Bug Fix**: Add S3-specific smithy protocol tests. - -# v1.54.4 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.54.3 (2024-05-23) - -* **Bug Fix**: Prevent parsing failures for nonstandard `Expires` values in responses. If the SDK cannot parse the value set in the response header for this field it will now be returned as `nil`. A new field, `ExpiresString`, has been added that will retain the unparsed value from the response (regardless of whether it came back in a format recognized by the SDK). 
- -# v1.54.2 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.54.1 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.54.0 (2024-05-14) - -* **Feature**: Updated a few x-id in the http uri traits - -# v1.53.2 (2024-05-08) - -* **Bug Fix**: GoDoc improvement - -# v1.53.1 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.53.0 (2024-03-18) - -* **Feature**: Fix two issues with response root node names. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.52.1 (2024-03-15) - -* **Documentation**: Documentation updates for Amazon S3. - -# v1.52.0 (2024-03-13) - -* **Feature**: This release makes the default option for S3 on Outposts request signing to use the SigV4A algorithm when using AWS Common Runtime (CRT). - -# v1.51.4 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.51.3 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.51.2 (2024-03-04) - -* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.51.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.51.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.50.3 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.50.2 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.50.1 (2024-02-19) - -* **Bug Fix**: Prevent potential panic caused by invalid comparison of credentials. - -# v1.50.0 (2024-02-16) - -* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters. - -# v1.49.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.48.1 (2024-01-24) - -* No change notes available for this release. - -# v1.48.0 (2024-01-05) - -* **Feature**: Support smithy sigv4a trait for codegen. - -# v1.47.8 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.47.7 (2023-12-20) - -* No change notes available for this release. - -# v1.47.6 (2023-12-18) - -* No change notes available for this release. - -# v1.47.5 (2023-12-08) - -* **Bug Fix**: Add non-vhostable buckets to request path when using legacy V1 endpoint resolver. -* **Bug Fix**: Improve uniqueness of default S3Express sesssion credentials cache keying to prevent collision in multi-credential scenarios. -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. 
- -# v1.47.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.47.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.47.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.47.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.47.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.46.0 (2023-11-28.2) - -* **Feature**: Add S3Express support. -* **Feature**: Adds support for S3 Express One Zone. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.45.1 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.45.0 (2023-11-27) - -* **Feature**: Adding new params - Key and Prefix, to S3 API operations for supporting S3 Access Grants. Note - These updates will not change any of the existing S3 API functionality. - -# v1.44.0 (2023-11-21) - -* **Feature**: Add support for automatic date based partitioning in S3 Server Access Logs. -* **Bug Fix**: Don't send MaxKeys/MaxUploads=0 when unspecified in ListObjectVersions and ListMultipartUploads paginators. - -# v1.43.1 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.43.0 (2023-11-17) - -* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162. -* **Feature**: Removes all default 0 values for numbers and false values for booleans - -# v1.42.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.42.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.42.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.41.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.40.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.40.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.40.0 (2023-09-26) - -* **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK. 
- -# v1.39.0 (2023-09-20) - -* **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException - -# v1.38.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.38.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.38.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.38.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.38.1 (2023-08-01) - -* No change notes available for this release. - -# v1.38.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.37.1 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.37.0 (2023-07-13) - -* **Feature**: S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.36.0 (2023-06-28) - -* **Feature**: The S3 LISTObjects, ListObjectsV2 and ListObjectVersions API now supports a new optional header x-amz-optional-object-attributes. If header contains RestoreStatus as the value, then S3 will include Glacier restore status i.e. isRestoreInProgress and RestoreExpiryDate in List response. - -# v1.35.0 (2023-06-16) - -* **Feature**: This release adds SDK support for request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs. - -# v1.34.1 (2023-06-15) - -* No change notes available for this release. - -# v1.34.0 (2023-06-13) - -* **Feature**: Integrate double encryption feature to SDKs. -* **Bug Fix**: Fix HeadObject to return types.Nound when an object does not exist. Fixes [2084](https://github.com/aws/aws-sdk-go-v2/issues/2084) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.1 (2023-05-04) - -* **Documentation**: Documentation updates for Amazon S3 - -# v1.33.0 (2023-04-24) - -* **Feature**: added custom paginators for listMultipartUploads and ListObjectVersions -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.0 (2023-04-19) - -* **Feature**: Provides support for "Snow" Storage class. - -# v1.31.3 (2023-04-10) - -* No change notes available for this release. - -# v1.31.2 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.1 (2023-03-31) - -* **Documentation**: Documentation updates for Amazon S3 - -# v1.31.0 (2023-03-21) - -* **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.6 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.5 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. 
- -# v1.30.4 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.3 (2023-02-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.2 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.1 (2023-01-23) - -* No change notes available for this release. - -# v1.30.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.29.6 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.5 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.4 (2022-11-22) - -* No change notes available for this release. - -# v1.29.3 (2022-11-16) - -* No change notes available for this release. - -# v1.29.2 (2022-11-10) - -* No change notes available for this release. - -# v1.29.1 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.0 (2022-10-21) - -* **Feature**: S3 on Outposts launches support for automatic bucket-style alias. You can use the automatic access point alias instead of an access point ARN for any object-level operation in an Outposts bucket. -* **Bug Fix**: The SDK client has been updated to utilize the `aws.IsCredentialsProvider` function for determining if `aws.AnonymousCredentials` has been configured for the `CredentialProvider`. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2022-10-19) - -* **Feature**: Updates internal logic for constructing API endpoints. We have added rule-based endpoints and internal model parameters. - -# v1.27.11 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.10 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.9 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.8 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.7 (2022-08-30) - -* No change notes available for this release. - -# v1.27.6 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.5 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.4 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.3 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.2 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.0 (2022-07-01) - -* **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076). - -# v1.26.12 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.11 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.10 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.9 (2022-05-06) - -* No change notes available for this release. - -# v1.26.8 (2022-05-03) - -* **Documentation**: Documentation only update for doc bug fixes for the S3 API docs. 
- -# v1.26.7 (2022-04-27) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.6 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.5 (2022-04-12) - -* **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectivly. - -# v1.26.4 (2022-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.1 (2022-01-28) - -* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR. - -# v1.24.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Documentation**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.0 (2021-12-21) - -* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. -* **Feature**: Updated to latest service endpoints - -# v1.21.0 (2021-12-02) - -* **Feature**: API client updated -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2021-11-30) - -* **Feature**: API client updated - -# v1.19.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2021-11-12) - -* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature. - -# v1.18.0 (2021-11-06) - -* **Feature**: Support has been added for the SelectObjectContent API. 
-* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Feature**: Updated service to latest API model. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to use unsigned payload signing auth when TLS is enabled. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2021-09-17) - -* **Feature**: Updated API client and endpoints to latest revision. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2021-09-10) - -* No change notes available for this release. - -# v1.15.0 (2021-09-02) - -* **Feature**: API client updated -* **Feature**: Add support for S3 Multi-Region Access Point ARNs. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2021-08-27) - -* **Feature**: Updated API model to latest revision. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2021-08-19) - -* **Feature**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2021-08-04) - -* **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346)) -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2021-06-04) - -* **Feature**: The handling of AccessPoint and Outpost ARNs have been updated. -* **Feature**: Updated service client to latest API model. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2021-05-25) - -* **Feature**: API client updated - -# v1.8.0 (2021-05-20) - -* **Feature**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Feature**: Updated to latest service API model. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go deleted file mode 100644 index 5cadcf369..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ /dev/null @@ -1,1303 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" - "github.com/aws/aws-sdk-go-v2/internal/v4a" - acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" - s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "sync/atomic" - "time" -) - -const ServiceID = "S3" -const ServiceAPIVersion = "2006-03-01" - -type operationMetrics struct { - Duration metrics.Float64Histogram - SerializeDuration metrics.Float64Histogram - ResolveIdentityDuration metrics.Float64Histogram - ResolveEndpointDuration metrics.Float64Histogram - SignRequestDuration metrics.Float64Histogram - DeserializeDuration metrics.Float64Histogram -} - -func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { - switch name { - case "client.call.duration": - return m.Duration - case "client.call.serialization_duration": - return m.SerializeDuration - case "client.call.resolve_identity_duration": - return m.ResolveIdentityDuration - case "client.call.resolve_endpoint_duration": - return m.ResolveEndpointDuration - case "client.call.signing_duration": - return m.SignRequestDuration - case "client.call.deserialization_duration": - return m.DeserializeDuration - default: - panic("unrecognized operation metric") - } -} - -func timeOperationMetric[T any]( - ctx context.Context, metric string, fn func() (T, error), - opts ...metrics.RecordMetricOption, -) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - start := time.Now() - v, err := fn() - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - return v, err -} - -func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) 
- - var ended bool - start := time.Now() - return func() { - if ended { - return - } - ended = true - - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - } -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} - -type operationMetricsKey struct{} - -func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { - meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/s3") - om := &operationMetrics{} - - var err error - - om.Duration, err = operationMetricTimer(meter, "client.call.duration", - "Overall call duration (including retries and time to send or receive request and response body)") - if err != nil { - return nil, err - } - om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", - "The time it takes to serialize a message body") - if err != nil { - return nil, err - } - om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", - "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") - if err != nil { - return nil, err - } - om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", - "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") - if err != nil { - return nil, err - } - om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", - "The time it takes to sign a request") - if err != nil { - return nil, err - } - om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", - "The time it takes to deserialize a message body") - if err != nil { - return nil, err - } - - return context.WithValue(parent, operationMetricsKey{}, om), nil -} - -func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { - return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = desc - }) -} - -func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) -} - -func operationTracer(p tracing.TracerProvider) tracing.Tracer { - return p.Tracer("github.com/aws/aws-sdk-go-v2/service/s3") -} - -// Client provides the API client to make operations call for Amazon Simple -// Storage Service. -type Client struct { - options Options - - // Difference between the time reported by the server and the client - timeOffset *atomic.Int64 -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. 
-func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveHTTPSignerV4a(&options) - - resolveTracerProvider(&options) - - resolveMeterProvider(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - resolveExpressCredentials(&options) - - finalizeServiceEndpointAuthResolver(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - finalizeExpressCredentials(&options, client) - - initializeTimeOffsetResolver(client) - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - ctx = middleware.ClearStackValues(ctx) - ctx = middleware.WithServiceID(ctx, ServiceID) - ctx = middleware.WithOperationName(ctx, opID) - - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - setSafeEventStreamClientLogMode(&options, opID) - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - finalizeOperationExpressCredentials(&options, *c) - - finalizeOperationEndpointAuthResolver(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - ctx, err = withOperationMetrics(ctx, options.MeterProvider) - if err != nil { - return nil, metadata, err - } - - tracer := operationTracer(options.TracerProvider) - spanName := fmt.Sprintf("%s.%s", ServiceID, opID) - - ctx = tracing.WithOperationTracer(ctx, tracer) - - ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { - o.Kind = tracing.SpanKindClient - o.Properties.Set("rpc.system", "aws-api") - o.Properties.Set("rpc.method", opID) - o.Properties.Set("rpc.service", ServiceID) - }) - endTimer := startMetricTimer(ctx, "client.call.duration") - defer endTimer() - defer span.End() - - handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { - o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") - }) - decorated := middleware.DecorateHandler(handler, stack) - result, metadata, err = decorated.Handle(ctx, params) - if err != nil { - span.SetProperty("exception.type", fmt.Sprintf("%T", err)) - span.SetProperty("exception.message", err.Error()) - - var aerr smithy.APIError - if errors.As(err, &aerr) { - span.SetProperty("api.error_code", aerr.ErrorCode()) - span.SetProperty("api.error_message", aerr.ErrorMessage()) - span.SetProperty("api.error_fault", aerr.ErrorFault().String()) - } - - err = &smithy.OperationError{ - ServiceID: ServiceID, - 
OperationName: opID, - Err: err, - } - } - - span.SetProperty("error", err != nil) - if err == nil { - span.SetStatus(tracing.SpanStatusOK) - } else { - span.SetStatus(tracing.SpanStatusError) - } - - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - internalauth.NewHTTPAuthScheme("com.amazonaws.s3#sigv4express", &s3cust.ExpressSigner{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - internalauth.NewHTTPAuthScheme("aws.auth#sigv4a", &v4a.SignerAdapter{ - Signer: options.httpSignerV4a, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - 
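(An illustrative aside, not part of the vendored file being deleted above: with the vendor tree removed, downstream code keeps consuming this package from the module cache exactly as before. The sketch below assumes standard aws-sdk-go-v2 usage; the region and retry values are placeholders chosen for the example.)

	package main

	import (
		"context"
		"log"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/service/s3"
	)

	func main() {
		ctx := context.Background()
		// Load shared AWS configuration (env vars, ~/.aws/config); WithRegion is a placeholder.
		cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
		if err != nil {
			log.Fatal(err)
		}
		// NewFromConfig maps aws.Config into Options and then runs the same resolver
		// chain (retryer, HTTP client, signers, endpoint resolver) as New above.
		client := s3.NewFromConfig(cfg, func(o *s3.Options) {
			o.RetryMaxAttempts = 5
			o.RetryMode = aws.RetryModeAdaptive
		})
		out, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range out.Buckets {
			log.Println(aws.ToString(b.Name))
		}
	}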
-func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - RequestChecksumCalculation: cfg.RequestChecksumCalculation, - ResponseChecksumValidation: cfg.ResponseChecksumValidation, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseARNRegion(cfg, &opts) - resolveDisableMultiRegionAccessPoints(cfg, &opts) - resolveDisableExpressAuth(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) 
- } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - so.DisableURIPathEscaping = true - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} - -func addSpanRetryLoop(stack *middleware.Stack, options Options) error { - return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) -} - -type spanRetryLoop struct { - options Options -} - -func (*spanRetryLoop) ID() string { - return "spanRetryLoop" -} - -func (m *spanRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - 
middleware.FinalizeOutput, middleware.Metadata, error, -) { - tracer := operationTracer(m.options.TracerProvider) - ctx, span := tracer.StartSpan(ctx, "RetryLoop") - defer span.End() - - return next.HandleFinalize(ctx, in) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addIsWaiterUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) - return nil - }) -} - -func addIsPaginatorUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) - return nil - }) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves UseARNRegion S3 configuration -func resolveUseARNRegion(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := s3sharedconfig.ResolveUseARNRegion(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.UseARNRegion = value - } - return nil -} - -// resolves DisableMultiRegionAccessPoints S3 configuration -func resolveDisableMultiRegionAccessPoints(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := s3sharedconfig.ResolveDisableMultiRegionAccessPoints(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.DisableMultiRegionAccessPoints = value - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func 
resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -type httpSignerV4a interface { - SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, - service string, regionSet []string, signingTime time.Time, - optFns ...func(*v4a.SignerOptions)) error -} - -func resolveHTTPSignerV4a(o *Options) { - if o.httpSignerV4a != nil { - return - } - o.httpSignerV4a = newDefaultV4aSigner(*o) -} - -func newDefaultV4aSigner(o Options) *v4a.Signer { - return v4a.NewSigner(func(so *v4a.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} -func initializeTimeOffsetResolver(c *Client) { - c.timeOffset = new(atomic.Int64) -} - -func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - switch options.Retryer.(type) { - case *retry.Standard: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) - case *retry.AdaptiveMode: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) - } - return nil -} - -func addRequestChecksumMetricsTracking(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - return stack.Build.Insert(&internalChecksum.RequestChecksumMetricsTracking{ - RequestChecksumCalculation: options.RequestChecksumCalculation, - UserAgent: ua, - }, "UserAgent", middleware.Before) -} - -func addResponseChecksumMetricsTracking(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - return stack.Build.Insert(&internalChecksum.ResponseChecksumMetricsTracking{ - ResponseChecksumValidation: options.ResponseChecksumValidation, - UserAgent: ua, - }, "UserAgent", middleware.Before) -} - -func resolveTracerProvider(options *Options) { - if options.TracerProvider == nil { - options.TracerProvider = &tracing.NopTracerProvider{} - } -} - -func resolveMeterProvider(options *Options) { - if options.MeterProvider == nil { - options.MeterProvider = metrics.NopMeterProvider{} - } -} - -func addMetadataRetrieverMiddleware(stack *middleware.Stack) error { - return s3shared.AddMetadataRetrieverMiddleware(stack) -} - -func add100Continue(stack *middleware.Stack, options Options) error { - return s3shared.Add100Continue(stack, options.ContinueHeaderThresholdBytes) -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -// ComputedInputChecksumsMetadata provides information about the algorithms used -// to compute the checksum(s) of the input payload. -type ComputedInputChecksumsMetadata struct { - // ComputedChecksums is a map of algorithm name to checksum value of the computed - // input payload's checksums. 
- ComputedChecksums map[string]string -} - -// GetComputedInputChecksumsMetadata retrieves from the result metadata the map of -// algorithms and input payload checksums values. -func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChecksumsMetadata, bool) { - values, ok := internalChecksum.GetComputedInputChecksums(m) - if !ok { - return ComputedInputChecksumsMetadata{}, false - } - return ComputedInputChecksumsMetadata{ - ComputedChecksums: values, - }, true - -} - -// ChecksumValidationMetadata contains metadata such as the checksum algorithm -// used for data integrity validation. -type ChecksumValidationMetadata struct { - // AlgorithmsUsed is the set of the checksum algorithms used to validate the - // response payload. The response payload must be completely read in order for the - // checksum validation to be performed. An error is returned by the operation - // output's response io.ReadCloser if the computed checksums are invalid. - AlgorithmsUsed []string -} - -// GetChecksumValidationMetadata returns the set of algorithms that will be used -// to validate the response payload with. The response payload must be completely -// read in order for the checksum validation to be performed. An error is returned -// by the operation output's response io.ReadCloser if the computed checksums are -// invalid. Returns false if no checksum algorithm used metadata was found. -func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) { - values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m) - if !ok { - return ChecksumValidationMetadata{}, false - } - return ChecksumValidationMetadata{ - AlgorithmsUsed: append(make([]string, 0, len(values)), values...), - }, true - -} - -// nopGetBucketAccessor is no-op accessor for operation that don't support bucket -// member as input -func nopGetBucketAccessor(input interface{}) (*string, bool) { - return nil, false -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return s3shared.AddResponseErrorMiddleware(stack) -} - -func disableAcceptEncodingGzip(stack *middleware.Stack) error { - return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{}) -} - -// ResponseError provides the HTTP centric error type wrapping the underlying -// error with the HTTP response value and the deserialized RequestID. -type ResponseError interface { - error - - ServiceHostID() string - ServiceRequestID() string -} - -var _ ResponseError = (*s3shared.ResponseError)(nil) - -// GetHostIDMetadata retrieves the host id from middleware metadata returns host -// id as string along with a boolean indicating presence of hostId on middleware -// metadata. 
-func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) { - return s3shared.GetHostIDMetadata(metadata) -} - -// HTTPPresignerV4 represents presigner interface used by presign url client -type HTTPPresignerV4 interface { - PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*v4.SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// httpPresignerV4a represents sigv4a presigner interface used by presign url -// client -type httpPresignerV4a interface { - PresignHTTP( - ctx context.Context, credentials v4a.Credentials, r *http.Request, - payloadHash string, service string, regionSet []string, signingTime time.Time, - optFns ...func(*v4a.SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignOptions represents the presign client options -type PresignOptions struct { - - // ClientOptions are list of functional options to mutate client options used by - // the presign client. - ClientOptions []func(*Options) - - // Presigner is the presigner used by the presign url client - Presigner HTTPPresignerV4 - - // Expires sets the expiration duration for the generated presign url. This should - // be the duration in seconds the presigned URL should be considered valid for. If - // not set or set to zero, presign url would default to expire after 900 seconds. - Expires time.Duration - - // presignerV4a is the presigner used by the presign url client - presignerV4a httpPresignerV4a -} - -func (o PresignOptions) copy() PresignOptions { - clientOptions := make([]func(*Options), len(o.ClientOptions)) - copy(clientOptions, o.ClientOptions) - o.ClientOptions = clientOptions - return o -} - -// WithPresignClientFromClientOptions is a helper utility to retrieve a function -// that takes PresignOption as input -func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { - return withPresignClientFromClientOptions(optFns).options -} - -type withPresignClientFromClientOptions []func(*Options) - -func (w withPresignClientFromClientOptions) options(o *PresignOptions) { - o.ClientOptions = append(o.ClientOptions, w...) -} - -// WithPresignExpires is a helper utility to append Expires value on presign -// options optional function -func WithPresignExpires(dur time.Duration) func(*PresignOptions) { - return withPresignExpires(dur).options -} - -type withPresignExpires time.Duration - -func (w withPresignExpires) options(o *PresignOptions) { - o.Expires = time.Duration(w) -} - -// PresignClient represents the presign url client -type PresignClient struct { - client *Client - options PresignOptions -} - -// NewPresignClient generates a presign client using provided API Client and -// presign options -func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { - var options PresignOptions - for _, fn := range optFns { - fn(&options) - } - if len(options.ClientOptions) != 0 { - c = New(c.options, options.ClientOptions...) 
- } - - if options.Presigner == nil { - options.Presigner = newDefaultV4Signer(c.options) - } - - if options.presignerV4a == nil { - options.presignerV4a = newDefaultV4aSigner(c.options) - } - - return &PresignClient{ - client: c, - options: options, - } -} - -func withNopHTTPClientAPIOption(o *Options) { - o.HTTPClient = smithyhttp.NopClient{} -} - -type presignContextPolyfillMiddleware struct { -} - -func (*presignContextPolyfillMiddleware) ID() string { - return "presignContextPolyfill" -} - -func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - schemeID := rscheme.Scheme.SchemeID() - ctx = s3cust.SetSignerVersion(ctx, schemeID) - if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { - if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr) - } - } else if schemeID == "aws.auth#sigv4a" { - if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) - } - } - - return next.HandleFinalize(ctx, in) -} - -type presignConverter PresignOptions - -func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { - stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok { - stack.Finalize.Remove((*retry.Attempt)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok { - stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) - } - stack.Deserialize.Clear() - stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) - stack.Build.Remove("UserAgent") - if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { - return err - } - - pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - Presigner: c.Presigner, - LogSigning: options.ClientLogMode.IsSigning(), - }) - if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { - return err - } - if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { - return err - } - - // extended s3 presigning - signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - ExpressCredentials: options.ExpressCredentials, - V4Presigner: c.Presigner, - V4aPresigner: c.presignerV4a, - LogSigning: options.ClientLogMode.IsSigning(), - }) - err = s3cust.RegisterPreSigningMiddleware(stack, signermv) - if err != nil { - return err - } - - if c.Expires < 0 { - return fmt.Errorf("presign URL duration must be 0 or greater, %v", c.Expires) - } - // add middleware to set expiration for s3 presigned url, if expiration is set to - // 0, this middleware sets a default expiration of 900 seconds - err = 
stack.Build.Add(&s3cust.AddExpiresOnPresignedURL{Expires: c.Expires}, middleware.After) - if err != nil { - return err - } - err = presignedurlcust.AddAsIsPresigningMiddleware(stack) - if err != nil { - return err - } - return nil -} - -func withNoDefaultChecksumAPIOption(options *Options) { - options.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} - -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func 
addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} - -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go deleted file mode 100644 index 467a4647e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go +++ /dev/null @@ -1,357 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// This operation aborts a multipart upload. After a multipart upload is aborted, -// no additional parts can be uploaded using that upload ID. The storage consumed -// by any previously uploaded parts will be freed. However, if any part uploads are -// currently in progress, those part uploads might or might not succeed. As a -// result, it might be necessary to abort a given multipart upload multiple times -// in order to completely free all storage consumed by all parts. -// -// To verify that all parts have been removed and prevent getting charged for the -// part storage, you should call the [ListParts]API operation and ensure that the parts list -// is empty. -// -// - Directory buckets - If multipart uploads in a directory bucket are in -// progress, you can't delete the bucket until all the in-progress multipart -// uploads are aborted or completed. To delete these in-progress multipart uploads, -// use the ListMultipartUploads operation to list the in-progress multipart -// uploads in the bucket and use the AbortMultipartUpload operation to abort all -// the in-progress multipart uploads. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. 
After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to AbortMultipartUpload : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { - if params == nil { - params = &AbortMultipartUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AbortMultipartUpload", params, optFns, c.addOperationAbortMultipartUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AbortMultipartUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AbortMultipartUploadInput struct { - - // The bucket name to which the upload was taking place. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Key of the object for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // Upload ID that identifies the multipart upload. - // - // This member is required. - UploadId *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // If present, this header aborts an in progress multipart upload only if it was - // initiated on the provided timestamp. If the initiated timestamp of the multipart - // upload does not match the provided value, the operation returns a 412 - // Precondition Failed error. If the initiated timestamp matches or if the - // multipart upload doesn’t exist, the operation returns a 204 Success (No Content) - // response. - // - // This functionality is only supported for directory buckets. - IfMatchInitiatedTime *time.Time - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type AbortMultipartUploadOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpAbortMultipartUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpAbortMultipartUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AbortMultipartUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAbortMultipartUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *AbortMultipartUploadInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - 
return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opAbortMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AbortMultipartUpload", - } -} - -// getAbortMultipartUploadBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getAbortMultipartUploadBucketMember(input interface{}) (*string, bool) { - in := input.(*AbortMultipartUploadInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addAbortMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getAbortMultipartUploadBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go deleted file mode 100644 index 1948aab48..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go +++ /dev/null @@ -1,641 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Completes a multipart upload by assembling previously uploaded parts. -// -// You first initiate the multipart upload and then upload all parts using the [UploadPart] -// operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of -// an upload, you call this CompleteMultipartUpload operation to complete the -// upload. Upon receiving this request, Amazon S3 concatenates all the parts in -// ascending order by part number to create a new object. In the -// CompleteMultipartUpload request, you must provide the parts list and ensure that -// the parts list is complete. The CompleteMultipartUpload API operation -// concatenates the parts that you provide in the list. For each part in the list, -// you must provide the PartNumber value and the ETag value that are returned -// after that part was uploaded. -// -// The processing of a CompleteMultipartUpload request could take several minutes -// to finalize. After Amazon S3 begins processing the request, it sends an HTTP -// response header that specifies a 200 OK response. While processing is in -// progress, Amazon S3 periodically sends white space characters to keep the -// connection from timing out. A request could fail after the initial 200 OK -// response has been sent. This means that a 200 OK response can contain either a -// success or an error. The error response might be embedded in the 200 OK -// response. 
If you call this API operation directly, make sure to design your -// application to parse the contents of the response and handle it appropriately. -// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect -// the embedded error and apply error handling per your configuration settings -// (including automatically retrying the request as appropriate). If the condition -// persists, the SDKs throw an exception (or, for the SDKs that don't use -// exceptions, they return an error). -// -// Note that if CompleteMultipartUpload fails, applications should be prepared to -// retry any failed requests (including 500 error responses). For more information, -// see [Amazon S3 Error Best Practices]. -// -// You can't use Content-Type: application/x-www-form-urlencoded for the -// CompleteMultipartUpload requests. Also, if you don't provide a Content-Type -// header, CompleteMultipartUpload can still return a 200 OK response. -// -// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about -// endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. -// -// If you provide an [additional checksum value]in your MultipartUpload requests and the object is encrypted -// -// with Key Management Service, you must have permission to use the kms:Decrypt -// action for the CompleteMultipartUpload request to succeed. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Special errors -// -// - Error Code: EntityTooSmall -// -// - Description: Your proposed upload is smaller than the minimum allowed -// object size. Each part must be at least 5 MB in size, except the last part. 
-// -// - HTTP Status Code: 400 Bad Request -// -// - Error Code: InvalidPart -// -// - Description: One or more of the specified parts could not be found. The -// part might not have been uploaded, or the specified ETag might not have matched -// the uploaded part's ETag. -// -// - HTTP Status Code: 400 Bad Request -// -// - Error Code: InvalidPartOrder -// -// - Description: The list of parts was not in ascending order. The parts list -// must be specified in order by part number. -// -// - HTTP Status Code: 400 Bad Request -// -// - Error Code: NoSuchUpload -// -// - Description: The specified multipart upload does not exist. The upload ID -// might be invalid, or the multipart upload might have been aborted or completed. -// -// - HTTP Status Code: 404 Not Found -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to CompleteMultipartUpload : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [AbortMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html -// [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [additional checksum value]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { - if params == nil { - params = &CompleteMultipartUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CompleteMultipartUpload", params, optFns, c.addOperationCompleteMultipartUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CompleteMultipartUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CompleteMultipartUploadInput struct { - - // Name of the bucket to which the multipart upload was initiated. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). 
Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // ID for the initiated multipart upload. - // - // This member is required. - UploadId *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC-64NVME checksum of the object. The CRC-64NVME - // checksum is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. 
- // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // This header specifies the checksum type of the object, which determines how - // part-level checksums are combined to create an object-level checksum for - // multipart objects. You can use this header as a data integrity check to verify - // that the checksum type that is received is the same checksum that was specified. - // If the checksum type doesn’t match the checksum type that was specified for the - // object during the CreateMultipartUpload request, it’ll result in a BadDigest - // error. For more information, see Checking object integrity in the Amazon S3 User - // Guide. - ChecksumType types.ChecksumType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Uploads the object only if the ETag (entity tag) value provided during the - // WRITE operation matches the ETag of the object in S3. If the ETag values do not - // match, the operation returns a 412 Precondition Failed error. - // - // If a conflicting operation occurs during the upload S3 returns a 409 - // ConditionalRequestConflict response. On a 409 failure you should fetch the - // object's ETag, re-initiate the multipart upload with CreateMultipartUpload , and - // re-upload each part. - // - // Expects the ETag value as a string. - // - // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 - // User Guide. - // - // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // Uploads the object only if the object key name does not already exist in the - // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. - // - // If a conflicting operation occurs during the upload S3 returns a 409 - // ConditionalRequestConflict response. On a 409 failure you should re-initiate the - // multipart upload with CreateMultipartUpload and re-upload each part. - // - // Expects the '*' (asterisk) character. - // - // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 - // User Guide. 
- // - // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfNoneMatch *string - - // The expected total object size of the multipart upload request. If there’s a - // mismatch between the specified object size value and the actual object size - // value, it results in an HTTP 400 InvalidRequest error. - MpuObjectSize *string - - // The container for the multipart upload request information. - MultipartUpload *types.CompletedMultipartUpload - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is required only when the object was created using a checksum - // algorithm or if your bucket policy requires the use of SSE-C. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key - SSECustomerAlgorithm *string - - // The server-side encryption (SSE) customer managed key. This parameter is needed - // only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKey *string - - // The MD5 server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - noSmithyDocumentSerde -} - -func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type CompleteMultipartUploadOutput struct { - - // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. - // - // Access points are not supported by directory buckets. - Bucket *string - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // The Base64 encoded, 32-bit CRC-32 checksum of the object. 
This checksum is only - // present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is - // only present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC-64NVME checksum of the object. The CRC-64NVME - // checksum is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be - // present if the checksum was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be - // present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // The checksum type, which determines how part-level checksums are combined to - // create an object-level checksum for multipart objects. 
You can use this header - // as a data integrity check to verify that the checksum type that is received is - // the same checksum type that was specified during the CreateMultipartUpload - // request. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Entity tag that identifies the newly created object's data. Objects with - // different object data will have different entity tags. The entity tag is an - // opaque string. The entity tag may or may not be an MD5 digest of the object - // data. If the entity tag is not an MD5 digest of the object data, it will contain - // one or more nonhexadecimal characters and/or will consist of less than 32 or - // more than 32 hexadecimal digits. For more information about how the entity tag - // is calculated, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ETag *string - - // If the object expiration is configured, this will contain the expiration date ( - // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. - // - // This functionality is not supported for directory buckets. - Expiration *string - - // The object key of the newly created object. - Key *string - - // The URI that identifies the newly created object. - Location *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). - ServerSideEncryption types.ServerSideEncryption - - // Version ID of the newly created object, in case the bucket has versioning - // turned on. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCompleteMultipartUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCompleteMultipartUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CompleteMultipartUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteMultipartUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - 
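For reviewers tracking what moves out of vendor/ here: the CompleteMultipartUpload operation deleted above is still consumed from the upstream github.com/aws/aws-sdk-go-v2/service/s3 module, now resolved through go.mod/go.sum rather than this vendored copy. Below is a minimal sketch of a typical call; the bucket name, object key, upload ID, and part ETags are placeholders and do not come from this repository.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	// Credentials and region are taken from the environment / shared config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder values: in practice UploadId comes from CreateMultipartUpload
	// and each ETag from the corresponding UploadPart response.
	out, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("backups/example.tar"),
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),
		MultipartUpload: &types.CompletedMultipartUpload{
			// Parts must be listed in ascending part-number order;
			// otherwise S3 responds with InvalidPartOrder (see the error list above).
			Parts: []types.CompletedPart{
				{PartNumber: aws.Int32(1), ETag: aws.String(`"etag-of-part-1"`)},
				{PartNumber: aws.Int32(2), ETag: aws.String(`"etag-of-part-2"`)},
			},
		},
	})
	if err != nil {
		log.Fatalf("complete multipart upload: %v", err)
	}
	log.Printf("completed: ETag=%s Location=%s", aws.ToString(out.ETag), aws.ToString(out.Location))
}

The call site is unchanged by removing vendor/; only the source of the package shifts to the module cache pinned in go.sum.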
-func (v *CompleteMultipartUploadInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCompleteMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CompleteMultipartUpload", - } -} - -// getCompleteMultipartUploadBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getCompleteMultipartUploadBucketMember(input interface{}) (*string, bool) { - in := input.(*CompleteMultipartUploadInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCompleteMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCompleteMultipartUploadBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go deleted file mode 100644 index d4329b7e7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go +++ /dev/null @@ -1,1044 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Creates a copy of an object that is already stored in Amazon S3. -// -// You can store individual objects of up to 5 TB in Amazon S3. You create a copy -// of your object up to 5 GB in size in a single atomic action using this API. -// However, to copy an object greater than 5 GB, you must use the multipart upload -// Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API]. -// -// You can copy individual objects between general purpose buckets, between -// directory buckets, and between general purpose buckets and directory buckets. -// -// - Amazon S3 supports copy operations using Multi-Region Access Points only as -// a destination when using the Multi-Region Access Point ARN. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. 
For more information -// about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// - VPC endpoints don't support cross-Region requests (including copies). If -// you're using VPC endpoints, your source and destination buckets should be in the -// same Amazon Web Services Region as your VPC endpoint. -// -// Both the Region that you want to copy the object from and the Region that you -// want to copy the object to must be enabled for your account. For more -// information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon -// Web Services Account Management Guide. -// -// Amazon S3 transfer acceleration does not support cross-Region copies. If you -// request a cross-Region copy using a transfer acceleration endpoint, you get a -// 400 Bad Request error. For more information, see [Transfer Acceleration]. -// -// Authentication and authorization All CopyObject requests must be authenticated -// and signed by using IAM credentials (access key ID and secret access key for the -// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source -// , must be signed. For more information, see [REST Authentication]. -// -// Directory buckets - You must use the IAM credentials to authenticate and -// authorize your access to the CopyObject API operation, instead of using the -// temporary security credentials through the CreateSession API operation. -// -// Amazon Web Services CLI or SDKs handles authentication and authorization on -// your behalf. -// -// Permissions You must have read access to the source object and write access to -// the destination bucket. -// -// - General purpose bucket permissions - You must have permissions in an IAM -// policy based on the source and destination bucket types in a CopyObject -// operation. -// -// - If the source object is in a general purpose bucket, you must have -// s3:GetObject permission to read the source object that is being copied. -// -// - If the destination bucket is a general purpose bucket, you must have -// s3:PutObject permission to write the object copy to the destination bucket. -// -// - Directory bucket permissions - You must have permissions in a bucket policy -// or an IAM identity-based policy based on the source and destination bucket types -// in a CopyObject operation. -// -// - If the source object that you want to copy is in a directory bucket, you -// must have the s3express:CreateSession permission in the Action element of a -// policy to read the object. By default, the session is in the ReadWrite mode. -// If you want to restrict the access, you can explicitly set the -// s3express:SessionMode condition key to ReadOnly on the copy source bucket. -// -// - If the copy destination is a directory bucket, you must have the -// s3express:CreateSession permission in the Action element of a policy to write -// the object to the destination. The s3express:SessionMode condition key can't -// be set to ReadOnly on the copy destination bucket. -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. 
-// -// Response and special errors When the request is an HTTP 1.1 request, the -// response is chunk encoded. When the request is not an HTTP 1.1 request, the -// response would not contain the Content-Length . You always need to read the -// entire response body to check if the copy succeeds. -// -// - If the copy is successful, you receive a response with information about -// the copied object. -// -// - A copy request might return an error when Amazon S3 receives the copy -// request or while Amazon S3 is copying the files. A 200 OK response can contain -// either a success or an error. -// -// - If the error occurs before the copy action starts, you receive a standard -// Amazon S3 error. -// -// - If the error occurs during the copy operation, the error response is -// embedded in the 200 OK response. For example, in a cross-region copy, you may -// encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3] -// . The 200 OK status code means the copy was accepted, but it doesn't mean the -// copy is complete. Another example is when you disconnect from Amazon S3 before -// the copy is complete, Amazon S3 might cancel the copy and you may receive a -// 200 OK response. You must stay connected to Amazon S3 until the entire -// response is successfully received and processed. -// -// If you call this API operation directly, make sure to design your application -// -// to parse the content of the response and handle it appropriately. If you use -// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the -// embedded error and apply error handling per your configuration settings -// (including automatically retrying the request as appropriate). If the condition -// persists, the SDKs throw an exception (or, for the SDKs that don't use -// exceptions, they return an error). -// -// Charge The copy request charge is based on the storage class and Region that -// you specify for the destination object. The request can also result in a data -// retrieval charge for the source if the source storage class bills for data -// retrieval. If the copy source is in a different region, the data transfer is -// billed to the copy source account. For pricing information, see [Amazon S3 pricing]. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
-// -// The following operations are related to CopyObject : -// -// [PutObject] -// -// [GetObject] -// -// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html -// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror -// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html -// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone -// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/ -func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { - if params == nil { - params = &CopyObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CopyObject", params, optFns, c.addOperationCopyObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CopyObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CopyObjectInput struct { - - // The name of the destination bucket. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Copying objects across different Amazon Web Services Regions isn't supported - // when the source or destination bucket is in Amazon Web Services Local Zones. The - // source and destination buckets must have the same parent Amazon Web Services - // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code - // InvalidRequest . - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Specifies the source object for the copy operation. The source object can be up - // to 5 GB. If the source object is an object that was uploaded by using a - // multipart upload, the object copy will be a single part object after the source - // object is copied to the destination bucket. - // - // You specify the value of the copy source in one of two formats, depending on - // whether you want to access the source object through an [access point]: - // - // - For objects not accessed through an access point, specify the name of the - // source bucket and the key of the source object, separated by a slash (/). For - // example, to copy the object reports/january.pdf from the general purpose - // bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value - // must be URL-encoded. To copy the object reports/january.pdf from the directory - // bucket awsexamplebucket--use1-az5--x-s3 , use - // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be - // URL-encoded. - // - // - For objects accessed through access points, specify the Amazon Resource - // Name (ARN) of the object as accessed through the access point, in the format - // arn:aws:s3:::accesspoint//object/ . For example, to copy the object - // reports/january.pdf through access point my-access-point owned by account - // 123456789012 in Region us-west-2 , use the URL encoding of - // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf - // . The value must be URL encoded. - // - // - Amazon S3 supports copy operations using Access points only when the source - // and destination buckets are in the same Amazon Web Services Region. - // - // - Access points are not supported by directory buckets. - // - // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the - // ARN of the object as accessed in the format - // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object - // reports/january.pdf through outpost my-outpost owned by account 123456789012 - // in Region us-west-2 , use the URL encoding of - // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf - // . The value must be URL-encoded. 
- // - // If your source bucket versioning is enabled, the x-amz-copy-source header by - // default identifies the current version of an object to copy. If the current - // version is a delete marker, Amazon S3 behaves as if the object was deleted. To - // copy a different version, use the versionId query parameter. Specifically, - // append ?versionId= to the value (for example, - // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. - // - // If you enable versioning on the destination bucket, Amazon S3 generates a - // unique version ID for the copied object. This version ID is different from the - // version ID of the source object. Amazon S3 returns the version ID of the copied - // object in the x-amz-version-id response header in the response. - // - // If you do not enable versioning or suspend it on the destination bucket, the - // version ID that Amazon S3 generates in the x-amz-version-id response header is - // always null. - // - // Directory buckets - S3 Versioning isn't enabled and supported for directory - // buckets. - // - // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html - // - // This member is required. - CopySource *string - - // The key of the destination object. - // - // This member is required. - Key *string - - // The canned access control list (ACL) to apply to the object. - // - // When you copy an object, the ACL metadata is not preserved and is set to private - // by default. Only the owner has full access control. To override the default ACL - // setting, specify a new ACL when you generate a copy request. For more - // information, see [Using ACLs]. - // - // If the destination bucket that you're copying objects to uses the bucket owner - // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect - // permissions. Buckets that use this setting only accept PUT requests that don't - // specify an ACL or PUT requests that specify bucket owner full control ACLs, - // such as the bucket-owner-full-control canned ACL or an equivalent form of this - // ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 - // User Guide. - // - // - If your destination bucket uses the bucket owner enforced setting for - // Object Ownership, all objects written to the bucket by any account will be owned - // by the bucket owner. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html - // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html - ACL types.ObjectCannedACL - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. - // - // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t - // affect bucket-level settings for S3 Bucket Key. - // - // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide. 
- // - // Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS - // encrypted objects from general purpose buckets to directory buckets, from - // directory buckets to general purpose buckets, or between directory buckets, - // through [CopyObject]. In this case, Amazon S3 makes a call to KMS every time a copy request - // is made for a KMS-encrypted object. - // - // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - BucketKeyEnabled *bool - - // Specifies the caching behavior along the request/reply chain. - CacheControl *string - - // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // When you copy an object, if the source object has a checksum, that checksum - // value will be copied to the new object by default. If the CopyObject request - // does not include this x-amz-checksum-algorithm header, the checksum algorithm - // will be copied from the source object to the destination object (if it's present - // on the source object). You can optionally specify a different checksum algorithm - // to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported - // values will respond with the HTTP status code 400 Bad Request . - // - // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the - // default checksum algorithm that's used for performance. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // Specifies presentational information for the object. Indicates whether an - // object should be displayed in a web browser or downloaded as a file. It allows - // specifying the desired filename for the downloaded file. - ContentDisposition *string - - // Specifies what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - // - // For directory buckets, only the aws-chunked value is supported in this header - // field. - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // A standard MIME type that describes the format of the object data. - ContentType *string - - // Copies the object if its entity tag (ETag) matches the specified tag. - // - // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request and evaluate as follows, Amazon S3 returns - // 200 OK and copies the data: - // - // - x-amz-copy-source-if-match condition evaluates to true - // - // - x-amz-copy-source-if-unmodified-since condition evaluates to false - CopySourceIfMatch *string - - // Copies the object if it has been modified since the specified time. - // - // If both the x-amz-copy-source-if-none-match and - // x-amz-copy-source-if-modified-since headers are present in the request and - // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response - // code: - // - // - x-amz-copy-source-if-none-match condition evaluates to false - // - // - x-amz-copy-source-if-modified-since condition evaluates to true - CopySourceIfModifiedSince *time.Time - - // Copies the object if its entity tag (ETag) is different than the specified ETag. 
- // - // If both the x-amz-copy-source-if-none-match and - // x-amz-copy-source-if-modified-since headers are present in the request and - // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response - // code: - // - // - x-amz-copy-source-if-none-match condition evaluates to false - // - // - x-amz-copy-source-if-modified-since condition evaluates to true - CopySourceIfNoneMatch *string - - // Copies the object if it hasn't been modified since the specified time. - // - // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request and evaluate as follows, Amazon S3 returns - // 200 OK and copies the data: - // - // - x-amz-copy-source-if-match condition evaluates to true - // - // - x-amz-copy-source-if-unmodified-since condition evaluates to false - CopySourceIfUnmodifiedSince *time.Time - - // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). - // - // If the source object for the copy is stored in Amazon S3 using SSE-C, you must - // provide the necessary encryption information in your request so that Amazon S3 - // can decrypt the object for copying. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be the same - // one that was used when the source object was created. - // - // If the source object for the copy is stored in Amazon S3 using SSE-C, you must - // provide the necessary encryption information in your request so that Amazon S3 - // can decrypt the object for copying. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // If the source object for the copy is stored in Amazon S3 using SSE-C, you must - // provide the necessary encryption information in your request so that Amazon S3 - // can decrypt the object for copying. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerKeyMD5 *string - - // The account ID of the expected destination bucket owner. If the account ID that - // you provide does not match the actual owner of the destination bucket, the - // request fails with the HTTP status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The account ID of the expected source bucket owner. If the account ID that you - // provide does not match the actual owner of the source bucket, the request fails - // with the HTTP status code 403 Forbidden (access denied). - ExpectedSourceBucketOwner *string - - // The date and time at which the object is no longer cacheable. - Expires *time.Time - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantFullControl *string - - // Allows grantee to read the object data and its metadata. - // - // - This functionality is not supported for directory buckets. 
- // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantRead *string - - // Allows grantee to read the object ACL. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantReadACP *string - - // Allows grantee to write the ACL for the applicable object. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantWriteACP *string - - // A map of metadata to store with the object in S3. - Metadata map[string]string - - // Specifies whether the metadata is copied from the source object or replaced - // with metadata that's provided in the request. When copying an object, you can - // preserve all metadata (the default) or specify new metadata. If this header - // isn’t specified, COPY is the default behavior. - // - // General purpose bucket - For general purpose buckets, when you grant - // permissions, you can use the s3:x-amz-metadata-directive condition key to - // enforce certain metadata behavior when objects are uploaded. For more - // information, see [Amazon S3 condition key examples]in the Amazon S3 User Guide. - // - // x-amz-website-redirect-location is unique to each object and is not copied when - // using the x-amz-metadata-directive header. To copy the value, you must specify - // x-amz-website-redirect-location in the request header. - // - // [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html - MetadataDirective types.MetadataDirective - - // Specifies whether you want to apply a legal hold to the object copy. - // - // This functionality is not supported for directory buckets. - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // The Object Lock mode that you want to apply to the object copy. - // - // This functionality is not supported for directory buckets. - ObjectLockMode types.ObjectLockMode - - // The date and time when you want the Object Lock of the object copy to expire. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256 ). - // - // When you perform a CopyObject operation, if you want to use a different type of - // encryption setting for the target object, you can specify appropriate - // encryption-related headers to encrypt the target object with an Amazon S3 - // managed key, a KMS key, or a customer-provided key. If the encryption setting in - // your request is different from the default encryption configuration of the - // destination bucket, the encryption setting in your request takes precedence. 
- // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded. Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerKeyMD5 *string - - // Specifies the Amazon Web Services KMS Encryption Context as an additional - // encryption context to use for the destination object encryption. The value of - // this header is a base64-encoded UTF-8 string holding JSON with the encryption - // context key-value pairs. - // - // General purpose buckets - This value must be explicitly added to specify - // encryption context for CopyObject requests if you want an additional encryption - // context for your destination object. The additional encryption context of the - // source object won't be copied to the destination object. For more information, - // see [Encryption context]in the Amazon S3 User Guide. - // - // Directory buckets - You can optionally provide an explicit encryption context - // value. The value must match the default encryption context - the bucket Amazon - // Resource Name (ARN). An additional encryption context value is not supported. - // - // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context - SSEKMSEncryptionContext *string - - // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object - // encryption. All GET and PUT requests for an object protected by KMS will fail if - // they're not made via SSL or using SigV4. For information about configuring any - // of the officially supported Amazon Web Services SDKs and Amazon Web Services - // CLI, see [Specifying the Signature Version in Request Authentication]in the Amazon S3 User Guide. - // - // Directory buckets - If you specify x-amz-server-side-encryption with aws:kms , - // the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned - // the ID of the KMS symmetric encryption customer managed key that's configured - // for your directory bucket's default encryption setting. If you want to specify - // the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only - // specify it with the ID (Key ID or Key ARN) of the KMS customer managed key - // that's configured for your directory bucket's default encryption setting. - // Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key - // ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS - // configuration can only support 1 [customer managed key]per directory bucket for the lifetime of the - // bucket. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. 
- // - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - SSEKMSKeyId *string - - // The server-side encryption algorithm used when storing this object in Amazon - // S3. Unrecognized or unsupported values won’t write a destination object and will - // receive a 400 Bad Request response. - // - // Amazon S3 automatically encrypts all new objects that are copied to an S3 - // bucket. When copying an object, if you don't specify encryption information in - // your copy request, the encryption setting of the target object is set to the - // default encryption configuration of the destination bucket. By default, all - // buckets have a base level of encryption configuration that uses server-side - // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a - // different default encryption configuration, Amazon S3 uses the corresponding - // encryption key to encrypt the target object copy. - // - // With server-side encryption, Amazon S3 encrypts your data as it writes your - // data to disks in its data centers and decrypts the data when you access it. For - // more information about server-side encryption, see [Using Server-Side Encryption]in the Amazon S3 User Guide. - // - // General purpose buckets - // - // - For general purpose buckets, there are the following supported options for - // server-side encryption: server-side encryption with Key Management Service (KMS) - // keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS - // keys (DSSE-KMS), and server-side encryption with customer-provided encryption - // keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided - // key to encrypt the target object copy. - // - // - When you perform a CopyObject operation, if you want to use a different type - // of encryption setting for the target object, you can specify appropriate - // encryption-related headers to encrypt the target object with an Amazon S3 - // managed key, a KMS key, or a customer-provided key. If the encryption setting in - // your request is different from the default encryption configuration of the - // destination bucket, the encryption setting in your request takes precedence. - // - // Directory buckets - // - // - For directory buckets, there are only two supported options for server-side - // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( - // AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). We - // recommend that the bucket's default encryption uses the desired encryption - // configuration and you don't override the bucket default encryption in your - // CreateSession requests or PUT object requests. Then, new objects are - // automatically encrypted with the desired encryption settings. For more - // information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the - // encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. 
- // - // - To encrypt new object copies to a directory bucket with SSE-KMS, we - // recommend you specify SSE-KMS as the directory bucket's default encryption - // configuration with a KMS key (specifically, a [customer managed key]). The [Amazon Web Services managed key]( aws/s3 ) isn't - // supported. Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket - // for the lifetime of the bucket. After you specify a customer managed key for - // SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS - // configuration. Then, when you perform a CopyObject operation and want to - // specify server-side encryption settings for new object copies with SSE-KMS in - // the encryption-related request headers, you must ensure the encryption key is - // the same customer managed key that you specified for the directory bucket's - // default encryption configuration. - // - // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html - // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - ServerSideEncryption types.ServerSideEncryption - - // If the x-amz-storage-class header is not used, the copied object will be stored - // in the STANDARD Storage Class by default. The STANDARD storage class provides - // high durability and high availability. Depending on performance needs, you can - // specify a different Storage Class. - // - // - Directory buckets - For directory buckets, only the S3 Express One Zone - // storage class is supported to store newly created objects. Unsupported storage - // class values won't write a destination object and will respond with the HTTP - // status code 400 Bad Request . - // - // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. - // - // You can use the CopyObject action to change the storage class of an object that - // is already stored in Amazon S3 by using the x-amz-storage-class header. For - // more information, see [Storage Classes]in the Amazon S3 User Guide. - // - // Before using an object as a source object for the copy operation, you must - // restore a copy of it if it meets any of the following conditions: - // - // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . - // - // - The storage class of the source object is INTELLIGENT_TIERING and it's [S3 Intelligent-Tiering access tier]is - // Archive Access or Deep Archive Access . - // - // For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide. 
- // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html - // [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html - // [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition - StorageClass types.StorageClass - - // The tag-set for the object copy in the destination bucket. This value must be - // used in conjunction with the x-amz-tagging-directive if you choose REPLACE for - // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive - // , you don't need to set the x-amz-tagging header, because the tag-set will be - // copied from the source object directly. The tag-set must be encoded as URL Query - // parameters. - // - // The default value is the empty value. - // - // Directory buckets - For directory buckets in a CopyObject operation, only the - // empty tag-set is supported. Any requests that attempt to write non-empty tags - // into directory buckets will receive a 501 Not Implemented status code. When the - // destination bucket is a directory bucket, you will receive a 501 Not Implemented - // response in any of the following situations: - // - // - When you attempt to COPY the tag-set from an S3 source object that has - // non-empty tags. - // - // - When you attempt to REPLACE the tag-set of a source object and set a - // non-empty value to x-amz-tagging . - // - // - When you don't set the x-amz-tagging-directive header and the source object - // has non-empty tags. This is because the default value of - // x-amz-tagging-directive is COPY . - // - // Because only the empty tag-set is supported for directory buckets in a - // CopyObject operation, the following situations are allowed: - // - // - When you attempt to COPY the tag-set from a directory bucket source object - // that has no tags to a general purpose bucket. It copies an empty tag-set to the - // destination object. - // - // - When you attempt to REPLACE the tag-set of a directory bucket source object - // and set the x-amz-tagging value of the directory bucket destination object to - // empty. - // - // - When you attempt to REPLACE the tag-set of a general purpose bucket source - // object that has non-empty tags and set the x-amz-tagging value of the - // directory bucket destination object to empty. - // - // - When you attempt to REPLACE the tag-set of a directory bucket source object - // and don't set the x-amz-tagging value of the directory bucket destination - // object. This is because the default value of x-amz-tagging is the empty value. - Tagging *string - - // Specifies whether the object tag-set is copied from the source object or - // replaced with the tag-set that's provided in the request. - // - // The default value is COPY . - // - // Directory buckets - For directory buckets in a CopyObject operation, only the - // empty tag-set is supported. Any requests that attempt to write non-empty tags - // into directory buckets will receive a 501 Not Implemented status code. When the - // destination bucket is a directory bucket, you will receive a 501 Not Implemented - // response in any of the following situations: - // - // - When you attempt to COPY the tag-set from an S3 source object that has - // non-empty tags. 
- // - // - When you attempt to REPLACE the tag-set of a source object and set a - // non-empty value to x-amz-tagging . - // - // - When you don't set the x-amz-tagging-directive header and the source object - // has non-empty tags. This is because the default value of - // x-amz-tagging-directive is COPY . - // - // Because only the empty tag-set is supported for directory buckets in a - // CopyObject operation, the following situations are allowed: - // - // - When you attempt to COPY the tag-set from a directory bucket source object - // that has no tags to a general purpose bucket. It copies an empty tag-set to the - // destination object. - // - // - When you attempt to REPLACE the tag-set of a directory bucket source object - // and set the x-amz-tagging value of the directory bucket destination object to - // empty. - // - // - When you attempt to REPLACE the tag-set of a general purpose bucket source - // object that has non-empty tags and set the x-amz-tagging value of the - // directory bucket destination object to empty. - // - // - When you attempt to REPLACE the tag-set of a directory bucket source object - // and don't set the x-amz-tagging value of the directory bucket destination - // object. This is because the default value of x-amz-tagging is the empty value. - TaggingDirective types.TaggingDirective - - // If the destination bucket is configured as a website, redirects requests for - // this object copy to another object in the same bucket or to an external URL. - // Amazon S3 stores the value of this header in the object metadata. This value is - // unique to each object and is not copied when using the x-amz-metadata-directive - // header. Instead, you may opt to provide this header in combination with the - // x-amz-metadata-directive header. - // - // This functionality is not supported for directory buckets. - WebsiteRedirectLocation *string - - noSmithyDocumentSerde -} - -func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.CopySource = in.CopySource - p.Key = in.Key - p.DisableS3ExpressSessionAuth = ptr.Bool(true) -} - -type CopyObjectOutput struct { - - // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // Container for all response elements. - CopyObjectResult *types.CopyObjectResult - - // Version ID of the source object that was copied. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceVersionId *string - - // If the object expiration is configured, the response includes this header. - // - // Object expiration information is not returned in directory buckets and this - // header returns the value " NotImplemented " in all responses for directory - // buckets. - Expiration *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. 
- SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a Base64 encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. - SSEKMSEncryptionContext *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). - ServerSideEncryption types.ServerSideEncryption - - // Version ID of the newly created copy. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCopyObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CopyObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpCopyObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopyObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = 
addRecursionDetection(stack); err != nil { - return err - } - if err = addCopyObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CopyObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CopyObject", - } -} - -// getCopyObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getCopyObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*CopyObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCopyObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCopyObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go deleted file mode 100644 index 6b4283e9f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go +++ /dev/null @@ -1,414 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts -// bucket, see [CreateBucket]CreateBucket . -// -// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have -// a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous -// requests are never allowed to create buckets. 
By creating the bucket, you become -// the bucket owner. -// -// There are two types of buckets: general purpose buckets and directory buckets. -// For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets]in the Amazon S3 User Guide. -// -// - General purpose buckets - If you send your CreateBucket request to the -// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So -// the signature calculations in Signature Version 4 must use us-east-1 as the -// Region, even if the location constraint in the request specifies another Region -// where the bucket is to be created. If you create a bucket in a Region other than -// US East (N. Virginia), your application must be able to handle 307 redirect. For -// more information, see [Virtual hosting of buckets]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Regional endpoint. These endpoints support path-style -// requests in the format -// https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - In addition to the s3:CreateBucket -// permission, the following permissions are required in a policy when your -// CreateBucket request includes specific headers: -// -// - Access control lists (ACLs) - In your CreateBucket request, if you specify -// an access control list (ACL) and set it to public-read , public-read-write , -// authenticated-read , or if you explicitly specify any other custom ACLs, both -// s3:CreateBucket and s3:PutBucketAcl permissions are required. In your -// CreateBucket request, if you set the ACL to private , or if you don't specify -// any ACLs, only the s3:CreateBucket permission is required. -// -// - Object Lock - In your CreateBucket request, if you set -// x-amz-bucket-object-lock-enabled to true, the -// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are -// required. -// -// - S3 Object Ownership - If your CreateBucket request includes the -// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls -// permission is required. -// -// To set an ACL on a bucket as part of a CreateBucket request, you must explicitly -// -// set S3 Object Ownership for the bucket to a different value than the default, -// BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public -// access, you must first create the bucket (without the bucket ACL) and then -// explicitly disable Block Public Access on the bucket before using PutBucketAcl -// to set the ACL. If you try to create a bucket with a public ACL, the request -// will fail. -// -// For the majority of modern use cases in S3, we recommend that you keep all -// -// Block Public Access settings enabled and keep ACLs disabled. If you would like -// to share data with users outside of your account, you can use bucket policies as -// needed. For more information, see [Controlling ownership of objects and disabling ACLs for your bucket]and [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. 
-// -// - S3 Block Public Access - If your specific use case requires granting public -// access to your S3 resources, you can disable Block Public Access. Specifically, -// you can create a new bucket with Block Public Access enabled, then separately -// call the [DeletePublicAccessBlock]DeletePublicAccessBlock API. To use this operation, you must have the -// s3:PutBucketPublicAccessBlock permission. For more information about S3 Block -// Public Access, see [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - You must have the s3express:CreateBucket -// permission in an IAM identity-based policy instead of a bucket policy. -// Cross-account access to this API operation isn't supported. This operation can -// only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the -// Amazon S3 User Guide. -// -// The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public -// -// Access are not supported for directory buckets. For directory buckets, all Block -// Public Access settings are enabled at the bucket level and S3 Object Ownership -// is set to Bucket owner enforced (ACLs disabled). These settings can't be -// modified. -// -// For more information about permissions for creating and working with directory -// -// buckets, see [Directory buckets]in the Amazon S3 User Guide. For more information about -// supported S3 features for directory buckets, see [Features of S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . 
-// -// The following operations are related to CreateBucket : -// -// [PutObject] -// -// [DeleteBucket] -// -// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html -// [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// -// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html -// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html -// [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -// [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html -func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { - if params == nil { - params = &CreateBucketInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateBucket", params, optFns, c.addOperationCreateBucketMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateBucketOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateBucketInput struct { - - // The name of the bucket to create. - // - // General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules] - // in the Amazon S3 User Guide. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html - // - // This member is required. - Bucket *string - - // The canned ACL to apply to the bucket. - // - // This functionality is not supported for directory buckets. - ACL types.BucketCannedACL - - // The configuration information for the bucket. 
- CreateBucketConfiguration *types.CreateBucketConfiguration - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - // - // This functionality is not supported for directory buckets. - GrantFullControl *string - - // Allows grantee to list the objects in the bucket. - // - // This functionality is not supported for directory buckets. - GrantRead *string - - // Allows grantee to read the bucket ACL. - // - // This functionality is not supported for directory buckets. - GrantReadACP *string - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions and - // overwrites of those objects. - // - // This functionality is not supported for directory buckets. - GrantWrite *string - - // Allows grantee to write the ACL for the applicable bucket. - // - // This functionality is not supported for directory buckets. - GrantWriteACP *string - - // Specifies whether you want S3 Object Lock to be enabled for the new bucket. - // - // This functionality is not supported for directory buckets. - ObjectLockEnabledForBucket *bool - - // The container element for object ownership for a bucket's ownership controls. - // - // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the - // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. - // - // ObjectWriter - The uploading account will own the object if the object is - // uploaded with the bucket-owner-full-control canned ACL. - // - // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer - // affect permissions. The bucket owner automatically owns and has full control - // over every object in the bucket. The bucket only accepts PUT requests that don't - // specify an ACL or specify bucket owner full control ACLs (such as the predefined - // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). - // - // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are - // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where - // you must control access for each object individually. For more information about - // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. Directory buckets - // use the bucket owner enforced setting for S3 Object Ownership. - // - // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html - ObjectOwnership types.ObjectOwnership - - noSmithyDocumentSerde -} - -func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) - p.DisableAccessPoints = ptr.Bool(true) -} - -type CreateBucketOutput struct { - - // A forward slash followed by the name of the bucket. - Location *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucket{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucket{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucket"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpCreateBucketValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucket(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateBucketUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opCreateBucket(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateBucket", - } -} - -// getCreateBucketBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getCreateBucketBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateBucketInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateBucketBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: false, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go deleted file mode 100644 index cf89007da..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go +++ /dev/null @@ -1,290 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates a metadata table configuration for a general purpose bucket. For more -// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// Permissions To use this operation, you must have the following permissions. For -// more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. -// -// If you also want to integrate your table bucket with Amazon Web Services -// analytics services so that you can query your metadata table, you need -// additional permissions. For more information, see [Integrating Amazon S3 Tables with Amazon Web Services analytics services]in the Amazon S3 User Guide. 
-// -// - s3:CreateBucketMetadataTableConfiguration -// -// - s3tables:CreateNamespace -// -// - s3tables:GetTable -// -// - s3tables:CreateTable -// -// - s3tables:PutTablePolicy -// -// The following operations are related to CreateBucketMetadataTableConfiguration : -// -// [DeleteBucketMetadataTableConfiguration] -// -// [GetBucketMetadataTableConfiguration] -// -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html -// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -// [Integrating Amazon S3 Tables with Amazon Web Services analytics services]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-aws.html -func (c *Client) CreateBucketMetadataTableConfiguration(ctx context.Context, params *CreateBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*CreateBucketMetadataTableConfigurationOutput, error) { - if params == nil { - params = &CreateBucketMetadataTableConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateBucketMetadataTableConfiguration", params, optFns, c.addOperationCreateBucketMetadataTableConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateBucketMetadataTableConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateBucketMetadataTableConfigurationInput struct { - - // The general purpose bucket that you want to create the metadata table - // configuration in. - // - // This member is required. - Bucket *string - - // The contents of your metadata table configuration. - // - // This member is required. - MetadataTableConfiguration *types.MetadataTableConfiguration - - // The checksum algorithm to use with your metadata table configuration. - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Content-MD5 header for the metadata table configuration. - ContentMD5 *string - - // The expected owner of the general purpose bucket that contains your metadata - // table configuration. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *CreateBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type CreateBucketMetadataTableConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucketMetadataTableConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { 
- return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateBucketMetadataTableConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateBucketMetadataTableConfiguration", - } -} - -// getCreateBucketMetadataTableConfigurationRequestAlgorithmMember gets the -// request checksum algorithm value provided as input. -func getCreateBucketMetadataTableConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*CreateBucketMetadataTableConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getCreateBucketMetadataTableConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getCreateBucketMetadataTableConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getCreateBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateBucketMetadataTableConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateBucketMetadataTableConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go deleted file mode 100644 index cce928245..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go +++ /dev/null @@ -1,998 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// This action initiates a multipart upload and returns an upload ID. This upload -// ID is used to associate all of the parts in the specific multipart upload. You -// specify this upload ID in each of your subsequent upload part requests (see [UploadPart]). -// You also include this upload ID in the final request to either complete or abort -// the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview] -// in the Amazon S3 User Guide. -// -// After you initiate a multipart upload and upload one or more parts, to stop -// being charged for storing the uploaded parts, you must either complete or abort -// the multipart upload. Amazon S3 frees up the space used to store the parts and -// stops charging you for storing them only after you either complete or abort a -// multipart upload. -// -// If you have configured a lifecycle rule to abort incomplete multipart uploads, -// the created multipart upload must be completed within the number of days -// specified in the bucket lifecycle configuration. Otherwise, the incomplete -// multipart upload becomes eligible for an abort action and Amazon S3 aborts the -// multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. -// -// - Directory buckets - S3 Lifecycle is not supported by directory buckets. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Request signing For request signing, multipart upload is just a series of -// regular requests. You initiate a multipart upload, send one or more requests to -// upload parts, and then complete the multipart upload process. You sign each -// request individually. There is nothing special about signing multipart upload -// requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - To perform a multipart upload with -// encryption using an Key Management Service (KMS) KMS key, the requester must -// have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. -// The requester must also have permissions for the kms:GenerateDataKey action -// for the CreateMultipartUpload API. Then, the requester needs permissions for -// the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These -// permissions are required because Amazon S3 must decrypt and read data from the -// encrypted file parts before it completes the multipart upload. 
For more -// information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// Encryption -// -// - General purpose buckets - Server-side encryption is for data encryption at -// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. Amazon S3 automatically encrypts all new -// objects that are uploaded to an S3 bucket. When doing a multipart upload, if you -// don't specify encryption information in your request, the encryption setting of -// the uploaded parts is set to the default encryption configuration of the -// destination bucket. By default, all buckets have a base level of encryption -// configuration that uses server-side encryption with Amazon S3 managed keys -// (SSE-S3). If the destination bucket has a default encryption configuration that -// uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), -// or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding -// KMS key, or a customer-provided key to encrypt the uploaded parts. When you -// perform a CreateMultipartUpload operation, if you want to use a different type -// of encryption setting for the uploaded parts, you can request that Amazon S3 -// encrypts the object with a different encryption key (such as an Amazon S3 -// managed key, a KMS key, or a customer-provided key). When the encryption setting -// in your request is different from the default encryption configuration of the -// destination bucket, the encryption setting in your request takes precedence. If -// you choose to provide your own encryption key, the request headers you provide -// in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload -// request. -// -// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( -// aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) -// – If you want Amazon Web Services to manage the keys used to encrypt data, -// specify the following headers in the request. -// -// - x-amz-server-side-encryption -// -// - x-amz-server-side-encryption-aws-kms-key-id -// -// - x-amz-server-side-encryption-context -// -// - If you specify x-amz-server-side-encryption:aws:kms , but don't provide -// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web -// Services managed key ( aws/s3 key) in KMS to protect the data. 
-// -// - To perform a multipart upload with encryption by using an Amazon Web -// Services KMS key, the requester must have permission to the kms:Decrypt and -// kms:GenerateDataKey* actions on the key. These permissions are required -// because Amazon S3 must decrypt and read data from the encrypted file parts -// before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in -// the Amazon S3 User Guide. -// -// - If your Identity and Access Management (IAM) user or role is in the same -// Amazon Web Services account as the KMS key, then you must have these permissions -// on the key policy. If your IAM user or role is in a different account from the -// key, then you must have the permissions on both the key policy and your IAM user -// or role. -// -// - All GET and PUT requests for an object protected by KMS fail if you don't -// make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), -// or Signature Version 4. For information about configuring any of the officially -// supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the -// Amazon S3 User Guide. -// -// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys] -// -// in the Amazon S3 User Guide. -// -// - Use customer-provided encryption keys (SSE-C) – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about server-side encryption with customer-provided -// -// encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: server-side encryption with Amazon S3 -// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys -// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses -// the desired encryption configuration and you don't override the bucket default -// encryption in your CreateSession requests or PUT object requests. Then, new -// objects are automatically encrypted with the desired encryption settings. For -// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about -// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. -// -// In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the -// -// encryption request headers must match the encryption settings that are specified -// in the CreateSession request. You can't override the values of the encryption -// settings ( x-amz-server-side-encryption , -// x-amz-server-side-encryption-aws-kms-key-id , -// x-amz-server-side-encryption-context , and -// x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the -// CreateSession request. 
You don't need to explicitly specify these encryption -// settings values in Zonal endpoint API calls, and Amazon S3 will use the -// encryption settings values from the CreateSession request to protect new -// objects in the directory bucket. -// -// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the -// -// session token refreshes automatically to avoid service interruptions when a -// session expires. The CLI or the Amazon Web Services SDKs use the bucket's -// default encryption configuration for the CreateSession request. It's not -// supported to override the encryption settings values in the CreateSession -// request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption -// request headers must match the default encryption configuration of the directory -// bucket. -// -// For directory buckets, when you perform a CreateMultipartUpload operation and an -// -// UploadPartCopy operation, the request headers you provide in the -// CreateMultipartUpload request must match the default encryption configuration -// of the destination bucket. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following operations are related to CreateMultipartUpload : -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [AbortMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html -// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version -// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html -// [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: 
https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// -// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html -// [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { - if params == nil { - params = &CreateMultipartUploadInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateMultipartUpload", params, optFns, c.addOperationCreateMultipartUploadMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateMultipartUploadOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateMultipartUploadInput struct { - - // The name of the bucket where the multipart upload is initiated and where the - // object is uploaded. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. 
- Bucket *string - - // Object key for which the multipart upload is to be initiated. - // - // This member is required. - Key *string - - // The canned ACL to apply to the object. Amazon S3 supports a set of predefined - // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and - // permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can grant access permissions to individual Amazon - // Web Services accounts or to predefined groups defined by Amazon S3. These - // permissions are then added to the access control list (ACL) on the new object. - // For more information, see [Using ACLs]. One way to grant the permissions using the request - // headers is to specify a canned ACL with the x-amz-acl request header. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL - // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html - ACL types.ObjectCannedACL - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // - // General purpose buckets - Setting this header to true causes Amazon S3 to use - // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this - // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. - // - // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT - // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't - // supported, when you copy SSE-KMS encrypted objects from general purpose buckets - // to directory buckets, from directory buckets to general purpose buckets, or - // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a - // call to KMS every time a copy request is made for a KMS-encrypted object. - // - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops - BucketKeyEnabled *bool - - // Specifies caching behavior along the request/reply chain. - CacheControl *string - - // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // Indicates the checksum type that you want Amazon S3 to use to calculate the - // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. 
- // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Specifies presentational information for the object. - ContentDisposition *string - - // Specifies what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - // - // For directory buckets, only the aws-chunked value is supported in this header - // field. - ContentEncoding *string - - // The language that the content is in. - ContentLanguage *string - - // A standard MIME type describing the format of the object data. - ContentType *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The date and time at which the object is no longer cacheable. - Expires *time.Time - - // Specify access permissions explicitly to give the grantee READ, READ_ACP, and - // WRITE_ACP permissions on the object. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can use this header to explicitly grant access - // permissions to specific Amazon Web Services accounts or groups. This header maps - // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. - // - // You specify each grantee as a type=value pair, where the type is one of the - // following: - // - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - // - uri – if you are granting permissions to a predefined group - // - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the - // Amazon Web Services General Reference. - // - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: - // - // x-amz-grant-read: id="11112222333", id="444455556666" - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - GrantFullControl *string - - // Specify access permissions explicitly to allow grantee to read the object data - // and its metadata. - // - // By default, all objects are private. Only the owner has full access control. 
- // When uploading an object, you can use this header to explicitly grant access - // permissions to specific Amazon Web Services accounts or groups. This header maps - // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. - // - // You specify each grantee as a type=value pair, where the type is one of the - // following: - // - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - // - uri – if you are granting permissions to a predefined group - // - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the - // Amazon Web Services General Reference. - // - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: - // - // x-amz-grant-read: id="11112222333", id="444455556666" - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - GrantRead *string - - // Specify access permissions explicitly to allows grantee to read the object ACL. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can use this header to explicitly grant access - // permissions to specific Amazon Web Services accounts or groups. This header maps - // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. - // - // You specify each grantee as a type=value pair, where the type is one of the - // following: - // - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - // - uri – if you are granting permissions to a predefined group - // - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the - // Amazon Web Services General Reference. 
- // - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: - // - // x-amz-grant-read: id="11112222333", id="444455556666" - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - GrantReadACP *string - - // Specify access permissions explicitly to allows grantee to allow grantee to - // write the ACL for the applicable object. - // - // By default, all objects are private. Only the owner has full access control. - // When uploading an object, you can use this header to explicitly grant access - // permissions to specific Amazon Web Services accounts or groups. This header maps - // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. - // - // You specify each grantee as a type=value pair, where the type is one of the - // following: - // - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - // - uri – if you are granting permissions to a predefined group - // - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the - // Amazon Web Services General Reference. - // - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: - // - // x-amz-grant-read: id="11112222333", id="444455556666" - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - GrantWriteACP *string - - // A map of metadata to store with the object in S3. - Metadata map[string]string - - // Specifies whether you want to apply a legal hold to the uploaded object. - // - // This functionality is not supported for directory buckets. - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // Specifies the Object Lock mode that you want to apply to the uploaded object. - // - // This functionality is not supported for directory buckets. - ObjectLockMode types.ObjectLockMode - - // Specifies the date and time when you want the Object Lock to expire. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // Confirms that the requester knows that they will be charged for the request. 
- // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the customer-provided encryption key - // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a Base64 encoded string of a UTF-8 - // encoded JSON, which contains the encryption context as key-value pairs. - // - // Directory buckets - You can optionally provide an explicit encryption context - // value. The value must match the default encryption context - the bucket Amazon - // Resource Name (ARN). An additional encryption context value is not supported. - SSEKMSEncryptionContext *string - - // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object - // encryption. If the KMS key doesn't exist in the same account that's issuing the - // command, you must use the full Key ARN not the Key ID. - // - // General purpose buckets - If you specify x-amz-server-side-encryption with - // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key - // Alias) of the KMS key to use. If you specify - // x-amz-server-side-encryption:aws:kms or - // x-amz-server-side-encryption:aws:kms:dsse , but do not provide - // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web - // Services managed key ( aws/s3 ) to protect the data. - // - // Directory buckets - If you specify x-amz-server-side-encryption with aws:kms , - // the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned - // the ID of the KMS symmetric encryption customer managed key that's configured - // for your directory bucket's default encryption setting. If you want to specify - // the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only - // specify it with the ID (Key ID or Key ARN) of the KMS customer managed key - // that's configured for your directory bucket's default encryption setting. - // Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key - // ARN. The key alias format of the KMS key isn't supported. 
Your SSE-KMS - // configuration can only support 1 [customer managed key]per directory bucket for the lifetime of the - // bucket. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. - // - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). - // - // - Directory buckets - For directory buckets, there are only two supported - // options for server-side encryption: server-side encryption with Amazon S3 - // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys - // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses - // the desired encryption configuration and you don't override the bucket default - // encryption in your CreateSession requests or PUT object requests. Then, new - // objects are automatically encrypted with the desired encryption settings. For - // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about - // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. - // - // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the - // encryption request headers must match the encryption settings that are specified - // in the CreateSession request. You can't override the values of the encryption - // settings ( x-amz-server-side-encryption , - // x-amz-server-side-encryption-aws-kms-key-id , - // x-amz-server-side-encryption-context , and - // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the - // CreateSession request. You don't need to explicitly specify these encryption - // settings values in Zonal endpoint API calls, and Amazon S3 will use the - // encryption settings values from the CreateSession request to protect new - // objects in the directory bucket. - // - // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the - // session token refreshes automatically to avoid service interruptions when a - // session expires. The CLI or the Amazon Web Services SDKs use the bucket's - // default encryption configuration for the CreateSession request. It's not - // supported to override the encryption settings values in the CreateSession - // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption - // request headers must match the default encryption configuration of the directory - // bucket. - // - // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html - // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - ServerSideEncryption types.ServerSideEncryption - - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high - // availability. 
Depending on performance needs, you can specify a different - // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. - // - // - For directory buckets, only the S3 Express One Zone storage class is - // supported to store newly created objects. - // - // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. - // - // This functionality is not supported for directory buckets. - Tagging *string - - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. - // - // This functionality is not supported for directory buckets. - WebsiteRedirectLocation *string - - noSmithyDocumentSerde -} - -func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type CreateMultipartUploadOutput struct { - - // If the bucket has a lifecycle rule configured with an action to abort - // incomplete multipart uploads and the prefix in the lifecycle rule matches the - // object name in the request, the response includes this header. The header - // indicates when the initiated multipart upload becomes eligible for an abort - // operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. - // - // The response also includes the x-amz-abort-rule-id header that provides the ID - // of the lifecycle configuration rule that defines the abort action. - // - // This functionality is not supported for directory buckets. - // - // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config - AbortDate *time.Time - - // This header is returned along with the x-amz-abort-date header. It identifies - // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. - // - // This functionality is not supported for directory buckets. - AbortRuleId *string - - // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. - // - // Access points are not supported by directory buckets. - Bucket *string - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm types.ChecksumAlgorithm - - // Indicates the checksum type that you want Amazon S3 to use to calculate the - // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Object key for which the multipart upload was initiated. - Key *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. 
- RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a Base64 encoded string of a - // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. - SSEKMSEncryptionContext *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). - ServerSideEncryption types.ServerSideEncryption - - // ID for the initiated multipart upload. - UploadId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateMultipartUpload{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateMultipartUpload{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateMultipartUpload"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpCreateMultipartUploadValidationMiddleware(stack); 
err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMultipartUpload(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateMultipartUploadInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opCreateMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateMultipartUpload", - } -} - -// getCreateMultipartUploadBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getCreateMultipartUploadBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateMultipartUploadInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateMultipartUploadBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go deleted file mode 100644 index 1b6452b35..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go +++ /dev/null @@ -1,432 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates a session that establishes temporary security credentials to support -// fast authentication and authorization for the Zonal endpoint API operations on -// directory buckets. For more information about Zonal endpoint API operations that -// include the Availability Zone in the request endpoint, see [S3 Express One Zone APIs]in the Amazon S3 -// User Guide. -// -// To make Zonal endpoint API requests on a directory bucket, use the CreateSession -// API operation. Specifically, you grant s3express:CreateSession permission to a -// bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM -// credentials to make the CreateSession API request on the bucket, which returns -// temporary security credentials that include the access key ID, secret access -// key, session token, and expiration. These credentials have associated -// permissions to access the Zonal endpoint API operations. After the session is -// created, you don’t need to use other policies to grant permissions to each Zonal -// endpoint API individually. Instead, in your Zonal endpoint API requests, you -// sign your requests by applying the temporary security credentials of the session -// to the request headers and following the SigV4 protocol for authentication. You -// also apply the session token to the x-amz-s3session-token request header for -// authorization. Temporary security credentials are scoped to the bucket and -// expire after 5 minutes. After the expiration time, any calls that you make with -// those credentials will fail. You must use IAM credentials again to make a -// CreateSession API request that generates a new set of temporary credentials for -// use. Temporary credentials cannot be extended or refreshed beyond the original -// specified interval. -// -// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes -// automatically to avoid service interruptions when a session expires. We -// recommend that you use the Amazon Web Services SDKs to initiate and manage -// requests to the CreateSession API. For more information, see [Performance guidelines and design patterns]in the Amazon S3 -// User Guide. -// -// - You must make requests for this API operation to the Zonal endpoint. These -// endpoints support virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style -// requests are not supported. For more information about endpoints in Availability -// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints -// in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// - CopyObject API operation - Unlike other Zonal endpoint API operations, the -// CopyObject API operation doesn't use the temporary security credentials -// returned from the CreateSession API operation for authentication and -// authorization. For information about authentication and authorization of the -// CopyObject API operation on directory buckets, see [CopyObject]. 
-// -// - HeadBucket API operation - Unlike other Zonal endpoint API operations, the -// HeadBucket API operation doesn't use the temporary security credentials -// returned from the CreateSession API operation for authentication and -// authorization. For information about authentication and authorization of the -// HeadBucket API operation on directory buckets, see [HeadBucket]. -// -// Permissions To obtain temporary security credentials, you must create a bucket -// policy or an IAM identity-based policy that grants s3express:CreateSession -// permission to the bucket. In a policy, you can have the s3express:SessionMode -// condition key to control who can create a ReadWrite or ReadOnly session. For -// more information about ReadWrite or ReadOnly sessions, see [x-amz-create-session-mode] -// x-amz-create-session-mode . For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 -// User Guide. -// -// To grant cross-account access to Zonal endpoint API operations, the bucket -// policy should also grant both accounts the s3express:CreateSession permission. -// -// If you want to encrypt objects with SSE-KMS, you must also have the -// kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based -// policies and KMS key policies for the target KMS key. -// -// Encryption For directory buckets, there are only two supported options for -// server-side encryption: server-side encryption with Amazon S3 managed keys -// (SSE-S3) ( AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms -// ). We recommend that the bucket's default encryption uses the desired encryption -// configuration and you don't override the bucket default encryption in your -// CreateSession requests or PUT object requests. Then, new objects are -// automatically encrypted with the desired encryption settings. For more -// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the -// encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. -// -// For [Zonal endpoint (object-level) API operations] except [CopyObject] and [UploadPartCopy], you authenticate and authorize requests through [CreateSession] for low -// latency. To encrypt new objects in a directory bucket with SSE-KMS, you must -// specify SSE-KMS as the directory bucket's default encryption configuration with -// a KMS key (specifically, a [customer managed key]). Then, when a session is created for Zonal -// endpoint API operations, new objects are automatically encrypted and decrypted -// with SSE-KMS and S3 Bucket Keys during the session. -// -// Only 1 [customer managed key] is supported per directory bucket for the lifetime of the bucket. The [Amazon Web Services managed key] ( -// aws/s3 ) isn't supported. After you specify SSE-KMS as your bucket's default -// encryption configuration with a customer managed key, you can't change the -// customer managed key for the bucket's SSE-KMS configuration. 
-// -// In the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]) using the REST API, you can't -// override the values of the encryption settings ( x-amz-server-side-encryption , -// x-amz-server-side-encryption-aws-kms-key-id , -// x-amz-server-side-encryption-context , and -// x-amz-server-side-encryption-bucket-key-enabled ) from the CreateSession -// request. You don't need to explicitly specify these encryption settings values -// in Zonal endpoint API calls, and Amazon S3 will use the encryption settings -// values from the CreateSession request to protect new objects in the directory -// bucket. -// -// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the -// session token refreshes automatically to avoid service interruptions when a -// session expires. The CLI or the Amazon Web Services SDKs use the bucket's -// default encryption configuration for the CreateSession request. It's not -// supported to override the encryption settings values in the CreateSession -// request. Also, in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), it's not -// supported to override the values of the encryption settings from the -// CreateSession request. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html -// [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html -// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk -// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters -// [Zonal endpoint (object-level) API operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-differences.html#s3-express-differences-api-operations -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: 
https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) { - if params == nil { - params = &CreateSessionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateSession", params, optFns, c.addOperationCreateSessionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateSessionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateSessionInput struct { - - // The name of the bucket that you create a session for. - // - // This member is required. - Bucket *string - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using KMS keys (SSE-KMS). - // - // S3 Bucket Keys are always enabled for GET and PUT operations in a directory - // bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy - // SSE-KMS encrypted objects from general purpose buckets to directory buckets, - // from directory buckets to general purpose buckets, or between directory buckets, - // through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a - // copy request is made for a KMS-encrypted object. - // - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops - BucketKeyEnabled *bool - - // Specifies the Amazon Web Services KMS Encryption Context as an additional - // encryption context to use for object encryption. The value of this header is a - // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption - // context as key-value pairs. This value is stored as object metadata and - // automatically gets passed on to Amazon Web Services KMS for future GetObject - // operations on this object. - // - // General purpose buckets - This value must be explicitly added during CopyObject - // operations if you want an additional encryption context for your object. For - // more information, see [Encryption context]in the Amazon S3 User Guide. - // - // Directory buckets - You can optionally provide an explicit encryption context - // value. The value must match the default encryption context - the bucket Amazon - // Resource Name (ARN). An additional encryption context value is not supported. - // - // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context - SSEKMSEncryptionContext *string - - // If you specify x-amz-server-side-encryption with aws:kms , you must specify the - // x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key - // ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you - // get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key - // alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist - // in the same account that't issuing the command, you must use the full Key ARN - // not the Key ID. 
- // - // Your SSE-KMS configuration can only support 1 [customer managed key] per directory bucket for the - // lifetime of the bucket. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. - // - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - SSEKMSKeyId *string - - // The server-side encryption algorithm to use when you store objects in the - // directory bucket. - // - // For directory buckets, there are only two supported options for server-side - // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 - // ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). By default, - // Amazon S3 encrypts data with SSE-S3. For more information, see [Protecting data with server-side encryption]in the Amazon S3 - // User Guide. - // - // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html - ServerSideEncryption types.ServerSideEncryption - - // Specifies the mode of the session that will be created, either ReadWrite or - // ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is - // capable of executing all the Zonal endpoint API operations on a directory - // bucket. A ReadOnly session is constrained to execute the following Zonal - // endpoint API operations: GetObject , HeadObject , ListObjectsV2 , - // GetObjectAttributes , ListParts , and ListMultipartUploads . - SessionMode types.SessionMode - - noSmithyDocumentSerde -} - -func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.DisableS3ExpressSessionAuth = ptr.Bool(true) -} - -type CreateSessionOutput struct { - - // The established temporary security credentials for the created session. - // - // This member is required. - Credentials *types.SessionCredentials - - // Indicates whether to use an S3 Bucket Key for server-side encryption with KMS - // keys (SSE-KMS). - BucketKeyEnabled *bool - - // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a Base64 encoded string of a - // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. - // This value is stored as object metadata and automatically gets passed on to - // Amazon Web Services KMS for future GetObject operations on this object. - SSEKMSEncryptionContext *string - - // If you specify x-amz-server-side-encryption with aws:kms , this header indicates - // the ID of the KMS symmetric encryption customer managed key that was used for - // object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store objects in the - // directory bucket. - ServerSideEncryption types.ServerSideEncryption - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpCreateSession{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateSession{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateSession"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpCreateSessionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSession(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addCreateSessionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *CreateSessionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opCreateSession(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateSession", - } -} - -// getCreateSessionBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getCreateSessionBucketMember(input interface{}) (*string, bool) { - in := input.(*CreateSessionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addCreateSessionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getCreateSessionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go deleted file mode 100644 index 8e34c67c5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go +++ /dev/null @@ -1,303 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the S3 bucket. All objects (including all object versions and delete -// markers) in the bucket must be deleted before the bucket itself can be deleted. -// -// - Directory buckets - If multipart uploads in a directory bucket are in -// progress, you can't delete the bucket until all the in-progress multipart -// uploads are aborted or completed. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Regional endpoint. These endpoints support path-style -// requests in the format -// https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - You must have the s3:DeleteBucket -// permission on the specified bucket in a policy. -// -// - Directory bucket permissions - You must have the s3express:DeleteBucket -// permission in an IAM identity-based policy instead of a bucket policy. -// Cross-account access to this API operation isn't supported. This operation can -// only be performed by the Amazon Web Services account that owns the resource. 
For -// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the -// Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following operations are related to DeleteBucket : -// -// [CreateBucket] -// -// [DeleteObject] -// -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { - if params == nil { - params = &DeleteBucketInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucket", params, optFns, c.addOperationDeleteBucketMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketInput struct { - - // Specifies the bucket being deleted. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucket{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucket{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucket"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucket(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opDeleteBucket(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucket", - } -} - -// getDeleteBucketBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getDeleteBucketBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: false, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignDeleteBucket is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignDeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &DeleteBucketInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "DeleteBucket", params, clientOptFns, - c.client.addOperationDeleteBucketMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addDeleteBucketPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addDeleteBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go deleted file mode 100644 index ebeba2295..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes an analytics configuration for the bucket (specified by the analytics -// configuration ID). -// -// To use this operation, you must have permissions to perform the -// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. 
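
With the vendored copy of api_op_DeleteBucket.go removed above, callers reach the same DeleteBucket and PresignDeleteBucket surface through the go.mod dependency instead. The sketch below is illustrative only and is not code from this repository: the bucket names and account ID are placeholders, and the client setup assumes default credential/region resolution via config.LoadDefaultConfig.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve credentials and region from the environment; the SDK now comes
	// from the module cache instead of the deleted vendor/ tree.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Direct call: delete an (empty) bucket, asserting the expected owner.
	// "example-bucket" and the account ID are placeholders.
	_, err = client.DeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket:              aws.String("example-bucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Alternative path: PresignDeleteBucket produces a signed URL, method and
	// headers so another process can perform the DELETE later without
	// credentials of its own.
	presigned, err := s3.NewPresignClient(client).PresignDeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket: aws.String("another-example-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(presigned.Method, presigned.URL)
}
```

Nothing about the call sites changes with the vendor removal; only how the dependency is resolved does.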
For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. -// -// The following operations are related to DeleteBucketAnalyticsConfiguration : -// -// [GetBucketAnalyticsConfiguration] -// -// [ListBucketAnalyticsConfigurations] -// -// [PutBucketAnalyticsConfiguration] -// -// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html -// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html -// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketAnalyticsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketAnalyticsConfiguration", params, optFns, c.addOperationDeleteBucketAnalyticsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketAnalyticsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketAnalyticsConfigurationInput struct { - - // The name of the bucket from which an analytics configuration is deleted. - // - // This member is required. - Bucket *string - - // The ID that identifies the analytics configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketAnalyticsConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketAnalyticsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*DeleteBucketAnalyticsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketAnalyticsConfiguration", - } -} - -// getDeleteBucketAnalyticsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketAnalyticsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketAnalyticsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go deleted file mode 100644 index e10144751..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go +++ /dev/null @@ -1,235 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes the cors configuration information set for the bucket. -// -// To use this operation, you must have permission to perform the s3:PutBucketCORS -// action. The bucket owner has this permission by default and can grant this -// permission to others. -// -// For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. 
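
As with the other operations deleted from vendor/ in this change, DeleteBucketCors keeps working unchanged from the module dependency. The sketch below (client setup as in the earlier DeleteBucket example; bucket name and account ID are placeholders) shows the 403 Forbidden case documented here surfacing as a coded smithy.APIError rather than a bare status code.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.DeleteBucketCors(ctx, &s3.DeleteBucketCorsInput{
		Bucket:              aws.String("example-bucket"), // placeholder
		ExpectedBucketOwner: aws.String("111122223333"),   // placeholder account ID
	})
	if err != nil {
		// Service-side failures (including the owner mismatch documented
		// above) implement smithy.APIError and carry an error code.
		var ae smithy.APIError
		if errors.As(err, &ae) {
			log.Fatalf("DeleteBucketCors: %s: %s", ae.ErrorCode(), ae.ErrorMessage())
		}
		log.Fatal(err)
	}
	log.Println("CORS configuration removed")
}
```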
-// -// # Related Resources -// -// [PutBucketCors] -// -// [RESTOPTIONSobject] -// -// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html -// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html -// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html -func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { - if params == nil { - params = &DeleteBucketCorsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketCors", params, optFns, c.addOperationDeleteBucketCorsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketCorsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketCorsInput struct { - - // Specifies the bucket whose cors configuration is being deleted. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketCorsOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketCors{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketCors{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketCors"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, 
options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketCors(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketCorsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketCors", - } -} - -// getDeleteBucketCorsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getDeleteBucketCorsBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketCorsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketCorsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go deleted file mode 100644 index c1a1b810d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go +++ /dev/null @@ -1,272 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This implementation of the DELETE action resets the default encryption for the -// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). -// -// - General purpose buckets - For information about the bucket default -// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: SSE-S3 and SSE-KMS. For information about -// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. -// -// Permissions -// -// - General purpose bucket permissions - The s3:PutEncryptionConfiguration -// permission is required in a policy. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:PutEncryptionConfiguration permission in an IAM -// identity-based policy instead of a bucket policy. Cross-account access to this -// API operation isn't supported. This operation can only be performed by the -// Amazon Web Services account that owns the resource. For more information about -// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . 
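
A usage sketch for DeleteBucketEncryption, again assuming a client built as in the earlier examples; the helper package name and bucket argument are placeholders. It also shows the optFns variadic that these generated operations accept, used here to force path-style addressing for a single request without touching the shared client options.

```go
package s3cleanup // hypothetical helper package, not part of this repository

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// resetBucketEncryption drops the bucket's default-encryption configuration,
// falling back to SSE-S3 as described in the operation documentation above.
func resetBucketEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketEncryption(ctx,
		&s3.DeleteBucketEncryptionInput{
			Bucket: aws.String(bucket),
		},
		// Per-call override: use path-style addressing for this request only.
		func(o *s3.Options) {
			o.UsePathStyle = true
		},
	)
	return err
}
```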
-// -// The following operations are related to DeleteBucketEncryption : -// -// [PutBucketEncryption] -// -// [GetBucketEncryption] -// -// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html -// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html -// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html -// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { - if params == nil { - params = &DeleteBucketEncryptionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketEncryption", params, optFns, c.addOperationDeleteBucketEncryptionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketEncryptionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketEncryptionInput struct { - - // The name of the bucket containing the server-side encryption configuration to - // delete. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketEncryptionOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketEncryption{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketEncryption{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketEncryption"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketEncryption(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketEncryptionInput) bucket() (string, bool) { - if v.Bucket == nil { - return 
"", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketEncryption", - } -} - -// getDeleteBucketEncryptionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketEncryptionBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketEncryptionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketEncryptionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go deleted file mode 100644 index 3fd1f17cd..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ /dev/null @@ -1,249 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput access -// tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of object -// size or retention period. If the size of an object is less than 128 KB, it is -// not monitored and not eligible for auto-tiering. Smaller objects can be stored, -// but they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. -// -// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
-// -// Operations related to DeleteBucketIntelligentTieringConfiguration include: -// -// [GetBucketIntelligentTieringConfiguration] -// -// [PutBucketIntelligentTieringConfiguration] -// -// [ListBucketIntelligentTieringConfigurations] -// -// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html -// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html -// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketIntelligentTieringConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketIntelligentTieringConfiguration", params, optFns, c.addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketIntelligentTieringConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketIntelligentTieringConfigurationInput struct { - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // This member is required. - Id *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketIntelligentTieringConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketIntelligentTieringConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = 
addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketIntelligentTieringConfiguration", - } -} - -// getDeleteBucketIntelligentTieringConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getDeleteBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketIntelligentTieringConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketIntelligentTieringConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go deleted file mode 100644 index 68fbe69df..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes an inventory configuration (identified by the inventory ID) from the -// bucket. -// -// To use this operation, you must have permissions to perform the -// s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. 
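
Because inventory configurations are addressed by ID, clearing them from a bucket usually means listing them first. A sketch under the same assumptions as the earlier examples (hypothetical helper package, placeholder bucket), pairing DeleteBucketInventoryConfiguration with the related ListBucketInventoryConfigurations call and its continuation-token paging:

```go
package s3cleanup // hypothetical helper package, not part of this repository

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteAllInventoryConfigs removes every inventory configuration on a bucket.
func deleteAllInventoryConfigs(ctx context.Context, client *s3.Client, bucket string) error {
	var token *string
	for {
		page, err := client.ListBucketInventoryConfigurations(ctx,
			&s3.ListBucketInventoryConfigurationsInput{
				Bucket:            aws.String(bucket),
				ContinuationToken: token,
			})
		if err != nil {
			return err
		}
		for _, cfg := range page.InventoryConfigurationList {
			_, err := client.DeleteBucketInventoryConfiguration(ctx,
				&s3.DeleteBucketInventoryConfigurationInput{
					Bucket: aws.String(bucket),
					Id:     cfg.Id,
				})
			if err != nil {
				return err
			}
		}
		// A nil NextContinuationToken means the listing is complete.
		if page.NextContinuationToken == nil {
			return nil
		}
		token = page.NextContinuationToken
	}
}
```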
-// -// Operations related to DeleteBucketInventoryConfiguration include: -// -// [GetBucketInventoryConfiguration] -// -// [PutBucketInventoryConfiguration] -// -// [ListBucketInventoryConfigurations] -// -// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html -// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html -// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html -func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketInventoryConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketInventoryConfiguration", params, optFns, c.addOperationDeleteBucketInventoryConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketInventoryConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketInventoryConfigurationInput struct { - - // The name of the bucket containing the inventory configuration to delete. - // - // This member is required. - Bucket *string - - // The ID used to identify the inventory configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketInventoryConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketInventoryConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*DeleteBucketInventoryConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketInventoryConfiguration", - } -} - -// getDeleteBucketInventoryConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketInventoryConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketInventoryConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go deleted file mode 100644 index 7eb820b46..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go +++ /dev/null @@ -1,274 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the lifecycle configuration from the specified bucket. Amazon S3 -// removes all the lifecycle configuration rules in the lifecycle subresource -// associated with the bucket. Your objects never expire, and Amazon S3 no longer -// automatically deletes any objects on the basis of rules contained in the deleted -// lifecycle configuration. -// -// Permissions -// - General purpose bucket permissions - By default, all Amazon S3 resources -// are private, including buckets, objects, and related subresources (for example, -// lifecycle configuration and website configuration). Only the resource owner -// (that is, the Amazon Web Services account that created it) can access the -// resource. The resource owner can optionally grant access permissions to others -// by writing an access policy. For this operation, a user must have the -// s3:PutLifecycleConfiguration permission. -// -// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - You must have the -// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy -// to use this operation. Cross-account access to this API operation isn't -// supported. 
The resource owner can optionally grant access permissions to others -// by creating a role or user for them as long as they are within the same account -// as the owner and resource. -// -// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in -// -// the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name -// . Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . -// -// For more information about the object expiration, see [Elements to Describe Lifecycle Actions]. -// -// Related actions include: -// -// [PutBucketLifecycleConfiguration] -// -// [GetBucketLifecycleConfiguration] -// -// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html -// [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions -// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html -// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { - if params == nil { - params = &DeleteBucketLifecycleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketLifecycle", params, optFns, c.addOperationDeleteBucketLifecycleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketLifecycleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketLifecycleInput struct { - - // The bucket name of the lifecycle to delete. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. 
- ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketLifecycleOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketLifecycle{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketLifecycle{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketLifecycle"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketLifecycle(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = 
addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketLifecycleInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketLifecycle(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketLifecycle", - } -} - -// getDeleteBucketLifecycleBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketLifecycleBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketLifecycleInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketLifecycleUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketLifecycleBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go deleted file mode 100644 index ff44efe08..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go +++ /dev/null @@ -1,234 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes a metadata table configuration from a general purpose bucket. For more -// -// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// Permissions To use this operation, you must have the -// s3:DeleteBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] -// in the Amazon S3 User Guide. 
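
For the newer DeleteBucketMetadataTableConfiguration operation the call shape is the same. The sketch below (same hypothetical helper package, placeholder bucket) additionally reads the request ID back out of the ResultMetadata field that appears on every Output struct in these files, which helps when correlating failures with S3 server-side logs.

```go
package s3cleanup // hypothetical helper package, not part of this repository

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// dropMetadataTableConfig removes the S3 Metadata table configuration from a
// general purpose bucket and logs the service request ID.
func dropMetadataTableConfig(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.DeleteBucketMetadataTableConfiguration(ctx,
		&s3.DeleteBucketMetadataTableConfigurationInput{
			Bucket: aws.String(bucket),
		})
	if err != nil {
		return err
	}
	// ResultMetadata is the middleware.Metadata value populated by the
	// metadata-retriever middleware registered in the operation stack above.
	if id, ok := awsmiddleware.GetRequestID(out.ResultMetadata); ok {
		log.Printf("metadata table configuration deleted, request id %s", id)
	}
	return nil
}
```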
-// -// The following operations are related to DeleteBucketMetadataTableConfiguration : -// -// [CreateBucketMetadataTableConfiguration] -// -// [GetBucketMetadataTableConfiguration] -// -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html -// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -func (c *Client) DeleteBucketMetadataTableConfiguration(ctx context.Context, params *DeleteBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetadataTableConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketMetadataTableConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetadataTableConfiguration", params, optFns, c.addOperationDeleteBucketMetadataTableConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketMetadataTableConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketMetadataTableConfigurationInput struct { - - // The general purpose bucket that you want to remove the metadata table - // configuration from. - // - // This member is required. - Bucket *string - - // The expected bucket owner of the general purpose bucket that you want to - // remove the metadata table configuration from. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketMetadataTableConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetadataTableConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - 
return nil -} - -func (v *DeleteBucketMetadataTableConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketMetadataTableConfiguration", - } -} - -// getDeleteBucketMetadataTableConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getDeleteBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketMetadataTableConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketMetadataTableConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go deleted file mode 100644 index 86ab95732..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ /dev/null @@ -1,251 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes a metrics configuration for the Amazon CloudWatch request metrics -// (specified by the metrics configuration ID) from the bucket. Note that this -// doesn't include the daily storage metrics. -// -// To use this operation, you must have permissions to perform the -// s3:PutMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. 
-// -// The following operations are related to DeleteBucketMetricsConfiguration : -// -// [GetBucketMetricsConfiguration] -// -// [PutBucketMetricsConfiguration] -// -// [ListBucketMetricsConfigurations] -// -// [Monitoring Metrics with Amazon CloudWatch] -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html -// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html -// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { - if params == nil { - params = &DeleteBucketMetricsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetricsConfiguration", params, optFns, c.addOperationDeleteBucketMetricsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketMetricsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketMetricsConfigurationInput struct { - - // The name of the bucket containing the metrics configuration to delete. - // - // This member is required. - Bucket *string - - // The ID used to identify the metrics configuration. The ID has a 64 character - // limit and can only contain letters, numbers, periods, dashes, and underscores. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketMetricsConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetricsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*DeleteBucketMetricsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketMetricsConfiguration", - } -} - -// getDeleteBucketMetricsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketMetricsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketMetricsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go deleted file mode 100644 index da46b921e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you -// must have the s3:PutBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see [Specifying Permissions in a Policy]. -// -// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. 
-// -// The following operations are related to DeleteBucketOwnershipControls : -// -// # GetBucketOwnershipControls -// -// # PutBucketOwnershipControls -// -// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { - if params == nil { - params = &DeleteBucketOwnershipControlsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketOwnershipControls", params, optFns, c.addOperationDeleteBucketOwnershipControlsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketOwnershipControlsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketOwnershipControlsInput struct { - - // The Amazon S3 bucket whose OwnershipControls you want to delete. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketOwnershipControlsOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketOwnershipControls"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if 
err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketOwnershipControlsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketOwnershipControls", - } -} - -// getDeleteBucketOwnershipControlsBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getDeleteBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketOwnershipControlsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketOwnershipControlsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go deleted file mode 100644 index 8e1d89b76..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go +++ /dev/null @@ -1,284 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Deletes the policy of a specified bucket. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions If you are using an identity other than the root user of the Amazon -// Web Services account that owns the bucket, the calling identity must both have -// the DeleteBucketPolicy permissions on the specified bucket and belong to the -// bucket owner's account in order to use this operation. -// -// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 -// Access Denied error. If you have the correct permissions, but you're not using -// an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. -// -// To ensure that bucket owners don't inadvertently lock themselves out of their -// own buckets, the root principal in a bucket owner's Amazon Web Services account -// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API -// actions, even if their bucket policy explicitly denies the root principal's -// access. Bucket owner root principals can only be blocked from performing these -// API actions by VPC endpoint policies and Amazon Web Services Organizations -// policies. -// -// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is -// required in a policy. For more information about general purpose buckets bucket -// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:DeleteBucketPolicy permission in an IAM identity-based -// policy instead of a bucket policy. Cross-account access to this API operation -// isn't supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . 
-// -// # The following operations are related to DeleteBucketPolicy -// -// [CreateBucket] -// -// [DeleteObject] -// -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { - if params == nil { - params = &DeleteBucketPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketPolicy", params, optFns, c.addOperationDeleteBucketPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketPolicyInput struct { - - // The bucket name. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketPolicyOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketPolicyInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return 
*v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketPolicy", - } -} - -// getDeleteBucketPolicyBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketPolicyBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketPolicyInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketPolicyBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go deleted file mode 100644 index d2c8b0328..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go +++ /dev/null @@ -1,242 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes the replication configuration from the bucket. -// -// To use this operation, you must have permissions to perform the -// s3:PutReplicationConfiguration action. The bucket owner has these permissions by -// default and can grant it to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] -// and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// It can take a while for the deletion of a replication configuration to fully -// propagate. -// -// For information about replication configuration, see [Replication] in the Amazon S3 User -// Guide. 
-// -// The following operations are related to DeleteBucketReplication : -// -// [PutBucketReplication] -// -// [GetBucketReplication] -// -// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html -// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { - if params == nil { - params = &DeleteBucketReplicationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketReplication", params, optFns, c.addOperationDeleteBucketReplicationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketReplicationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketReplicationInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketReplicationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketReplication{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketReplication{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketReplication"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketReplication(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketReplicationInput) bucket() (string, bool) { - if v.Bucket == nil { 
- return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketReplication", - } -} - -// getDeleteBucketReplicationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketReplicationBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketReplicationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketReplicationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go deleted file mode 100644 index 22f0656ed..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Deletes the tags from the bucket. -// -// To use this operation, you must have permission to perform the -// s3:PutBucketTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. -// -// The following operations are related to DeleteBucketTagging : -// -// [GetBucketTagging] -// -// [PutBucketTagging] -// -// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html -// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html -func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { - if params == nil { - params = &DeleteBucketTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketTagging", params, optFns, c.addOperationDeleteBucketTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketTaggingInput struct { - - // The bucket that has the tag set to be removed. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. 
If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketTaggingOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketTagging", - } -} - -// getDeleteBucketTaggingBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go deleted file mode 100644 index 46aded0bb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go +++ /dev/null @@ -1,241 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This action removes the website configuration for a bucket. Amazon S3 returns a -// 200 OK response upon successfully deleting a website configuration on the -// specified bucket. You will get a 200 OK response if the website configuration -// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 -// response if the bucket specified in the request does not exist. -// -// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, -// only the bucket owner can delete the website configuration attached to a bucket. -// However, bucket owners can grant other users permission to delete the website -// configuration by writing a bucket policy granting them the -// S3:DeleteBucketWebsite permission. -// -// For more information about hosting websites, see [Hosting Websites on Amazon S3]. 
-// -// The following operations are related to DeleteBucketWebsite : -// -// [GetBucketWebsite] -// -// [PutBucketWebsite] -// -// [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html -// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html -// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html -func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { - if params == nil { - params = &DeleteBucketWebsiteInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteBucketWebsite", params, optFns, c.addOperationDeleteBucketWebsiteMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteBucketWebsiteOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteBucketWebsiteInput struct { - - // The bucket name for which you want to remove the website configuration. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeleteBucketWebsiteOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketWebsite{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketWebsite{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketWebsite"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = 
addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketWebsite(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteBucketWebsiteInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteBucketWebsite", - } -} - -// getDeleteBucketWebsiteBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteBucketWebsiteBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteBucketWebsiteInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteBucketWebsiteBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go deleted file mode 100644 index fa0031a57..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go +++ /dev/null @@ -1,464 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Removes an object from a bucket. The behavior depends on the bucket's -// versioning state: -// -// - If bucket versioning is not enabled, the operation permanently deletes the -// object. -// -// - If bucket versioning is enabled, the operation inserts a delete marker, -// which becomes the current version of the object. To permanently delete an object -// in a versioned bucket, you must include the object’s versionId in the request. -// For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket]. -// -// - If bucket versioning is suspended, the operation removes the object that -// has a null versionId , if there is one, and inserts a delete marker that -// becomes the current version of the object. If there isn't an object with a null -// versionId , and all versions of the object have a versionId , Amazon S3 does -// not remove the object and only inserts a delete marker. To permanently delete an -// object that has a versionId , you must include the object’s versionId in the -// request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets]. -// -// - Directory buckets - S3 Versioning isn't enabled and supported for directory -// buckets. For this API operation, only the null value of the version ID is -// supported by directory buckets. You can only specify null to the versionId -// query parameter in the request. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// To remove a specific version, you must use the versionId query parameter. Using -// this query parameter permanently deletes the version. If the object deleted is a -// delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. -// -// If the object you want to delete is in a bucket where the bucket versioning -// configuration is MFA Delete enabled, you must include the x-amz-mfa request -// header in the DELETE versionId request. Requests that include x-amz-mfa must -// use HTTPS. For more information about MFA Delete, see [Using MFA Delete]in the Amazon S3 User -// Guide. To see sample requests that use versioning, see [Sample Request]. -// -// Directory buckets - MFA delete is not supported by directory buckets. -// -// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle] ) to -// enable Amazon S3 to remove them for you. 
If you want to block users or accounts -// from removing or deleting objects from your bucket, you must deny them the -// s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration -// actions. -// -// Directory buckets - S3 Lifecycle is not supported by directory buckets. -// -// Permissions -// -// - General purpose bucket permissions - The following permissions are required -// in your policies when your DeleteObjects request includes specific headers. -// -// - s3:DeleteObject - To delete an object from a bucket, you must always have -// the s3:DeleteObject permission. -// -// - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following action is related to DeleteObject : -// -// [PutObject] -// -// [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete -// [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html -func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { - if params == nil { - params = &DeleteObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteObject", params, optFns, c.addOperationDeleteObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteObjectInput struct { - - // The bucket name of the bucket containing the object. 
- // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Key name of the object to delete. - // - // This member is required. - Key *string - - // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to - // process this operation. To use this header, you must have the - // s3:BypassGovernanceRetention permission. - // - // This functionality is not supported for directory buckets. - BypassGovernanceRetention *bool - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The If-Match header field makes the request method conditional on ETags. If the - // ETag value does not match, the operation returns a 412 Precondition Failed - // error. If the ETag matches or if the object doesn't exist, the operation will - // return a 204 Success (No Content) response . - // - // For more information about conditional requests, see [RFC 7232]. - // - // This functionality is only supported for directory buckets. 
- // - // [RFC 7232]: https://docs.aws.amazon.com/https:/tools.ietf.org/html/rfc7232 - IfMatch *string - - // If present, the object is deleted only if its modification times matches the - // provided Timestamp . If the Timestamp values do not match, the operation - // returns a 412 Precondition Failed error. If the Timestamp matches or if the - // object doesn’t exist, the operation returns a 204 Success (No Content) response. - // - // This functionality is only supported for directory buckets. - IfMatchLastModifiedTime *time.Time - - // If present, the object is deleted only if its size matches the provided size in - // bytes. If the Size value does not match, the operation returns a 412 - // Precondition Failed error. If the Size matches or if the object doesn’t exist, - // the operation returns a 204 Success (No Content) response. - // - // This functionality is only supported for directory buckets. - // - // You can use the If-Match , x-amz-if-match-last-modified-time and - // x-amz-if-match-size conditional headers in conjunction with each-other or - // individually. - IfMatchSize *int64 - - // The concatenation of the authentication device's serial number, a space, and - // the value that is displayed on your authentication device. Required to - // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. - // - // This functionality is not supported for directory buckets. - MFA *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Version ID used to reference a specific version of the object. - // - // For directory buckets in this API operation, only the null value of the version - // ID is supported. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type DeleteObjectOutput struct { - - // Indicates whether the specified object version that was permanently deleted was - // (true) or was not (false) a delete marker before deletion. In a simple DELETE, - // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. - // - // This functionality is not supported for directory buckets. - DeleteMarker *bool - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Returns the version ID of the delete marker created as a result of the DELETE - // operation. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opDeleteObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteObject", - } -} - -// getDeleteObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getDeleteObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignDeleteObject is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignDeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &DeleteObjectInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "DeleteObject", params, clientOptFns, - c.client.addOperationDeleteObjectMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addDeleteObjectPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addDeleteObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go deleted file mode 100644 index 54bfd7d72..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go +++ /dev/null @@ -1,268 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Removes the entire tag set from the specified object. For more information -// about managing object tags, see [Object Tagging]. -// -// To use this operation, you must have permission to perform the -// s3:DeleteObjectTagging action. -// -// To delete tags of a specific object version, add the versionId query parameter -// in the request. You will need permission for the s3:DeleteObjectVersionTagging -// action. 
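The DeleteObject and PresignDeleteObject entry points above likewise now come from the upstream module. A short, illustrative sketch of both calls (bucket and key names are placeholders, not code from this repository):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Simple DELETE. On a versioning-enabled bucket this inserts a delete marker
	// rather than removing data; the marker's version ID is returned.
	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("backups/2025-01-01.tar"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("delete marker version:", aws.ToString(out.VersionId))

	// Presigned variant: produce a time-limited URL that performs the same DELETE
	// without sharing credentials.
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignDeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("backups/2025-01-01.tar"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL)
}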
-// -// The following operations are related to DeleteObjectTagging : -// -// [PutObjectTagging] -// -// [GetObjectTagging] -// -// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html -// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html -// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html -func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { - if params == nil { - params = &DeleteObjectTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteObjectTagging", params, optFns, c.addOperationDeleteObjectTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteObjectTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteObjectTaggingInput struct { - - // The bucket name containing the objects from which to remove the tags. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key that identifies the object in the bucket from which to remove all tags. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The versionId of the object that the tag-set will be removed from. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type DeleteObjectTaggingOutput struct { - - // The versionId of the object the tag-set was removed from. - VersionId *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjectTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjectTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjectTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjectTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteObjectTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return 
*v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteObjectTagging", - } -} - -// getDeleteObjectTaggingBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeleteObjectTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteObjectTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteObjectTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go deleted file mode 100644 index 4c4e39026..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go +++ /dev/null @@ -1,468 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation enables you to delete multiple objects from a bucket using a -// single HTTP request. If you know the object keys that you want to delete, then -// this operation provides a suitable alternative to sending individual delete -// requests, reducing per-request overhead. -// -// The request can contain a list of up to 1000 keys that you want to delete. In -// the XML, you provide the object key names, and optionally, version IDs if you -// want to delete a specific version of the object from a versioning-enabled -// bucket. For each key, Amazon S3 performs a delete operation and returns the -// result of that delete, success or failure, in the response. Note that if the -// object specified in the request is not found, Amazon S3 returns the result as -// deleted. -// -// - Directory buckets - S3 Versioning isn't enabled and supported for directory -// buckets. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. 
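Likewise for DeleteObjectTagging: a minimal sketch of removing an object's tag set through the un-vendored client (names are placeholders, not code from this repository):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Remove the entire tag set from one object; set VersionId in the input to
	// target a specific version on a versioning-enabled bucket.
	out, err := client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("backups/2025-01-01.tar"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tag set removed from version:", aws.ToString(out.VersionId))
}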
For more information -// about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// The operation supports two modes for the response: verbose and quiet. By -// default, the operation uses verbose mode in which the response includes the -// result of deletion of each key in your request. In quiet mode the response -// includes only keys where the delete operation encountered an error. For a -// successful deletion in a quiet mode, the operation does not return any -// information about the delete in the response body. -// -// When performing this action on an MFA Delete enabled bucket, that attempts to -// delete any versioned objects, you must include an MFA token. If you do not -// provide one, the entire request will fail, even if there are non-versioned -// objects you are trying to delete. If you provide an invalid token, whether there -// are versioned keys in the request or not, the entire Multi-Object Delete request -// will fail. For information about MFA Delete, see [MFA Delete]in the Amazon S3 User Guide. -// -// Directory buckets - MFA delete is not supported by directory buckets. -// -// Permissions -// -// - General purpose bucket permissions - The following permissions are required -// in your policies when your DeleteObjects request includes specific headers. -// -// - s3:DeleteObject - To delete an object from a bucket, you must always specify -// the s3:DeleteObject permission. -// -// - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion -// permission. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// Content-MD5 request header -// -// - General purpose bucket - The Content-MD5 request header is required for all -// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that -// your request body has not been altered in transit. -// -// - Directory bucket - The Content-MD5 request header or a additional checksum -// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , -// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all -// Multi-Object Delete requests. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
-// -// The following operations are related to DeleteObjects : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [ListParts] -// -// [AbortMultipartUpload] -// -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { - if params == nil { - params = &DeleteObjectsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeleteObjects", params, optFns, c.addOperationDeleteObjectsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeleteObjectsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeleteObjectsInput struct { - - // The bucket name containing the objects to delete. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Container for the request. - // - // This member is required. - Delete *types.Delete - - // Specifies whether you want to delete this object even if it has a - // Governance-type Object Lock in place. To use this header, you must have the - // s3:BypassGovernanceRetention permission. - // - // This functionality is not supported for directory buckets. - BypassGovernanceRetention *bool - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . - // - // For the x-amz-checksum-algorithm header, replace algorithm with the - // supported algorithm from the following list: - // - // - CRC-32 - // - // - CRC-32C - // - // - CRC-64NVME - // - // - SHA-1 - // - // - SHA-256 - // - // For more information, see [Checking object integrity] in the Amazon S3 User Guide. - // - // If the individual checksum value you provide through x-amz-checksum-algorithm - // doesn't match the checksum algorithm you set through - // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest - // error. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The concatenation of the authentication device's serial number, a space, and - // the value that is displayed on your authentication device. Required to - // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. - // - // When performing the DeleteObjects operation on an MFA delete enabled bucket, - // which attempts to delete the specified versioned objects, you must include an - // MFA token. If you don't provide an MFA token, the entire request will fail, even - // if there are non-versioned objects that you are trying to delete. If you provide - // an invalid token, whether there are versioned object keys in the request or not, - // the entire Multi-Object Delete request will fail. For information about MFA - // Delete, see [MFA Delete]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete - MFA *string - - // Confirms that the requester knows that they will be charged for the request. 
- // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type DeleteObjectsOutput struct { - - // Container element for a successful delete. It identifies the object that was - // successfully deleted. - Deleted []types.DeletedObject - - // Container for a failed delete action that describes the object that Amazon S3 - // attempted to delete and the error it encountered. - Errors []types.Error - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjects{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjects{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjects"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - 
return err - } - if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjects(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addDeleteObjectsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeleteObjectsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeleteObjects", - } -} - -// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. 
-func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*DeleteObjectsInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getDeleteObjectsRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getDeleteObjectsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getDeleteObjectsBucketMember(input interface{}) (*string, bool) { - in := input.(*DeleteObjectsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeleteObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeleteObjectsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go deleted file mode 100644 index 7205bb88d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go +++ /dev/null @@ -1,238 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For -// more information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. 
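A sketch of the batch DeleteObjects call described above, as it would be made against the upstream module (bucket and keys are placeholders; Quiet is assumed to be the *bool field used by current SDK releases):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Delete up to 1000 keys in a single request. Quiet mode limits the response
	// to the keys whose deletion failed.
	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{
				{Key: aws.String("backups/chunk-0000")},
				{Key: aws.String("backups/chunk-0001")},
			},
			Quiet: aws.Bool(true), // assumption: *bool in the SDK version in use
		},
	})
	if err != nil {
		log.Fatal(err) // transport-level failure; per-key failures are reported in out.Errors
	}
	for _, e := range out.Errors {
		fmt.Printf("delete failed for %s: %s (%s)\n",
			aws.ToString(e.Key), aws.ToString(e.Message), aws.ToString(e.Code))
	}
}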
-// -// The following operations are related to DeletePublicAccessBlock : -// -// [Using Amazon S3 Block Public Access] -// -// [GetPublicAccessBlock] -// -// [PutPublicAccessBlock] -// -// [GetBucketPolicyStatus] -// -// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html -func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { - if params == nil { - params = &DeletePublicAccessBlockInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DeletePublicAccessBlock", params, optFns, c.addOperationDeletePublicAccessBlockMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DeletePublicAccessBlockOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DeletePublicAccessBlockInput struct { - - // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type DeletePublicAccessBlockOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpDeletePublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeletePublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DeletePublicAccessBlock"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePublicAccessBlock(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *DeletePublicAccessBlockInput) bucket() (string, bool) { - if v.Bucket == nil { 
- return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opDeletePublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DeletePublicAccessBlock", - } -} - -// getDeletePublicAccessBlockBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getDeletePublicAccessBlockBucketMember(input interface{}) (*string, bool) { - in := input.(*DeletePublicAccessBlockInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addDeletePublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getDeletePublicAccessBlockBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go deleted file mode 100644 index 8d33fa67d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ /dev/null @@ -1,269 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This implementation of the GET action uses the accelerate subresource to return -// the Transfer Acceleration state of a bucket, which is either Enabled or -// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that -// enables you to perform faster data transfers to and from Amazon S3. -// -// To use this operation, you must have permission to perform the -// s3:GetAccelerateConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide. -// -// You set the Transfer Acceleration state of an existing bucket to Enabled or -// Suspended by using the [PutBucketAccelerateConfiguration] operation. -// -// A GET accelerate request does not return a state value for a bucket that has no -// transfer acceleration state. A bucket has no Transfer Acceleration state if a -// state has never been set on the bucket. -// -// For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User -// Guide. 
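And for DeletePublicAccessBlock, a minimal sketch of the call via the upstream module (the bucket name is a placeholder; the caller needs the s3:PutBucketPublicAccessBlock permission, per the documentation above):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Delete the bucket's PublicAccessBlock configuration.
	if _, err := client.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{
		Bucket: aws.String("example-bucket"),
	}); err != nil {
		log.Fatalf("DeletePublicAccessBlock: %v", err)
	}
}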
-// -// The following operations are related to GetBucketAccelerateConfiguration : -// -// [PutBucketAccelerateConfiguration] -// -// [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { - if params == nil { - params = &GetBucketAccelerateConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketAccelerateConfiguration", params, optFns, c.addOperationGetBucketAccelerateConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketAccelerateConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketAccelerateConfigurationInput struct { - - // The name of the bucket for which the accelerate configuration is retrieved. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketAccelerateConfigurationOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // The accelerate configuration of the bucket. - Status types.BucketAccelerateStatus - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAccelerateConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAccelerateConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAccelerateConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*GetBucketAccelerateConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketAccelerateConfiguration", - } -} - -// getGetBucketAccelerateConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketAccelerateConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketAccelerateConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go deleted file mode 100644 index 8ae6505d0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go +++ /dev/null @@ -1,264 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This implementation of the GET action uses the acl subresource to return the -// access control list (ACL) of a bucket. To use GET to return the ACL of the -// bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission -// is granted to the anonymous user, you can return the ACL of the bucket without -// using an authorization header. -// -// When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. -// -// When you use this API operation with an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see [List of Error Codes]. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// requests to read ACLs are still supported and return the -// bucket-owner-full-control ACL with the owner being the account that created the -// bucket. 
For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. -// -// The following operations are related to GetBucketAcl : -// -// [ListObjects] -// -// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html -// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { - if params == nil { - params = &GetBucketAclInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketAcl", params, optFns, c.addOperationGetBucketAclMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketAclOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketAclInput struct { - - // Specifies the S3 bucket whose ACL is being requested. - // - // When you use this API operation with an access point, provide the alias of the - // access point in place of the bucket name. - // - // When you use this API operation with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see [List of Error Codes]. - // - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketAclOutput struct { - - // A list of grants. - Grants []types.Grant - - // Container for the bucket owner's display name and ID. - Owner *types.Owner - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAcl{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAcl{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAcl"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAcl(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketAclInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opGetBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketAcl", - } -} - -// getGetBucketAclBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketAclBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketAclInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketAclBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go deleted file mode 100644 index a0e8ac7c0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ /dev/null @@ -1,253 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This implementation of the GET action returns an analytics configuration -// (identified by the analytics configuration ID) from the bucket. -// -// To use this operation, you must have permissions to perform the -// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide. -// -// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User -// Guide. 
-// -// The following operations are related to GetBucketAnalyticsConfiguration : -// -// [DeleteBucketAnalyticsConfiguration] -// -// [ListBucketAnalyticsConfigurations] -// -// [PutBucketAnalyticsConfiguration] -// -// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html -// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html -// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { - if params == nil { - params = &GetBucketAnalyticsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketAnalyticsConfiguration", params, optFns, c.addOperationGetBucketAnalyticsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketAnalyticsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketAnalyticsConfigurationInput struct { - - // The name of the bucket from which an analytics configuration is retrieved. - // - // This member is required. - Bucket *string - - // The ID that identifies the analytics configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketAnalyticsConfigurationOutput struct { - - // The configuration and any analyses for the analytics filter. - AnalyticsConfiguration *types.AnalyticsConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAnalyticsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*GetBucketAnalyticsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketAnalyticsConfiguration", - } -} - -// getGetBucketAnalyticsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketAnalyticsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketAnalyticsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go deleted file mode 100644 index ee1d6b911..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go +++ /dev/null @@ -1,263 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the Cross-Origin Resource Sharing (CORS) configuration information set -// for the bucket. -// -// To use this operation, you must have permission to perform the s3:GetBucketCORS -// action. By default, the bucket owner has this permission and can grant it to -// others. -// -// When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. -// -// When you use this API operation with an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see [List of Error Codes]. -// -// For more information about CORS, see [Enabling Cross-Origin Resource Sharing]. 
-// -// The following operations are related to GetBucketCors : -// -// [PutBucketCors] -// -// [DeleteBucketCors] -// -// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html -// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html -// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html -func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { - if params == nil { - params = &GetBucketCorsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketCors", params, optFns, c.addOperationGetBucketCorsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketCorsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketCorsInput struct { - - // The bucket name for which to get the cors configuration. - // - // When you use this API operation with an access point, provide the alias of the - // access point in place of the bucket name. - // - // When you use this API operation with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see [List of Error Codes]. - // - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketCorsOutput struct { - - // A set of origins and methods (cross-origin access that you want to allow). You - // can add up to 100 rules to the configuration. - CORSRules []types.CORSRule - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketCors{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketCors{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketCors"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketCors(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketCorsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opGetBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketCors", - } -} - -// getGetBucketCorsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketCorsBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketCorsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketCorsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go deleted file mode 100644 index a7dda520a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go +++ /dev/null @@ -1,278 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the default encryption configuration for an Amazon S3 bucket. By -// default, all buckets have a default encryption configuration that uses -// server-side encryption with Amazon S3 managed keys (SSE-S3). -// -// - General purpose buckets - For information about the bucket default -// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: SSE-S3 and SSE-KMS. For information about -// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. -// -// Permissions -// -// - General purpose bucket permissions - The s3:GetEncryptionConfiguration -// permission is required in a policy. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:GetEncryptionConfiguration permission in an IAM -// identity-based policy instead of a bucket policy. Cross-account access to this -// API operation isn't supported. This operation can only be performed by the -// Amazon Web Services account that owns the resource. 
For more information about -// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following operations are related to GetBucketEncryption : -// -// [PutBucketEncryption] -// -// [DeleteBucketEncryption] -// -// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html -// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html -// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html -// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { - if params == nil { - params = &GetBucketEncryptionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketEncryption", params, optFns, c.addOperationGetBucketEncryptionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketEncryptionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketEncryptionInput struct { - - // The name of the bucket from which the server-side encryption configuration is - // retrieved. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . 
- ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketEncryptionOutput struct { - - // Specifies the default server-side-encryption configuration. - ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketEncryption{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketEncryption{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketEncryption"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketEncryption(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, 
options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketEncryptionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketEncryption", - } -} - -// getGetBucketEncryptionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketEncryptionBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketEncryptionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketEncryptionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go deleted file mode 100644 index ccf065ee9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ /dev/null @@ -1,254 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Gets the S3 Intelligent-Tiering configuration from the specified bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput access -// tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of object -// size or retention period. If the size of an object is less than 128 KB, it is -// not monitored and not eligible for auto-tiering. 
Smaller objects can be stored, -// but they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. -// -// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// Operations related to GetBucketIntelligentTieringConfiguration include: -// -// [DeleteBucketIntelligentTieringConfiguration] -// -// [PutBucketIntelligentTieringConfiguration] -// -// [ListBucketIntelligentTieringConfigurations] -// -// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html -// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html -func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { - if params == nil { - params = &GetBucketIntelligentTieringConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketIntelligentTieringConfiguration", params, optFns, c.addOperationGetBucketIntelligentTieringConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketIntelligentTieringConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketIntelligentTieringConfigurationInput struct { - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // This member is required. - Id *string - - noSmithyDocumentSerde -} - -func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketIntelligentTieringConfigurationOutput struct { - - // Container for S3 Intelligent-Tiering configuration. - IntelligentTieringConfiguration *types.IntelligentTieringConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketIntelligentTieringConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - 
return err - } - return nil -} - -func (v *GetBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketIntelligentTieringConfiguration", - } -} - -// getGetBucketIntelligentTieringConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getGetBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketIntelligentTieringConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketIntelligentTieringConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go deleted file mode 100644 index 8d61953c2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go +++ /dev/null @@ -1,252 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns an inventory configuration (identified by the inventory configuration -// ID) from the bucket. -// -// To use this operation, you must have permissions to perform the -// s3:GetInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. For more information about -// permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. 
-// -// The following operations are related to GetBucketInventoryConfiguration : -// -// [DeleteBucketInventoryConfiguration] -// -// [ListBucketInventoryConfigurations] -// -// [PutBucketInventoryConfiguration] -// -// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html -// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html -func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { - if params == nil { - params = &GetBucketInventoryConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketInventoryConfiguration", params, optFns, c.addOperationGetBucketInventoryConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketInventoryConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketInventoryConfigurationInput struct { - - // The name of the bucket containing the inventory configuration to retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the inventory configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketInventoryConfigurationOutput struct { - - // Specifies the inventory configuration. - InventoryConfiguration *types.InventoryConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketInventoryConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*GetBucketInventoryConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketInventoryConfiguration", - } -} - -// getGetBucketInventoryConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketInventoryConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketInventoryConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go deleted file mode 100644 index e21bb044e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ /dev/null @@ -1,318 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the lifecycle configuration information set on the bucket. For -// information about lifecycle configuration, see [Object Lifecycle Management]. -// -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, object size, or any -// combination of these. Accordingly, this section describes the latest API, which -// is compatible with the new functionality. The previous version of the API -// supported filtering based only on an object key name prefix, which is supported -// for general purpose buckets for backward compatibility. For the related API -// description, see [GetBucketLifecycle]. -// -// Lifecyle configurations for directory buckets only support expiring objects and -// cancelling multipart uploads. Expiring of versioned objects, transitions and tag -// filters are not supported. -// -// Permissions -// - General purpose bucket permissions - By default, all Amazon S3 resources -// are private, including buckets, objects, and related subresources (for example, -// lifecycle configuration and website configuration). 
Only the resource owner -// (that is, the Amazon Web Services account that created it) can access the -// resource. The resource owner can optionally grant access permissions to others -// by writing an access policy. For this operation, a user must have the -// s3:GetLifecycleConfiguration permission. -// -// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - You must have the -// s3express:GetLifecycleConfiguration permission in an IAM identity-based policy -// to use this operation. Cross-account access to this API operation isn't -// supported. The resource owner can optionally grant access permissions to others -// by creating a role or user for them as long as they are within the same account -// as the owner and resource. -// -// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in -// -// the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name -// . Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . -// -// GetBucketLifecycleConfiguration has the following special error: -// -// - Error code: NoSuchLifecycleConfiguration -// -// - Description: The lifecycle configuration does not exist. 
-// -// - HTTP Status Code: 404 Not Found -// -// - SOAP Fault Code Prefix: Client -// -// The following operations are related to GetBucketLifecycleConfiguration : -// -// [GetBucketLifecycle] -// -// [PutBucketLifecycle] -// -// [DeleteBucketLifecycle] -// -// [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html -// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html -// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html -// -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { - if params == nil { - params = &GetBucketLifecycleConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketLifecycleConfiguration", params, optFns, c.addOperationGetBucketLifecycleConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketLifecycleConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketLifecycleConfigurationInput struct { - - // The name of the bucket for which to get the lifecycle information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketLifecycleConfigurationOutput struct { - - // Container for a lifecycle rule. - Rules []types.LifecycleRule - - // Indicates which default minimum object size behavior is applied to the - // lifecycle configuration. - // - // This parameter applies to general purpose buckets only. It isn't supported for - // directory bucket lifecycle configurations. - // - // - all_storage_classes_128K - Objects smaller than 128 KB will not transition - // to any storage class by default. - // - // - varies_by_storage_class - Objects smaller than 128 KB will transition to - // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, - // all other storage classes will prevent transitions smaller than 128 KB. 
- // - // To customize the minimum object size for any transition you can add a filter - // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body - // of your transition rule. Custom filters always take precedence over the default - // transition behavior. - TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLifecycleConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLifecycleConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLifecycleConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketLifecycleConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketLifecycleConfiguration", - } -} - -// getGetBucketLifecycleConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketLifecycleConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketLifecycleConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go deleted file mode 100644 index 973f39598..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go +++ /dev/null @@ -1,329 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - smithyxml "github.com/aws/smithy-go/encoding/xml" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" -) - -// This operation is not supported for directory buckets. -// -// Returns the Region the bucket resides in. You set the bucket's Region using the -// LocationConstraint request parameter in a CreateBucket request. For more -// information, see [CreateBucket]. -// -// When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. -// -// When you use this API operation with an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. 
For more information about -// InvalidAccessPointAliasError , see [List of Error Codes]. -// -// We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For -// backward compatibility, Amazon S3 continues to support GetBucketLocation. -// -// The following operations are related to GetBucketLocation : -// -// [GetObject] -// -// [CreateBucket] -// -// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html -func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { - if params == nil { - params = &GetBucketLocationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketLocation", params, optFns, c.addOperationGetBucketLocationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketLocationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketLocationInput struct { - - // The name of the bucket for which to get the location. - // - // When you use this API operation with an access point, provide the alias of the - // access point in place of the bucket name. - // - // When you use this API operation with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see [List of Error Codes]. - // - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketLocationOutput struct { - - // Specifies the Region where the bucket resides. For a list of all the Amazon S3 - // supported location constraints by Region, see [Regions and Endpoints]. Buckets in Region us-east-1 - // have a LocationConstraint of null . - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - LocationConstraint types.BucketLocationConstraint - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLocation{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLocation{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLocation"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = swapDeserializerHelper(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLocation(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketLocationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -type awsRestxml_deserializeOpGetBucketLocation_custom 
struct { -} - -func (*awsRestxml_deserializeOpGetBucketLocation_custom) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketLocation_custom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata) - } - output := &GetBucketLocationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - decoder := smithyxml.WrapNodeDecoder(rootDecoder, xml.StartElement{}) - err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder) - if err == io.EOF { - err = nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return out, metadata, err -} - -// Helper to swap in a custom deserializer -func swapDeserializerHelper(stack *middleware.Stack) error { - _, err := stack.Deserialize.Swap("OperationDeserializer", &awsRestxml_deserializeOpGetBucketLocation_custom{}) - if err != nil { - return err - } - return nil -} - -func (v *GetBucketLocationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketLocation(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketLocation", - } -} - -// getGetBucketLocationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketLocationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketLocationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketLocationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketLocationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go deleted file mode 100644 index b774266d3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go +++ /dev/null @@ -1,238 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the logging status of a bucket and the permissions users have to view -// and modify that status. -// -// The following operations are related to GetBucketLogging : -// -// [CreateBucket] -// -// [PutBucketLogging] -// -// [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { - if params == nil { - params = &GetBucketLoggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketLogging", params, optFns, c.addOperationGetBucketLoggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketLoggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketLoggingInput struct { - - // The bucket name for which to get the logging information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketLoggingOutput struct { - - // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API - // Reference. - // - // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html - LoggingEnabled *types.LoggingEnabled - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLogging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLogging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLogging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLogging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketLoggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - 
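// Illustrative sketch (not part of the vendored file): checking whether server
// access logging is enabled on a bucket via the generated GetBucketLogging
// operation shown above. Assumes an *s3.Client built elsewhere (e.g. with
// s3.NewFromConfig) and the same imports as the earlier sketch; the bucket name
// is a hypothetical placeholder.
func printLoggingStatus(ctx context.Context, client *s3.Client) error {
	out, err := client.GetBucketLogging(ctx, &s3.GetBucketLoggingInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		return err
	}
	if out.LoggingEnabled == nil {
		fmt.Println("access logging is disabled")
		return nil
	}
	// LoggingEnabled describes the target bucket and key prefix for log objects.
	fmt.Printf("logs go to s3://%s/%s\n",
		aws.ToString(out.LoggingEnabled.TargetBucket),
		aws.ToString(out.LoggingEnabled.TargetPrefix))
	return nil
}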
-func newServiceMetadataMiddleware_opGetBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketLogging", - } -} - -// getGetBucketLoggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketLoggingBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketLoggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketLoggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go deleted file mode 100644 index e3b17f1ae..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go +++ /dev/null @@ -1,239 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Retrieves the metadata table configuration for a general purpose bucket. For -// -// more information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. -// -// Permissions To use this operation, you must have the -// s3:GetBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] -// in the Amazon S3 User Guide. 
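// Illustrative sketch (not part of the vendored file): retrieving a general
// purpose bucket's S3 Metadata table configuration with the operation this file
// generates. Assumes an *s3.Client and the same imports as the earlier sketch;
// the bucket name is a hypothetical placeholder.
func printMetadataTableConfig(ctx context.Context, client *s3.Client) error {
	out, err := client.GetBucketMetadataTableConfiguration(ctx, &s3.GetBucketMetadataTableConfigurationInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		return err
	}
	fmt.Printf("metadata table configuration: %+v\n", out.GetBucketMetadataTableConfigurationResult)
	return nil
}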
-// -// The following operations are related to GetBucketMetadataTableConfiguration : -// -// [CreateBucketMetadataTableConfiguration] -// -// [DeleteBucketMetadataTableConfiguration] -// -// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html -// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html -// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html -// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html -func (c *Client) GetBucketMetadataTableConfiguration(ctx context.Context, params *GetBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*GetBucketMetadataTableConfigurationOutput, error) { - if params == nil { - params = &GetBucketMetadataTableConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketMetadataTableConfiguration", params, optFns, c.addOperationGetBucketMetadataTableConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketMetadataTableConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketMetadataTableConfigurationInput struct { - - // The general purpose bucket that contains the metadata table configuration that - // you want to retrieve. - // - // This member is required. - Bucket *string - - // The expected owner of the general purpose bucket that you want to retrieve the - // metadata table configuration from. - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketMetadataTableConfigurationOutput struct { - - // The metadata table configuration for the general purpose bucket. - GetBucketMetadataTableConfigurationResult *types.GetBucketMetadataTableConfigurationResult - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetadataTableConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetadataTableConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func 
(v *GetBucketMetadataTableConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketMetadataTableConfiguration", - } -} - -// getGetBucketMetadataTableConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketMetadataTableConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketMetadataTableConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go deleted file mode 100644 index a83b72620..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go +++ /dev/null @@ -1,255 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Gets a metrics configuration (specified by the metrics configuration ID) from -// the bucket. Note that this doesn't include the daily storage metrics. -// -// To use this operation, you must have permissions to perform the -// s3:GetMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. 
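// Illustrative sketch (not part of the vendored file): fetching a CloudWatch
// request-metrics configuration by ID, matching the required Bucket and Id
// members of GetBucketMetricsConfigurationInput defined below. Assumes an
// *s3.Client and the same imports as the earlier sketch; the bucket name and
// configuration ID are hypothetical placeholders.
func printMetricsConfig(ctx context.Context, client *s3.Client) error {
	out, err := client.GetBucketMetricsConfiguration(ctx, &s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("example-metrics-id"),
	})
	if err != nil {
		return err
	}
	fmt.Printf("metrics configuration: %+v\n", out.MetricsConfiguration)
	return nil
}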
-// -// The following operations are related to GetBucketMetricsConfiguration : -// -// [PutBucketMetricsConfiguration] -// -// [DeleteBucketMetricsConfiguration] -// -// [ListBucketMetricsConfigurations] -// -// [Monitoring Metrics with Amazon CloudWatch] -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html -// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html -// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { - if params == nil { - params = &GetBucketMetricsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketMetricsConfiguration", params, optFns, c.addOperationGetBucketMetricsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketMetricsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketMetricsConfigurationInput struct { - - // The name of the bucket containing the metrics configuration to retrieve. - // - // This member is required. - Bucket *string - - // The ID used to identify the metrics configuration. The ID has a 64 character - // limit and can only contain letters, numbers, periods, dashes, and underscores. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketMetricsConfigurationOutput struct { - - // Specifies the metrics configuration. - MetricsConfiguration *types.MetricsConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetricsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketMetricsConfigurationInput) 
bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketMetricsConfiguration", - } -} - -// getGetBucketMetricsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketMetricsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketMetricsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go deleted file mode 100644 index 8f744ba30..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go +++ /dev/null @@ -1,278 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the notification configuration of a bucket. -// -// If notifications are not enabled on the bucket, the action returns an empty -// NotificationConfiguration element. -// -// By default, you must be the bucket owner to read the notification configuration -// of a bucket. However, the bucket owner can use a bucket policy to grant -// permission to other users to read this configuration with the -// s3:GetBucketNotification permission. -// -// When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. -// -// When you use this API operation with an Object Lambda access point, provide the -// alias of the Object Lambda access point in place of the bucket name. If the -// Object Lambda access point alias in a request is not valid, the error code -// InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see [List of Error Codes]. -// -// For more information about setting and reading the notification configuration -// on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies]. 
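// Illustrative sketch (not part of the vendored file): listing the event
// notification targets configured on a bucket via the generated
// GetBucketNotificationConfiguration operation described below. Assumes an
// *s3.Client and the same imports as the earlier sketch; the bucket name is a
// hypothetical placeholder.
func printNotificationTargets(ctx context.Context, client *s3.Client) error {
	out, err := client.GetBucketNotificationConfiguration(ctx, &s3.GetBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		return err
	}
	for _, q := range out.QueueConfigurations {
		fmt.Printf("SQS queue %s receives %v\n", aws.ToString(q.QueueArn), q.Events)
	}
	for _, t := range out.TopicConfigurations {
		fmt.Printf("SNS topic %s receives %v\n", aws.ToString(t.TopicArn), t.Events)
	}
	for _, l := range out.LambdaFunctionConfigurations {
		fmt.Printf("Lambda %s receives %v\n", aws.ToString(l.LambdaFunctionArn), l.Events)
	}
	return nil
}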
-// -// The following action is related to GetBucketNotification : -// -// [PutBucketNotification] -// -// [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList -// [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html -func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { - if params == nil { - params = &GetBucketNotificationConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketNotificationConfiguration", params, optFns, c.addOperationGetBucketNotificationConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketNotificationConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketNotificationConfigurationInput struct { - - // The name of the bucket for which to get the notification configuration. - // - // When you use this API operation with an access point, provide the alias of the - // access point in place of the bucket name. - // - // When you use this API operation with an Object Lambda access point, provide the - // alias of the Object Lambda access point in place of the bucket name. If the - // Object Lambda access point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see [List of Error Codes]. - // - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -// A container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off for the bucket. -type GetBucketNotificationConfigurationOutput struct { - - // Enables delivery of events to Amazon EventBridge. - EventBridgeConfiguration *types.EventBridgeConfiguration - - // Describes the Lambda functions to invoke and the events for which to invoke - // them. - LambdaFunctionConfigurations []types.LambdaFunctionConfiguration - - // The Amazon Simple Queue Service queues to publish messages to and the events - // for which to publish messages. - QueueConfigurations []types.QueueConfiguration - - // The topic to which notifications are sent and the events for which - // notifications are generated. - TopicConfigurations []types.TopicConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketNotificationConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketNotificationConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketNotificationConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*GetBucketNotificationConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketNotificationConfiguration", - } -} - -// getGetBucketNotificationConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getGetBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketNotificationConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketNotificationConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go deleted file mode 100644 index ac69a4734..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go +++ /dev/null @@ -1,238 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you -// must have the s3:GetBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see [Specifying permissions in a policy]. -// -// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. 
-// -// The following operations are related to GetBucketOwnershipControls : -// -// # PutBucketOwnershipControls -// -// # DeleteBucketOwnershipControls -// -// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html -func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { - if params == nil { - params = &GetBucketOwnershipControlsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketOwnershipControls", params, optFns, c.addOperationGetBucketOwnershipControlsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketOwnershipControlsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketOwnershipControlsInput struct { - - // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketOwnershipControlsOutput struct { - - // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or - // ObjectWriter) currently in effect for this Amazon S3 bucket. - OwnershipControls *types.OwnershipControls - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketOwnershipControls"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketOwnershipControls(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketOwnershipControlsInput) bucket() (string, bool) { 
- if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketOwnershipControls", - } -} - -// getGetBucketOwnershipControlsBucketMember returns a pointer to string denoting -// a provided bucket member valueand a boolean indicating if the input has a -// modeled bucket name, -func getGetBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketOwnershipControlsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketOwnershipControlsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go deleted file mode 100644 index 0c0a39e71..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go +++ /dev/null @@ -1,305 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the policy of a specified bucket. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions If you are using an identity other than the root user of the Amazon -// Web Services account that owns the bucket, the calling identity must both have -// the GetBucketPolicy permissions on the specified bucket and belong to the -// bucket owner's account in order to use this operation. -// -// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. 
-// -// To ensure that bucket owners don't inadvertently lock themselves out of their -// own buckets, the root principal in a bucket owner's Amazon Web Services account -// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API -// actions, even if their bucket policy explicitly denies the root principal's -// access. Bucket owner root principals can only be blocked from performing these -// API actions by VPC endpoint policies and Amazon Web Services Organizations -// policies. -// -// - General purpose bucket permissions - The s3:GetBucketPolicy permission is -// required in a policy. For more information about general purpose buckets bucket -// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:GetBucketPolicy permission in an IAM identity-based -// policy instead of a bucket policy. Cross-account access to this API operation -// isn't supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] -// in the Amazon S3 User Guide. -// -// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . -// -// The following action is related to GetBucketPolicy : -// -// [GetObject] -// -// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { - if params == nil { - params = &GetBucketPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicy", params, optFns, c.addOperationGetBucketPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketPolicyInput struct { - - // The bucket name to get the bucket policy for. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . 
- // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // Access points - When you use this API operation with an access point, provide - // the alias of the access point in place of the bucket name. - // - // Object Lambda access points - When you use this API operation with an Object - // Lambda access point, provide the alias of the Object Lambda access point in - // place of the bucket name. If the Object Lambda access point alias in a request - // is not valid, the error code InvalidAccessPointAliasError is returned. For more - // information about InvalidAccessPointAliasError , see [List of Error Codes]. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketPolicyOutput struct { - - // The bucket policy as a JSON document. - Policy *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketPolicyInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opGetBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketPolicy", - } -} - -// getGetBucketPolicyBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketPolicyBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketPolicyInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketPolicyBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go deleted file mode 100644 index a5cac1be1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go +++ /dev/null @@ -1,246 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. In order to use this operation, you must have the -// s3:GetBucketPolicyStatus permission. For more information about Amazon S3 -// permissions, see [Specifying Permissions in a Policy]. -// -// For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"]. 
-// -// The following operations are related to GetBucketPolicyStatus : -// -// [Using Amazon S3 Block Public Access] -// -// [GetPublicAccessBlock] -// -// [PutPublicAccessBlock] -// -// [DeletePublicAccessBlock] -// -// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html -// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html -// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status -func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { - if params == nil { - params = &GetBucketPolicyStatusInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicyStatus", params, optFns, c.addOperationGetBucketPolicyStatusMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketPolicyStatusOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketPolicyStatusInput struct { - - // The name of the Amazon S3 bucket whose policy status you want to retrieve. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketPolicyStatusOutput struct { - - // The policy status for the specified bucket. - PolicyStatus *types.PolicyStatus - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicyStatus{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicyStatus{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicyStatus"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicyStatus(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketPolicyStatusInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", 
false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketPolicyStatus(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketPolicyStatus", - } -} - -// getGetBucketPolicyStatusBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketPolicyStatusBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketPolicyStatusInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketPolicyStatusUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketPolicyStatusBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go deleted file mode 100644 index fac6ec2d9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go +++ /dev/null @@ -1,253 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the replication configuration of a bucket. -// -// It can take a while to propagate the put or delete a replication configuration -// to all Amazon S3 systems. Therefore, a get request soon after put or delete can -// return a wrong result. -// -// For information about replication configuration, see [Replication] in the Amazon S3 User -// Guide. -// -// This action requires permissions for the s3:GetReplicationConfiguration action. -// For more information about permissions, see [Using Bucket Policies and User Policies]. -// -// If you include the Filter element in a replication configuration, you must also -// include the DeleteMarkerReplication and Priority elements. The response also -// returns those elements. 
-// -// For information about GetBucketReplication errors, see [List of replication-related error codes] -// -// The following operations are related to GetBucketReplication : -// -// [PutBucketReplication] -// -// [DeleteBucketReplication] -// -// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html -// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html -// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList -// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html -func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { - if params == nil { - params = &GetBucketReplicationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketReplication", params, optFns, c.addOperationGetBucketReplicationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketReplicationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketReplicationInput struct { - - // The bucket name for which to get the replication information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketReplicationOutput struct { - - // A container for replication rules. You can add up to 1,000 rules. The maximum - // size of a replication configuration is 2 MB. - ReplicationConfiguration *types.ReplicationConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketReplication{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketReplication{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketReplication"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketReplication(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketReplicationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - 
return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketReplication", - } -} - -// getGetBucketReplicationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketReplicationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketReplicationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketReplicationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go deleted file mode 100644 index 3398dfc5f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the request payment configuration of a bucket. To use this version of -// the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets]. -// -// The following operations are related to GetBucketRequestPayment : -// -// [ListObjects] -// -// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html -// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html -func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { - if params == nil { - params = &GetBucketRequestPaymentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketRequestPayment", params, optFns, c.addOperationGetBucketRequestPaymentMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketRequestPaymentOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketRequestPaymentInput struct { - - // The name of the bucket for which to get the payment request configuration - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. 
If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketRequestPaymentOutput struct { - - // Specifies who pays for the download and request fees. - Payer types.Payer - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketRequestPayment{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketRequestPayment{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketRequestPayment"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketRequestPayment(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = 
addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketRequestPaymentInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketRequestPayment", - } -} - -// getGetBucketRequestPaymentBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketRequestPaymentBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketRequestPaymentInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketRequestPaymentBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go deleted file mode 100644 index 6ff16896e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go +++ /dev/null @@ -1,245 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the tag set associated with the bucket. -// -// To use this operation, you must have permission to perform the -// s3:GetBucketTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. -// -// GetBucketTagging has the following special error: -// -// - Error code: NoSuchTagSet -// -// - Description: There is no tag set associated with the bucket. 
-// -// The following operations are related to GetBucketTagging : -// -// [PutBucketTagging] -// -// [DeleteBucketTagging] -// -// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html -// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html -func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { - if params == nil { - params = &GetBucketTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketTagging", params, optFns, c.addOperationGetBucketTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketTaggingInput struct { - - // The name of the bucket for which to get the tagging information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketTaggingOutput struct { - - // Contains the tag set. - // - // This member is required. - TagSet []types.Tag - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } 
- if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketTagging", - } -} - -// getGetBucketTaggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go deleted file mode 100644 index 919cf688c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
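
The GetBucketTagging operation whose vendored copy is deleted above is still provided by the upstream github.com/aws/aws-sdk-go-v2/service/s3 module, so dropping the in-tree copy does not change how callers use it. A minimal, illustrative sketch of the call, assuming ambient AWS credentials and a placeholder bucket name (neither is taken from this diff):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the environment / shared config files.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// "example-pbm-backups" is a placeholder bucket name used for illustration only.
	out, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
		Bucket: aws.String("example-pbm-backups"),
	})
	if err != nil {
		log.Fatal(err) // e.g. NoSuchTagSet when the bucket has no tags
	}
	for _, t := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(t.Key), aws.ToString(t.Value))
	}
}
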
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the versioning state of a bucket. -// -// To retrieve the versioning state of a bucket, you must be the bucket owner. -// -// This implementation also returns the MFA Delete status of the versioning state. -// If the MFA Delete status is enabled , the bucket owner must use an -// authentication device to change the versioning state of the bucket. -// -// The following operations are related to GetBucketVersioning : -// -// [GetObject] -// -// [PutObject] -// -// [DeleteObject] -// -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { - if params == nil { - params = &GetBucketVersioningInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketVersioning", params, optFns, c.addOperationGetBucketVersioningMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketVersioningOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketVersioningInput struct { - - // The name of the bucket for which to get the versioning information. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketVersioningOutput struct { - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA delete. - // If the bucket has never been so configured, this element is not returned. - MFADelete types.MFADeleteStatus - - // The versioning state of the bucket. - Status types.BucketVersioningStatus - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketVersioning{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketVersioning{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketVersioning"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketVersioning(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketVersioningInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return 
*v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketVersioning", - } -} - -// getGetBucketVersioningBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetBucketVersioningBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketVersioningInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketVersioningBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go deleted file mode 100644 index 3758b8789..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go +++ /dev/null @@ -1,251 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the website configuration for a bucket. To host website on Amazon S3, -// you can configure a bucket as website by adding a website configuration. For -// more information about hosting websites, see [Hosting Websites on Amazon S3]. -// -// This GET action requires the S3:GetBucketWebsite permission. By default, only -// the bucket owner can read the bucket website configuration. However, bucket -// owners can allow other users to read the website configuration by writing a -// bucket policy granting them the S3:GetBucketWebsite permission. 
-// -// The following operations are related to GetBucketWebsite : -// -// [DeleteBucketWebsite] -// -// [PutBucketWebsite] -// -// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html -// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html -// [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html -func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { - if params == nil { - params = &GetBucketWebsiteInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetBucketWebsite", params, optFns, c.addOperationGetBucketWebsiteMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetBucketWebsiteOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetBucketWebsiteInput struct { - - // The bucket name for which to get the website configuration. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetBucketWebsiteOutput struct { - - // The object key name of the website error document to use for 4XX class errors. - ErrorDocument *types.ErrorDocument - - // The name of the index document for the website (for example index.html ). - IndexDocument *types.IndexDocument - - // Specifies the redirect behavior of all requests to a website endpoint of an - // Amazon S3 bucket. - RedirectAllRequestsTo *types.RedirectAllRequestsTo - - // Rules that define when a redirect is applied and the redirect behavior. - RoutingRules []types.RoutingRule - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketWebsite{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketWebsite{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketWebsite"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketWebsite(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetBucketWebsiteInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - 
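
The GetBucketVersioning and GetBucketWebsite operations documented above are simple read-only configuration calls. An illustrative sketch of checking a bucket's versioning state and website configuration with the upstream SDK, assuming a *s3.Client built as in the earlier GetBucketTagging sketch and a placeholder bucket name:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// describeBucket reports whether versioning is enabled and which index
// document, if any, the bucket's website configuration uses.
func describeBucket(ctx context.Context, client *s3.Client, bucket string) error {
	ver, err := client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	// Status is empty for buckets that never had versioning configured.
	fmt.Println("versioning enabled:", ver.Status == types.BucketVersioningStatusEnabled)
	fmt.Println("MFA delete:", ver.MFADelete)

	web, err := client.GetBucketWebsite(ctx, &s3.GetBucketWebsiteInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		// Buckets without a website configuration return NoSuchWebsiteConfiguration;
		// a caller may prefer to treat that as "no website" rather than a failure.
		return err
	}
	if web.IndexDocument != nil {
		fmt.Println("website index document:", aws.ToString(web.IndexDocument.Suffix))
	}
	return nil
}
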
-func newServiceMetadataMiddleware_opGetBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetBucketWebsite", - } -} - -// getGetBucketWebsiteBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetBucketWebsiteBucketMember(input interface{}) (*string, bool) { - in := input.(*GetBucketWebsiteInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetBucketWebsiteBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go deleted file mode 100644 index c2b0c7f9d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go +++ /dev/null @@ -1,882 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "time" -) - -// Retrieves an object from Amazon S3. -// -// In the GetObject request, specify the full key name for the object. -// -// General purpose buckets - Both the virtual-hosted-style requests and the -// path-style requests are supported. For a virtual hosted-style request example, -// if you have the object photos/2006/February/sample.jpg , specify the object key -// name as /photos/2006/February/sample.jpg . For a path-style request example, if -// you have the object photos/2006/February/sample.jpg in the bucket named -// examplebucket , specify the object key name as -// /examplebucket/photos/2006/February/sample.jpg . For more information about -// request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide. -// -// Directory buckets - Only virtual-hosted-style requests are supported. For a -// virtual hosted-style request example, if you have the object -// photos/2006/February/sample.jpg in the bucket named -// examplebucket--use1-az5--x-s3 , specify the object key name as -// /photos/2006/February/sample.jpg . Also, when you make requests to this API -// operation, your requests are sent to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. 
For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about -// endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - You must have the required permissions -// in a policy. To use GetObject , you must have the READ access to the object -// (or version). If you grant READ access to the anonymous user, the GetObject -// operation returns the object without using an authorization header. For more -// information, see [Specifying permissions in a policy]in the Amazon S3 User Guide. -// -// If you include a versionId in your request header, you must have the -// -// s3:GetObjectVersion permission to access a specific version of an object. The -// s3:GetObject permission is not required in this scenario. -// -// If you request the current version of an object without a specific versionId in -// -// the request header, only the s3:GetObject permission is required. The -// s3:GetObjectVersion permission is not required in this scenario. -// -// If the object that you request doesn’t exist, the error that Amazon S3 returns -// -// depends on whether you also have the s3:ListBucket permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Access Denied error. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted using SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Storage classes If the object you are retrieving is stored in the S3 Glacier -// Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the -// S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep -// Archive Access tier, before you can retrieve the object you must first restore a -// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, only the S3 Express One Zone storage -// class is supported to store newly created objects. Unsupported storage class -// values won't write a destination object and will respond with the HTTP status -// code 400 Bad Request . 
-// -// Encryption Encryption request headers, like x-amz-server-side-encryption , -// should not be sent for the GetObject requests, if your object uses server-side -// encryption with Amazon S3 managed encryption keys (SSE-S3), server-side -// encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer -// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you -// include the header in your GetObject requests for the object that uses these -// types of keys, you’ll get an HTTP 400 Bad Request error. -// -// Directory buckets - For directory buckets, there are only two supported options -// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more -// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. -// -// Overriding response header values through the request There are times when you -// want to override certain response header values of a GetObject response. For -// example, you might override the Content-Disposition response header value -// through your GetObject request. -// -// You can override values for a set of response headers. These modified response -// header values are included only in a successful response, that is, when the HTTP -// status code 200 OK is returned. The headers you can override using the -// following query parameters in the request are a subset of the headers that -// Amazon S3 accepts when you create an object. -// -// The response headers that you can override for the GetObject response are -// Cache-Control , Content-Disposition , Content-Encoding , Content-Language , -// Content-Type , and Expires . -// -// To override values for a set of response headers in the GetObject response, you -// can use the following query parameters in the request. -// -// - response-cache-control -// -// - response-content-disposition -// -// - response-content-encoding -// -// - response-content-language -// -// - response-content-type -// -// - response-expires -// -// When you use these parameters, you must sign the request by using either an -// Authorization header or a presigned URL. These parameters cannot be used with an -// unsigned (anonymous) request. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
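
The override parameters described above (response-cache-control, response-content-disposition, and so on) map to the Response* fields of GetObjectInput and, as the doc comment notes, only take effect on signed requests. A hedged sketch of a signed GetObject that rewrites Content-Type and Content-Disposition on the way back; bucket and key names are placeholders, not taken from this diff:

package example

import (
	"context"
	"io"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// downloadWithOverrides fetches an object and asks S3 to return it with
// overridden Content-Type and Content-Disposition response headers.
func downloadWithOverrides(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:                     aws.String("example-pbm-backups"), // placeholder
		Key:                        aws.String("dumps/backup.dump"),   // placeholder
		ResponseContentType:        aws.String("application/octet-stream"),
		ResponseContentDisposition: aws.String(`attachment; filename="backup.dump"`),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	// GetObjectOutput.Body is an io.ReadCloser; stream it rather than buffering.
	_, err = io.Copy(os.Stdout, out.Body)
	return err
}
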
-// -// The following operations are related to GetObject : -// -// [ListBuckets] -// -// [GetObjectAcl] -// -// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html -// [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket -// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html -// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html -// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { - if params == nil { - params = &GetObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObject", params, optFns, c.addOperationGetObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectInput struct { - - // The bucket name containing the object. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points - When you use this action with an Object Lambda - // access point, you must direct requests to the Object Lambda access point - // hostname. The Object Lambda access point hostname takes the form - // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. 
- // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Key of the object to get. - // - // This member is required. - Key *string - - // To retrieve the checksum, this mode must be enabled. - // - // General purpose buckets - In addition, if you enable checksum mode and the - // object is uploaded with a [checksum]and encrypted with an Key Management Service (KMS) - // key, you must have permission to use the kms:Decrypt action to retrieve the - // checksum. - // - // [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html - ChecksumMode types.ChecksumMode - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Return the object only if its entity tag (ETag) is the same as the one - // specified in this header; otherwise, return a 412 Precondition Failed error. - // - // If both of the If-Match and If-Unmodified-Since headers are present in the - // request as follows: If-Match condition evaluates to true , and; - // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and - // the data requested. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 Not Modified error. - // - // If both of the If-None-Match and If-Modified-Since headers are present in the - // request as follows: If-None-Match condition evaluates to false , and; - // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not - // Modified status code. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfModifiedSince *time.Time - - // Return the object only if its entity tag (ETag) is different from the one - // specified in this header; otherwise, return a 304 Not Modified error. - // - // If both of the If-None-Match and If-Modified-Since headers are present in the - // request as follows: If-None-Match condition evaluates to false , and; - // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not - // Modified HTTP status code. - // - // For more information about conditional requests, see [RFC 7232]. 
- // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfNoneMatch *string - - // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 Precondition Failed error. - // - // If both of the If-Match and If-Unmodified-Since headers are present in the - // request as follows: If-Match condition evaluates to true , and; - // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and - // the data requested. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfUnmodifiedSince *time.Time - - // Part number of the object being read. This is a positive integer between 1 and - // 10,000. Effectively performs a 'ranged' GET request for the part specified. - // Useful for downloading just a part of an object. - PartNumber *int32 - - // Downloads the specified byte range of an object. For more information about the - // HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]. - // - // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. - // - // [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range - Range *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Sets the Cache-Control header of the response. - ResponseCacheControl *string - - // Sets the Content-Disposition header of the response. - ResponseContentDisposition *string - - // Sets the Content-Encoding header of the response. - ResponseContentEncoding *string - - // Sets the Content-Language header of the response. - ResponseContentLanguage *string - - // Sets the Content-Type header of the response. - ResponseContentType *string - - // Sets the Expires header of the response. - ResponseExpires *time.Time - - // Specifies the algorithm to use when decrypting the object (for example, AES256 ). - // - // If you encrypt an object by using server-side encryption with customer-provided - // encryption keys (SSE-C) when you store the object in Amazon S3, then when you - // GET the object, you must use the following headers: - // - // - x-amz-server-side-encryption-customer-algorithm - // - // - x-amz-server-side-encryption-customer-key - // - // - x-amz-server-side-encryption-customer-key-MD5 - // - // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. 
- // - // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key that you originally provided for - // Amazon S3 to encrypt the data before storing it. This value is used to decrypt - // the object when recovering it and must match the one used when storing the data. - // The key must be appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // If you encrypt an object by using server-side encryption with customer-provided - // encryption keys (SSE-C) when you store the object in Amazon S3, then when you - // GET the object, you must use the following headers: - // - // - x-amz-server-side-encryption-customer-algorithm - // - // - x-amz-server-side-encryption-customer-key - // - // - x-amz-server-side-encryption-customer-key-MD5 - // - // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the customer-provided encryption key - // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. - // - // If you encrypt an object by using server-side encryption with customer-provided - // encryption keys (SSE-C) when you store the object in Amazon S3, then when you - // GET the object, you must use the following headers: - // - // - x-amz-server-side-encryption-customer-algorithm - // - // - x-amz-server-side-encryption-customer-key - // - // - x-amz-server-side-encryption-customer-key-MD5 - // - // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - // Version ID used to reference a specific version of the object. - // - // By default, the GetObject operation returns the current version of an object. - // To return a different version, use the versionId subresource. - // - // - If you include a versionId in your request header, you must have the - // s3:GetObjectVersion permission to access a specific version of an object. The - // s3:GetObject permission is not required in this scenario. - // - // - If you request the current version of an object without a specific versionId - // in the request header, only the s3:GetObject permission is required. The - // s3:GetObjectVersion permission is not required in this scenario. - // - // - Directory buckets - S3 Versioning isn't enabled and supported for directory - // buckets. For this API operation, only the null value of the version ID is - // supported by directory buckets. You can only specify null to the versionId - // query parameter in the request. - // - // For more information about versioning, see [PutBucketVersioning]. 
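
The GetObjectInput fields above cover the conditional headers (If-Match, If-None-Match, and friends), ranged reads, part numbers, SSE-C, and version selection. A small sketch, with placeholder names, that re-reads only the first 1 MiB of a specific object version unless a cached ETag still matches:

package example

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// readRangeIfChanged performs a ranged, conditional GET. If the stored ETag
// still matches, S3 answers 304 Not Modified, which the SDK surfaces as an error.
func readRangeIfChanged(ctx context.Context, client *s3.Client, cachedETag, versionID string) ([]byte, error) {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:      aws.String("example-pbm-backups"), // placeholder
		Key:         aws.String("pbm-meta/backup.json"), // placeholder
		Range:       aws.String("bytes=0-1048575"),      // first 1 MiB only
		IfNoneMatch: aws.String(cachedETag),
		VersionId:   aws.String(versionID),
	})
	if err != nil {
		return nil, err // includes the 304 Not Modified case
	}
	defer out.Body.Close()
	return io.ReadAll(out.Body)
}
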
- // - // [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type GetObjectOutput struct { - - // Indicates that a range of bytes was specified in the request. - AcceptRanges *string - - // Object data. - Body io.ReadCloser - - // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // Specifies caching behavior along the request/reply chain. - CacheControl *string - - // The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the object. For more - // information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // The checksum type, which determines how part-level checksums are combined to - // create an object-level checksum for multipart objects. You can use this header - // response to verify that the checksum type that is received is the same checksum - // type that was specified in the CreateMultipartUpload request. For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Specifies presentational information for the object. - ContentDisposition *string - - // Indicates what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // Size of the body in bytes. 
- ContentLength *int64 - - // The portion of the object returned in the response. - ContentRange *string - - // A standard MIME type describing the format of the object data. - ContentType *string - - // Indicates whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - // - // - If the current version of the object is a delete marker, Amazon S3 behaves - // as if the object was deleted and includes x-amz-delete-marker: true in the - // response. - // - // - If the specified version in the request is a delete marker, the response - // returns a 405 Method Not Allowed error and the Last-Modified: timestamp - // response header. - DeleteMarker *bool - - // An entity tag (ETag) is an opaque identifier assigned by a web server to a - // specific version of a resource found at a URL. - ETag *string - - // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. - // - // Object expiration information is not returned in directory buckets and this - // header returns the value " NotImplemented " in all responses for directory - // buckets. - // - // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html - Expiration *string - - // The date and time at which the object is no longer cacheable. - // - // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using - // the ExpiresString field which contains the unparsed value from the service - // response. - Expires *time.Time - - // The unparsed value of the Expires field from the service response. Prefer use - // of this value over the normal Expires response field where possible. - ExpiresString *string - - // Date and time when the object was last modified. - // - // General purpose buckets - When you specify a versionId of the object in your - // request, if the specified version in the request is a delete marker, the - // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp - // response header. - LastModified *time.Time - - // A map of metadata to store with the object in S3. - // - // Map keys will be normalized to lower-case. - Metadata map[string]string - - // This is set to the number of metadata entries not returned in the headers that - // are prefixed with x-amz-meta- . This can happen if you create metadata using an - // API like SOAP that supports more flexible metadata than the REST API. For - // example, using SOAP, you can create metadata whose values are not legal HTTP - // headers. - // - // This functionality is not supported for directory buckets. - MissingMeta *int32 - - // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. - // - // This functionality is not supported for directory buckets. - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // The Object Lock mode that's currently in place for this object. - // - // This functionality is not supported for directory buckets. - ObjectLockMode types.ObjectLockMode - - // The date and time when this object's Object Lock will expire. - // - // This functionality is not supported for directory buckets. 
- ObjectLockRetainUntilDate *time.Time - - // The count of parts this object has. This value is only returned if you specify - // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount *int32 - - // Amazon S3 can return this if your request involves a bucket that is either a - // source or destination in a replication rule. - // - // This functionality is not supported for directory buckets. - ReplicationStatus types.ReplicationStatus - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Provides information about object restoration action and expiration time of the - // restored object copy. - // - // This functionality is not supported for directory buckets. Only the S3 Express - // One Zone storage class is supported by directory buckets to store objects. - Restore *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3. - ServerSideEncryption types.ServerSideEncryption - - // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. - // - // Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. - StorageClass types.StorageClass - - // The number of tags, if any, on the object, when you have the relevant - // permission to read object tags. - // - // You can use [GetObjectTagging] to retrieve the tag set associated with an object. - // - // This functionality is not supported for directory buckets. - // - // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html - TagCount *int32 - - // Version ID of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. - // - // This functionality is not supported for directory buckets. - WebsiteRedirectLocation *string - - // Metadata pertaining to the operation's result. 
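
The GetObjectOutput fields above (checksums, ETag, ContentLength, storage class, and so on) are plain pointer or enum values. A sketch that enables checksum mode on download and inspects a few of those fields; bucket and key are caller-supplied placeholders:

package example

import (
	"context"
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// getWithChecksum downloads an object with checksum mode enabled and prints
// some of the response fields documented above.
func getWithChecksum(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		ChecksumMode: types.ChecksumModeEnabled, // ask S3 to return stored checksums
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	// Drain the body so the SDK's response-checksum middleware can validate it.
	n, err := io.Copy(io.Discard, out.Body)
	if err != nil {
		return err
	}
	fmt.Printf("read %d of %d bytes, etag=%s, storage class=%s, sha256=%s\n",
		n, aws.ToInt64(out.ContentLength), aws.ToString(out.ETag),
		out.StorageClass, aws.ToString(out.ChecksumSHA256))
	return nil
}
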
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addResponseChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpGetObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addGetObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", 
false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObject", - } -} - -// getGetObjectRequestValidationModeMember gets the request checksum validation -// mode provided as input. -func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) { - in := input.(*GetObjectInput) - if len(in.ChecksumMode) == 0 { - return "", false - } - return string(in.ChecksumMode), true -} - -func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{ - GetValidationMode: getGetObjectRequestValidationModeMember, - ResponseChecksumValidation: options.ResponseChecksumValidation, - ValidationAlgorithms: []string{"CRC64NVME", "CRC32", "CRC32C", "SHA256", "SHA1"}, - IgnoreMultipartValidation: true, - LogValidationSkipped: true, - LogMultipartValidationSkipped: true, - }) -} - -// getGetObjectBucketMember returns a pointer to string denoting a provided bucket -// member valueand a boolean indicating if the input has a modeled bucket name, -func getGetObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignGetObject is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignGetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &GetObjectInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "GetObject", params, clientOptFns, - c.client.addOperationGetObjectMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addGetObjectPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addGetObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go deleted file mode 100644 index fde39a969..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
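
PresignGetObject, shown in the file deleted above, wraps the same GetObject input in a presigned URL instead of executing the request. A sketch of generating a time-limited download link with the upstream SDK's PresignClient; the expiry and the idea of handing the URL to another party are illustrative choices, not something this diff prescribes:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// presignDownload returns a URL that allows anyone holding it to GET the
// object for the next 15 minutes.
func presignDownload(ctx context.Context, client *s3.Client, bucket, key string) (string, error) {
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignGetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}, s3.WithPresignExpires(15*time.Minute))
	if err != nil {
		return "", err
	}
	fmt.Println("signed method:", req.Method)
	return req.URL, nil
}
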
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the access control list (ACL) of an object. To use this operation, you -// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For -// more information, see [Mapping of ACL permissions and access policy permissions]in the Amazon S3 User Guide -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// By default, GET returns ACL information about the current version of an object. -// To return ACL information about a different version, use the versionId -// subresource. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// requests to read ACLs are still supported and return the -// bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. -// -// The following operations are related to GetObjectAcl : -// -// [GetObject] -// -// [GetObjectAttributes] -// -// [DeleteObject] -// -// [PutObject] -// -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { - if params == nil { - params = &GetObjectAclInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectAcl", params, optFns, c.addOperationGetObjectAclMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectAclOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectAclInput struct { - - // The bucket name that contains the object for which to get the ACL information. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. 
- Bucket *string - - // The key of the object for which to get the ACL information. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Version ID used to reference a specific version of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type GetObjectAclOutput struct { - - // A list of grants. - Grants []types.Grant - - // Container for the bucket owner's display name and ID. - Owner *types.Owner - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. 
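For the GetObjectAcl operation whose input and output shapes are documented in this deleted file, a rough usage sketch under the same assumptions (hypothetical bucket and key, default credential resolution; not this project's code) could be:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Hypothetical object; VersionId could be set to read the ACL of a specific version.
	out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("backups/metadata.json"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Print each grantee ID and the permission it was granted.
	for _, grant := range out.Grants {
		if grant.Grantee != nil {
			fmt.Printf("%s: %s\n", aws.ToString(grant.Grantee.ID), grant.Permission)
		}
	}
}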
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAcl{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAcl{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAcl"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAcl(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectAclInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opGetObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectAcl", - } -} - -// getGetObjectAclBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetObjectAclBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectAclInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectAclBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go deleted file mode 100644 index ee210cc94..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go +++ /dev/null @@ -1,503 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Retrieves all the metadata from an object without returning the object itself. -// This operation is useful if you're interested only in an object's metadata. -// -// GetObjectAttributes combines the functionality of HeadObject and ListParts . All -// of the data returned with each of those individual calls can be returned with a -// single call to GetObjectAttributes . -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about -// endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - To use GetObjectAttributes , you must -// have READ access to the object. The permissions that you need to use this -// operation depend on whether the bucket is versioned. If the bucket is versioned, -// you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes -// permissions for this operation. If the bucket is not versioned, you need the -// s3:GetObject and s3:GetObjectAttributes permissions. 
For more information, see [Specifying Permissions in a Policy] -// in the Amazon S3 User Guide. If the object that you request does not exist, the -// error Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found ("no such key") error. -// -// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden ("access denied") error. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Encryption Encryption request headers, like x-amz-server-side-encryption , -// should not be sent for HEAD requests if your object uses server-side encryption -// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side -// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side -// encryption with Amazon S3 managed encryption keys (SSE-S3). The -// x-amz-server-side-encryption header is used when you PUT an object to S3 and -// want to specify the encryption method. If you include this header in a GET -// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad -// Request error. It's because the encryption method can't be changed when you -// retrieve the object. -// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when you -// retrieve the metadata from the object, you must use the following headers to -// provide the encryption key for the server to be able to retrieve the object's -// metadata. The headers are: -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. -// -// Directory bucket permissions - For directory buckets, there are only two -// supported options for server-side encryption: server-side encryption with Amazon -// S3 managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys -// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses -// the desired encryption configuration and you don't override the bucket default -// encryption in your CreateSession requests or PUT object requests. 
Then, new -// objects are automatically encrypted with the desired encryption settings. For -// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about -// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. -// -// Versioning Directory buckets - S3 Versioning isn't enabled and supported for -// directory buckets. For this API operation, only the null value of the version -// ID is supported by directory buckets. You can only specify null to the versionId -// query parameter in the request. -// -// Conditional request headers Consider the following when using request headers: -// -// - If both of the If-Match and If-Unmodified-Since headers are present in the -// request as follows, then Amazon S3 returns the HTTP status code 200 OK and the -// data requested: -// -// - If-Match condition evaluates to true . -// -// - If-Unmodified-Since condition evaluates to false . -// -// For more information about conditional requests, see [RFC 7232]. -// -// - If both of the If-None-Match and If-Modified-Since headers are present in -// the request as follows, then Amazon S3 returns the HTTP status code 304 Not -// Modified : -// -// - If-None-Match condition evaluates to false . -// -// - If-Modified-Since condition evaluates to true . -// -// For more information about conditional requests, see [RFC 7232]. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// The following actions are related to GetObjectAttributes : -// -// [GetObject] -// -// [GetObjectAcl] -// -// [GetObjectLegalHold] -// -// [GetObjectLockConfiguration] -// -// [GetObjectRetention] -// -// [GetObjectTagging] -// -// [HeadObject] -// -// [ListParts] -// -// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html -// [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [RFC 7232]: https://tools.ietf.org/html/rfc7232 -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html -// [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html -// [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Regional and Zonal endpoints 
for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { - if params == nil { - params = &GetObjectAttributesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectAttributes", params, optFns, c.addOperationGetObjectAttributesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectAttributesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectAttributesInput struct { - - // The name of the bucket that contains the object. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The object key. - // - // This member is required. - Key *string - - // Specifies the fields at the root level that you want returned in the response. - // Fields that you do not specify are not returned. - // - // This member is required. - ObjectAttributes []types.ObjectAttributes - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). 
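A hedged sketch of calling GetObjectAttributes as documented above, requesting a few root-level attributes via the SDK's types package (names below are illustrative placeholders only):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// ObjectAttributes selects which root-level fields the response should include.
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("backups/full.dump"),
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesEtag,
			types.ObjectAttributesObjectSize,
			types.ObjectAttributesStorageClass,
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("etag=%s size=%d class=%s\n",
		aws.ToString(out.ETag), aws.ToInt64(out.ObjectSize), out.StorageClass)
}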
- ExpectedBucketOwner *string - - // Sets the maximum number of parts to return. - MaxParts *int32 - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // The version ID used to reference a specific version of the object. - // - // S3 Versioning isn't enabled and supported for directory buckets. For this API - // operation, only the null value of the version ID is supported by directory - // buckets. You can only specify null to the versionId query parameter in the - // request. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectAttributesOutput struct { - - // The checksum or digest of the object. - Checksum *types.Checksum - - // Specifies whether the object retrieved was ( true ) or was not ( false ) a - // delete marker. If false , this response header does not appear in the response. - // - // This functionality is not supported for directory buckets. - DeleteMarker *bool - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL. - ETag *string - - // Date and time when the object was last modified. - LastModified *time.Time - - // A collection of parts associated with a multipart upload. - ObjectParts *types.GetObjectAttributesParts - - // The size of the object in bytes. - ObjectSize *int64 - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Provides the storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. 
- // - // For more information, see [Storage Classes]. - // - // Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The version ID of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAttributes{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAttributes{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAttributes"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAttributes(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectAttributesInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectAttributes(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectAttributes", - } -} - -// getGetObjectAttributesBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetObjectAttributesBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectAttributesInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectAttributesUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectAttributesBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go deleted file mode 100644 index f341d2adb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go +++ /dev/null @@ -1,264 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Gets an object's current legal hold status. For more information, see [Locking Objects]. -// -// This functionality is not supported for Amazon S3 on Outposts. 
-// -// The following action is related to GetObjectLegalHold : -// -// [GetObjectAttributes] -// -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { - if params == nil { - params = &GetObjectLegalHoldInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectLegalHold", params, optFns, c.addOperationGetObjectLegalHoldMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectLegalHoldOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectLegalHoldInput struct { - - // The bucket name containing the object whose legal hold status you want to - // retrieve. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key name for the object whose legal hold status you want to retrieve. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The version ID of the object whose legal hold status you want to retrieve. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectLegalHoldOutput struct { - - // The current legal hold status for the specified object. - LegalHold *types.ObjectLockLegalHold - - // Metadata pertaining to the operation's result. 
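A minimal sketch of retrieving an object's legal hold status via the operation documented above; bucket and key are illustrative assumptions, and the object is assumed to live in a bucket with Object Lock enabled:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("backups/oplog.bson"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The response reports the current legal hold status (ON or OFF) when one is set.
	if out.LegalHold != nil {
		fmt.Println("legal hold status:", out.LegalHold.Status)
	}
}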
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLegalHold{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLegalHold{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLegalHold"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLegalHold(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectLegalHoldInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return 
*v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectLegalHold", - } -} - -// getGetObjectLegalHoldBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetObjectLegalHoldBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectLegalHoldInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectLegalHoldBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go deleted file mode 100644 index 7b34287c8..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go +++ /dev/null @@ -1,243 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Gets the Object Lock configuration for a bucket. The rule specified in the -// Object Lock configuration will be applied by default to every new object placed -// in the specified bucket. For more information, see [Locking Objects]. -// -// The following action is related to GetObjectLockConfiguration : -// -// [GetObjectAttributes] -// -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { - if params == nil { - params = &GetObjectLockConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectLockConfiguration", params, optFns, c.addOperationGetObjectLockConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectLockConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectLockConfigurationInput struct { - - // The bucket whose Object Lock configuration you want to retrieve. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. 
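For the GetObjectLockConfiguration operation whose input is documented here, a rough usage sketch under the same assumptions (hypothetical bucket, default credential resolution) might be:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}

	lock := out.ObjectLockConfiguration
	if lock == nil {
		fmt.Println("no Object Lock configuration returned")
		return
	}
	fmt.Println("object lock enabled:", lock.ObjectLockEnabled)

	// The default retention rule, if present, applies to new objects placed in the bucket.
	if lock.Rule != nil && lock.Rule.DefaultRetention != nil {
		r := lock.Rule.DefaultRetention
		fmt.Printf("default retention: mode=%s days=%d years=%d\n",
			r.Mode, aws.ToInt32(r.Days), aws.ToInt32(r.Years))
	}
}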
When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectLockConfigurationOutput struct { - - // The specified bucket's Object Lock configuration. - ObjectLockConfiguration *types.ObjectLockConfiguration - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLockConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLockConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLockConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLockConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectLockConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectLockConfiguration", - } -} - -// getGetObjectLockConfigurationBucketMember returns a pointer to string denoting -// a provided bucket member valueand a boolean indicating if the input has a -// modeled bucket name, -func getGetObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectLockConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectLockConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go deleted file mode 100644 index 7a72820ec..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go +++ /dev/null @@ -1,264 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Retrieves an object's retention settings. 
For more information, see [Locking Objects]. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// The following action is related to GetObjectRetention : -// -// [GetObjectAttributes] -// -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { - if params == nil { - params = &GetObjectRetentionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectRetention", params, optFns, c.addOperationGetObjectRetentionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectRetentionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectRetentionInput struct { - - // The bucket name containing the object whose retention settings you want to - // retrieve. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key name for the object whose retention settings you want to retrieve. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The version ID for the object whose retention settings you want to retrieve. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectRetentionOutput struct { - - // The container element for an object's retention settings. - Retention *types.ObjectLockRetention - - // Metadata pertaining to the operation's result. 
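A sketch of reading an object's retention settings with the GetObjectRetention operation documented above, again with purely illustrative bucket and key names:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("backups/full.dump"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Retention carries the lock mode (GOVERNANCE or COMPLIANCE) and the retain-until date.
	if out.Retention != nil {
		fmt.Printf("mode=%s retain_until=%s\n",
			out.Retention.Mode, aws.ToTime(out.Retention.RetainUntilDate))
	}
}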
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectRetention{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectRetention{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectRetention"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectRetention(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectRetentionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return 
*v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectRetention", - } -} - -// getGetObjectRetentionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetObjectRetentionBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectRetentionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectRetentionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go deleted file mode 100644 index 805d16c22..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go +++ /dev/null @@ -1,294 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns the tag-set of an object. You send the GET request against the tagging -// subresource associated with the object. -// -// To use this operation, you must have permission to perform the -// s3:GetObjectTagging action. By default, the GET action returns information about -// current version of an object. For a versioned bucket, you can have multiple -// versions of an object in your bucket. To retrieve tags of any other version, use -// the versionId query parameter. You also need permission for the -// s3:GetObjectVersionTagging action. -// -// By default, the bucket owner has this permission and can grant this permission -// to others. -// -// For information about the Amazon S3 object tagging feature, see [Object Tagging]. 
-// -// The following actions are related to GetObjectTagging : -// -// [DeleteObjectTagging] -// -// [GetObjectAttributes] -// -// [PutObjectTagging] -// -// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html -// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html -func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { - if params == nil { - params = &GetObjectTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectTagging", params, optFns, c.addOperationGetObjectTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectTaggingInput struct { - - // The bucket name containing the object for which to get the tagging information. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which to get the tagging information. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. 
- // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The versionId of the object for which to get the tagging information. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectTaggingOutput struct { - - // Contains the tag set. - // - // This member is required. - TagSet []types.Tag - - // The versionId of the object for which you got the tagging information. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = 
addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectTagging", - } -} - -// getGetObjectTaggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetObjectTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go deleted file mode 100644 index dc314d663..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go +++ /dev/null @@ -1,259 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" -) - -// This operation is not supported for directory buckets. -// -// Returns torrent files from a bucket. BitTorrent can save you bandwidth when -// you're distributing large files. -// -// You can get torrent only for objects that are less than 5 GB in size, and that -// are not encrypted using server-side encryption with a customer-provided -// encryption key. -// -// To use GET, you must have READ access to the object. -// -// This functionality is not supported for Amazon S3 on Outposts. 
-// -// The following action is related to GetObjectTorrent : -// -// [GetObject] -// -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { - if params == nil { - params = &GetObjectTorrentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetObjectTorrent", params, optFns, c.addOperationGetObjectTorrentMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetObjectTorrentOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetObjectTorrentInput struct { - - // The name of the bucket containing the object for which to get the torrent files. - // - // This member is required. - Bucket *string - - // The object key for which to get the information. - // - // This member is required. - Key *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type GetObjectTorrentOutput struct { - - // A Bencoded dictionary as defined by the BitTorrent specification - Body io.ReadCloser - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTorrent{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTorrent{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTorrent"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTorrent(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetObjectTorrentInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetObjectTorrent(region string) 
*awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetObjectTorrent", - } -} - -// getGetObjectTorrentBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getGetObjectTorrentBucketMember(input interface{}) (*string, bool) { - in := input.(*GetObjectTorrentInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetObjectTorrentUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetObjectTorrentBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go deleted file mode 100644 index bd760d74c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go +++ /dev/null @@ -1,255 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For -// more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. -// -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an -// object, it checks the PublicAccessBlock configuration for both the bucket (or -// the bucket that contains the object) and the bucket owner's account. If the -// PublicAccessBlock settings are different between the bucket and the account, -// Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. -// -// For more information about when Amazon S3 considers a bucket or an object -// public, see [The Meaning of "Public"]. 
-// -// The following operations are related to GetPublicAccessBlock : -// -// [Using Amazon S3 Block Public Access] -// -// [PutPublicAccessBlock] -// -// [GetPublicAccessBlock] -// -// [DeletePublicAccessBlock] -// -// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html -// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html -// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status -func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { - if params == nil { - params = &GetPublicAccessBlockInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetPublicAccessBlock", params, optFns, c.addOperationGetPublicAccessBlockMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetPublicAccessBlockOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetPublicAccessBlockInput struct { - - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want - // to retrieve. - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type GetPublicAccessBlockOutput struct { - - // The PublicAccessBlock configuration currently in effect for this Amazon S3 - // bucket. - PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpGetPublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetPublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetPublicAccessBlock"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicAccessBlock(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *GetPublicAccessBlockInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - 
return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opGetPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetPublicAccessBlock", - } -} - -// getGetPublicAccessBlockBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getGetPublicAccessBlockBucketMember(input interface{}) (*string, bool) { - in := input.(*GetPublicAccessBlockInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addGetPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getGetPublicAccessBlockBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go deleted file mode 100644 index a26cefd02..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go +++ /dev/null @@ -1,723 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "errors" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - smithywaiter "github.com/aws/smithy-go/waiter" - "time" -) - -// You can use this operation to determine if a bucket exists and if you have -// permission to access it. The action returns a 200 OK if the bucket exists and -// you have permission to access it. -// -// If the bucket does not exist or you do not have permission to access it, the -// HEAD request returns a generic 400 Bad Request , 403 Forbidden or 404 Not Found -// code. A message body is not included, so you cannot determine the exception -// beyond these HTTP response codes. -// -// Authentication and authorization General purpose buckets - Request to public -// buckets that grant the s3:ListBucket permission publicly do not need to be -// signed. All other HeadBucket requests must be authenticated and signed by using -// IAM credentials (access key ID and secret access key for the IAM identities). -// All headers with the x-amz- prefix, including x-amz-copy-source , must be -// signed. For more information, see [REST Authentication]. -// -// Directory buckets - You must use IAM credentials to authenticate and authorize -// your access to the HeadBucket API operation, instead of using the temporary -// security credentials through the CreateSession API operation. -// -// Amazon Web Services CLI or SDKs handles authentication and authorization on -// your behalf. 
-// -// Permissions -// -// - General purpose bucket permissions - To use this operation, you must have -// permissions to perform the s3:ListBucket action. The bucket owner has this -// permission by default and can grant this permission to others. For more -// information about permissions, see [Managing access permissions to your Amazon S3 resources]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - You must have the s3express:CreateSession -// permission in the Action element of a policy. By default, the session is in -// the ReadWrite mode. If you want to restrict the access, you can explicitly set -// the s3express:SessionMode condition key to ReadOnly on the bucket. -// -// For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 -// -// User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// You must make requests for this API operation to the Zonal endpoint. These -// endpoints support virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style -// requests are not supported. For more information about endpoints in Availability -// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints in -// Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html -// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { - if params == nil { - params = &HeadBucketInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "HeadBucket", params, optFns, c.addOperationHeadBucketMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*HeadBucketOutput) - out.ResultMetadata = metadata - return out, nil -} - -type HeadBucketInput struct { - - // The bucket name. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). 
Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Object Lambda access points - When you use this API operation with an Object - // Lambda access point, provide the alias of the Object Lambda access point in - // place of the bucket name. If the Object Lambda access point alias in a request - // is not valid, the error code InvalidAccessPointAliasError is returned. For more - // information about InvalidAccessPointAliasError , see [List of Error Codes]. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList - // - // This member is required. - Bucket *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type HeadBucketOutput struct { - - // Indicates whether the bucket name used in the request is an access point alias. - // - // For directory buckets, the value of this field is false . - AccessPointAlias *bool - - // The name of the location where the bucket will be created. - // - // For directory buckets, the Zone ID of the Availability Zone or the Local Zone - // where the bucket is created. An example Zone ID value for an Availability Zone - // is usw2-az1 . - // - // This functionality is only supported by directory buckets. - BucketLocationName *string - - // The type of location where the bucket is created. - // - // This functionality is only supported by directory buckets. 
- BucketLocationType types.LocationType - - // The Region that the bucket is located. - BucketRegion *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpHeadBucket{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadBucket{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "HeadBucket"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpHeadBucketValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadBucket(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addHeadBucketUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// 
BucketExistsWaiterOptions are waiter options for BucketExistsWaiter -type BucketExistsWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // BucketExistsWaiter will use default minimum delay of 5 seconds. Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. - MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, BucketExistsWaiter will use default max delay of 120 seconds. Note - // that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. - // - // By default service-modeled logic will populate this option. This option can - // thus be used to define a custom waiter state with fall-back to service-modeled - // waiter state mutators.The function returns an error in case of a failure state. - // In case of retry state, this function returns a bool value of true and nil - // error, while in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) -} - -// BucketExistsWaiter defines the waiters for BucketExists -type BucketExistsWaiter struct { - client HeadBucketAPIClient - - options BucketExistsWaiterOptions -} - -// NewBucketExistsWaiter constructs a BucketExistsWaiter. -func NewBucketExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketExistsWaiterOptions)) *BucketExistsWaiter { - options := BucketExistsWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = bucketExistsStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &BucketExistsWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for BucketExists waiter. The maxWaitDur is the -// maximum wait duration the waiter will wait. The maxWaitDur is required and must -// be greater than zero. -func (w *BucketExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for BucketExists waiter and returns the -// output of the successful operation. The maxWaitDur is the maximum wait duration -// the waiter will wait. The maxWaitDur is required and must be greater than zero. 
-func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) (*HeadBucketOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.HeadBucket(ctx, params, func(o *Options) { - baseOpts := []func(*Options){ - addIsWaiterUserAgent, - } - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range baseOpts { - opt(o) - } - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for BucketExists waiter") -} - -func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { - - if err == nil { - return false, nil - } - - if err != nil { - var errorType *types.NotFound - if errors.As(err, &errorType) { - return true, nil - } - } - - if err != nil { - return false, err - } - return true, nil -} - -// BucketNotExistsWaiterOptions are waiter options for BucketNotExistsWaiter -type BucketNotExistsWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // BucketNotExistsWaiter will use default minimum delay of 5 seconds. 
Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. - MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, BucketNotExistsWaiter will use default max delay of 120 seconds. - // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. - // - // By default service-modeled logic will populate this option. This option can - // thus be used to define a custom waiter state with fall-back to service-modeled - // waiter state mutators.The function returns an error in case of a failure state. - // In case of retry state, this function returns a bool value of true and nil - // error, while in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) -} - -// BucketNotExistsWaiter defines the waiters for BucketNotExists -type BucketNotExistsWaiter struct { - client HeadBucketAPIClient - - options BucketNotExistsWaiterOptions -} - -// NewBucketNotExistsWaiter constructs a BucketNotExistsWaiter. -func NewBucketNotExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketNotExistsWaiterOptions)) *BucketNotExistsWaiter { - options := BucketNotExistsWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = bucketNotExistsStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &BucketNotExistsWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is -// the maximum wait duration the waiter will wait. The maxWaitDur is required and -// must be greater than zero. -func (w *BucketNotExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for BucketNotExists waiter and returns -// the output of the successful operation. The maxWaitDur is the maximum wait -// duration the waiter will wait. The maxWaitDur is required and must be greater -// than zero. 
-func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) (*HeadBucketOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.HeadBucket(ctx, params, func(o *Options) { - baseOpts := []func(*Options){ - addIsWaiterUserAgent, - } - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range baseOpts { - opt(o) - } - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for BucketNotExists waiter") -} - -func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) { - - if err != nil { - var errorType *types.NotFound - if errors.As(err, &errorType) { - return false, nil - } - } - - if err != nil { - return false, err - } - return true, nil -} - -func (v *HeadBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// HeadBucketAPIClient is a client that implements the HeadBucket operation. 
-type HeadBucketAPIClient interface { - HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) -} - -var _ HeadBucketAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "HeadBucket", - } -} - -// getHeadBucketBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getHeadBucketBucketMember(input interface{}) (*string, bool) { - in := input.(*HeadBucketInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addHeadBucketUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getHeadBucketBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignHeadBucket is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignHeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &HeadBucketInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "HeadBucket", params, clientOptFns, - c.client.addOperationHeadBucketMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addHeadBucketPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addHeadBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go deleted file mode 100644 index e943c2b68..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go +++ /dev/null @@ -1,1198 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "errors" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithytime "github.com/aws/smithy-go/time" - smithyhttp "github.com/aws/smithy-go/transport/http" - smithywaiter "github.com/aws/smithy-go/waiter" - "time" -) - -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're interested only in an object's -// metadata. 
-// -// A HEAD request has the same options as a GET operation on an object. The -// response is identical to the GET response except that there is no response -// body. Because of this, if the HEAD request generates an error, it returns a -// generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 -// Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not -// possible to retrieve the exact exception of these error codes. -// -// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers]. -// -// Permissions -// -// - General purpose bucket permissions - To use HEAD , you must have the -// s3:GetObject permission. You need the relevant read object (or version) -// permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3]in the Amazon S3 -// User Guide. For more information about the permissions to S3 API operations by -// S3 resource types, see Required permissions for Amazon S3 API operationsin the Amazon S3 User Guide. -// -// If the object you request doesn't exist, the error that Amazon S3 returns -// -// depends on whether you also have the s3:ListBucket permission. -// -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden error. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If you enable x-amz-checksum-mode in the request and the object is encrypted -// -// with Amazon Web Services Key Management Service (Amazon Web Services KMS), you -// must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM -// identity-based policies and KMS key policies for the KMS key to retrieve the -// checksum of the object. -// -// Encryption Encryption request headers, like x-amz-server-side-encryption , -// should not be sent for HEAD requests if your object uses server-side encryption -// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side -// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side -// encryption with Amazon S3 managed encryption keys (SSE-S3). The -// x-amz-server-side-encryption header is used when you PUT an object to S3 and -// want to specify the encryption method. If you include this header in a HEAD -// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad -// Request error. It's because the encryption method can't be changed when you -// retrieve the object. 
-// -// If you encrypt an object by using server-side encryption with customer-provided -// encryption keys (SSE-C) when you store the object in Amazon S3, then when you -// retrieve the metadata from the object, you must use the following headers to -// provide the encryption key for the server to be able to retrieve the object's -// metadata. The headers are: -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. -// -// Directory bucket - For directory buckets, there are only two supported options -// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more -// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. -// -// Versioning -// -// - If the current version of the object is a delete marker, Amazon S3 behaves -// as if the object was deleted and includes x-amz-delete-marker: true in the -// response. -// -// - If the specified version is a delete marker, the response returns a 405 -// Method Not Allowed error and the Last-Modified: timestamp response header. -// -// - Directory buckets - Delete marker is not supported for directory buckets. -// -// - Directory buckets - S3 Versioning isn't enabled and supported for directory -// buckets. For this API operation, only the null value of the version ID is -// supported by directory buckets. You can only specify null to the versionId -// query parameter in the request. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// For directory buckets, you must make requests for this API operation to the -// Zonal endpoint. These endpoints support virtual-hosted-style requests in the -// format https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name -// . Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about -// endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. 
-// -// The following actions are related to HeadObject : -// -// [GetObject] -// -// [GetObjectAttributes] -// -// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { - if params == nil { - params = &HeadObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "HeadObject", params, optFns, c.addOperationHeadObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*HeadObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type HeadObjectInput struct { - - // The name of the bucket that contains the object. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The object key. - // - // This member is required. - Key *string - - // To retrieve the checksum, this parameter must be enabled. - // - // General purpose buckets - If you enable checksum mode and the object is - // uploaded with a [checksum]and encrypted with an Key Management Service (KMS) key, you - // must have permission to use the kms:Decrypt action to retrieve the checksum. - // - // Directory buckets - If you enable ChecksumMode and the object is encrypted with - // Amazon Web Services Key Management Service (Amazon Web Services KMS), you must - // also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM - // identity-based policies and KMS key policies for the KMS key to retrieve the - // checksum of the object. - // - // [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html - ChecksumMode types.ChecksumMode - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. - // - // If both of the If-Match and If-Unmodified-Since headers are present in the - // request as follows: - // - // - If-Match condition evaluates to true , and; - // - // - If-Unmodified-Since condition evaluates to false ; - // - // Then Amazon S3 returns 200 OK and the data requested. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. - // - // If both of the If-None-Match and If-Modified-Since headers are present in the - // request as follows: - // - // - If-None-Match condition evaluates to false , and; - // - // - If-Modified-Since condition evaluates to true ; - // - // Then Amazon S3 returns the 304 Not Modified response code. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfModifiedSince *time.Time - - // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. - // - // If both of the If-None-Match and If-Modified-Since headers are present in the - // request as follows: - // - // - If-None-Match condition evaluates to false , and; - // - // - If-Modified-Since condition evaluates to true ; - // - // Then Amazon S3 returns the 304 Not Modified response code. - // - // For more information about conditional requests, see [RFC 7232]. 
- // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfNoneMatch *string - - // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. - // - // If both of the If-Match and If-Unmodified-Since headers are present in the - // request as follows: - // - // - If-Match condition evaluates to true , and; - // - // - If-Unmodified-Since condition evaluates to false ; - // - // Then Amazon S3 returns 200 OK and the data requested. - // - // For more information about conditional requests, see [RFC 7232]. - // - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfUnmodifiedSince *time.Time - - // Part number of the object being read. This is a positive integer between 1 and - // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. - // Useful querying about the size of the part and the number of parts in this - // object. - PartNumber *int32 - - // HeadObject returns only the metadata for an object. If the Range is - // satisfiable, only the ContentLength is affected in the response. If the Range - // is not satisfiable, S3 returns a 416 - Requested Range Not Satisfiable error. - Range *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Sets the Cache-Control header of the response. - ResponseCacheControl *string - - // Sets the Content-Disposition header of the response. - ResponseContentDisposition *string - - // Sets the Content-Encoding header of the response. - ResponseContentEncoding *string - - // Sets the Content-Language header of the response. - ResponseContentLanguage *string - - // Sets the Content-Type header of the response. - ResponseContentType *string - - // Sets the Expires header of the response. - ResponseExpires *time.Time - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // Version ID used to reference a specific version of the object. 
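// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch only; not part of the vendored SDK
// file being deleted above, nor of the original patch. It exercises the
// HeadObject operation with a few of the input fields documented above and
// reads some of the response fields documented below. The bucket and key are
// hypothetical; types.ChecksumModeEnabled is assumed to be the enum value that
// switches ChecksumMode on.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:       aws.String("example-bucket"),
		Key:          aws.String("example-key"),
		ChecksumMode: types.ChecksumModeEnabled, // also needs kms:Decrypt for KMS-encrypted objects
	})
	if err != nil {
		// A missing object surfaces as 404 or 403 depending on s3:ListBucket,
		// as described in the operation comment above.
		log.Fatal(err)
	}
	log.Printf("size=%d etag=%s last-modified=%v",
		aws.ToInt64(out.ContentLength), aws.ToString(out.ETag), aws.ToTime(out.LastModified))
	if out.ChecksumSHA256 != nil {
		log.Printf("sha256=%s", *out.ChecksumSHA256) // only present when uploaded with a checksum
	}
}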
- // - // For directory buckets in this API operation, only the null value of the version - // ID is supported. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type HeadObjectOutput struct { - - // Indicates that a range of bytes was specified. - AcceptRanges *string - - // The archive state of the head object. - // - // This functionality is not supported for directory buckets. - ArchiveStatus types.ArchiveStatus - - // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // Specifies caching behavior along the request/reply chain. - CacheControl *string - - // The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is - // only present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the object. For more - // information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be - // present if the object was uploaded with the object. 
When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // The checksum type, which determines how part-level checksums are combined to - // create an object-level checksum for multipart objects. You can use this header - // response to verify that the checksum type that is received is the same checksum - // type that was specified in CreateMultipartUpload request. For more information, - // see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Specifies presentational information for the object. - ContentDisposition *string - - // Indicates what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // Size of the body in bytes. - ContentLength *int64 - - // A standard MIME type describing the format of the object data. - ContentType *string - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - // - // This functionality is not supported for directory buckets. - DeleteMarker *bool - - // An entity tag (ETag) is an opaque identifier assigned by a web server to a - // specific version of a resource found at a URL. - ETag *string - - // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. - // - // Object expiration information is not returned in directory buckets and this - // header returns the value " NotImplemented " in all responses for directory - // buckets. - // - // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html - Expiration *string - - // The date and time at which the object is no longer cacheable. - // - // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using - // the ExpiresString field which contains the unparsed value from the service - // response. - Expires *time.Time - - // The unparsed value of the Expires field from the service response. Prefer use - // of this value over the normal Expires response field where possible. - ExpiresString *string - - // Date and time when the object was last modified. - LastModified *time.Time - - // A map of metadata to store with the object in S3. - // - // Map keys will be normalized to lower-case. - Metadata map[string]string - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. 
This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. - // - // This functionality is not supported for directory buckets. - MissingMeta *int32 - - // Specifies whether a legal hold is in effect for this object. This header is - // only returned if the requester has the s3:GetObjectLegalHold permission. This - // header is not returned if the specified version of this object has never had a - // legal hold applied. For more information about S3 Object Lock, see [Object Lock]. - // - // This functionality is not supported for directory buckets. - // - // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // The Object Lock mode, if any, that's in effect for this object. This header is - // only returned if the requester has the s3:GetObjectRetention permission. For - // more information about S3 Object Lock, see [Object Lock]. - // - // This functionality is not supported for directory buckets. - // - // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html - ObjectLockMode types.ObjectLockMode - - // The date and time when the Object Lock retention period expires. This header is - // only returned if the requester has the s3:GetObjectRetention permission. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // The count of parts this object has. This value is only returned if you specify - // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount *int32 - - // Amazon S3 can return this header if your request involves a bucket that is - // either a source or a destination in a replication rule. - // - // In replication, you have a source bucket on which you configure replication and - // destination bucket or buckets where Amazon S3 stores object replicas. When you - // request an object ( GetObject ) or object metadata ( HeadObject ) from these - // buckets, Amazon S3 will return the x-amz-replication-status header in the - // response as follows: - // - // - If requesting an object from the source bucket, Amazon S3 will return the - // x-amz-replication-status header if the object in your request is eligible for - // replication. - // - // For example, suppose that in your replication configuration, you specify object - // prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix - // TaxDocs . Any objects you upload with this key name prefix, for example - // TaxDocs/document1.pdf , are eligible for replication. For any object request - // with this key name prefix, Amazon S3 will return the x-amz-replication-status - // header with value PENDING, COMPLETED or FAILED indicating object replication - // status. - // - // - If requesting an object from a destination bucket, Amazon S3 will return - // the x-amz-replication-status header with value REPLICA if the object in your - // request is a replica that Amazon S3 created and there is no replica modification - // replication in progress. - // - // - When replicating objects to multiple destination buckets, the - // x-amz-replication-status header acts differently. The header of the source - // object will only return a value of COMPLETED when replication is successful to - // all destinations. 
The header will remain at value PENDING until replication has - // completed for all destinations. If one or more destinations fails replication - // the header will return FAILED. - // - // For more information, see [Replication]. - // - // This functionality is not supported for directory buckets. - // - // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html - ReplicationStatus types.ReplicationStatus - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // If the object is an archived object (an object whose storage class is GLACIER), - // the response includes this header if either the archive restoration is in - // progress (see [RestoreObject]or an archive copy is already restored. - // - // If an archive copy is already restored, the header value indicates when Amazon - // S3 is scheduled to delete the object copy. For example: - // - // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 - // GMT" - // - // If the object restoration is in progress, the header returns the value - // ongoing-request="true" . - // - // For more information about archiving objects, see [Transitioning Objects: General Considerations]. - // - // This functionality is not supported for directory buckets. Only the S3 Express - // One Zone storage class is supported by directory buckets to store objects. - // - // [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations - // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html - Restore *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). - ServerSideEncryption types.ServerSideEncryption - - // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. - // - // For more information, see [Storage Classes]. - // - // Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // Version ID of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. 
- // - // This functionality is not supported for directory buckets. - WebsiteRedirectLocation *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpHeadObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "HeadObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpHeadObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addHeadObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// 
ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter -type ObjectExistsWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // ObjectExistsWaiter will use default minimum delay of 5 seconds. Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. - MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, ObjectExistsWaiter will use default max delay of 120 seconds. Note - // that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. - // - // By default service-modeled logic will populate this option. This option can - // thus be used to define a custom waiter state with fall-back to service-modeled - // waiter state mutators.The function returns an error in case of a failure state. - // In case of retry state, this function returns a bool value of true and nil - // error, while in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) -} - -// ObjectExistsWaiter defines the waiters for ObjectExists -type ObjectExistsWaiter struct { - client HeadObjectAPIClient - - options ObjectExistsWaiterOptions -} - -// NewObjectExistsWaiter constructs a ObjectExistsWaiter. -func NewObjectExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectExistsWaiterOptions)) *ObjectExistsWaiter { - options := ObjectExistsWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = objectExistsStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &ObjectExistsWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for ObjectExists waiter. The maxWaitDur is the -// maximum wait duration the waiter will wait. The maxWaitDur is required and must -// be greater than zero. -func (w *ObjectExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for ObjectExists waiter and returns the -// output of the successful operation. The maxWaitDur is the maximum wait duration -// the waiter will wait. The maxWaitDur is required and must be greater than zero. 
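// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch only; not part of the vendored SDK
// file being deleted above, nor of the original patch. It drives the
// ObjectExists waiter through the constructor and options documented above;
// the bucket and key are hypothetical.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	waiter := s3.NewObjectExistsWaiter(client, func(o *s3.ObjectExistsWaiterOptions) {
		o.MinDelay = 5 * time.Second  // matches the default set in NewObjectExistsWaiter
		o.MaxDelay = 30 * time.Second // tighten the backoff cap for this call
		o.LogWaitAttempts = true
	})

	// WaitForOutput (defined just below) returns the HeadObjectOutput of the
	// first successful poll; maxWaitDur must be greater than zero.
	out, err := waiter.WaitForOutput(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	}, 2*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("object appeared, %d bytes", aws.ToInt64(out.ContentLength))
}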
-func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) (*HeadObjectOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.HeadObject(ctx, params, func(o *Options) { - baseOpts := []func(*Options){ - addIsWaiterUserAgent, - } - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range baseOpts { - opt(o) - } - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for ObjectExists waiter") -} - -func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { - - if err == nil { - return false, nil - } - - if err != nil { - var errorType *types.NotFound - if errors.As(err, &errorType) { - return true, nil - } - } - - if err != nil { - return false, err - } - return true, nil -} - -// ObjectNotExistsWaiterOptions are waiter options for ObjectNotExistsWaiter -type ObjectNotExistsWaiterOptions struct { - - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - // - // Passing options here is functionally equivalent to passing values to this - // config's ClientOptions field that extend the inner client's APIOptions directly. - APIOptions []func(*middleware.Stack) error - - // Functional options to be passed to all operations invoked by this client. - // - // Function values that modify the inner APIOptions are applied after the waiter - // config's own APIOptions modifiers. - ClientOptions []func(*Options) - - // MinDelay is the minimum amount of time to delay between retries. If unset, - // ObjectNotExistsWaiter will use default minimum delay of 5 seconds. 
Note that - // MinDelay must resolve to a value lesser than or equal to the MaxDelay. - MinDelay time.Duration - - // MaxDelay is the maximum amount of time to delay between retries. If unset or - // set to zero, ObjectNotExistsWaiter will use default max delay of 120 seconds. - // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. - MaxDelay time.Duration - - // LogWaitAttempts is used to enable logging for waiter retry attempts - LogWaitAttempts bool - - // Retryable is function that can be used to override the service defined - // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. - // - // By default service-modeled logic will populate this option. This option can - // thus be used to define a custom waiter state with fall-back to service-modeled - // waiter state mutators.The function returns an error in case of a failure state. - // In case of retry state, this function returns a bool value of true and nil - // error, while in case of success it returns a bool value of false and nil error. - Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) -} - -// ObjectNotExistsWaiter defines the waiters for ObjectNotExists -type ObjectNotExistsWaiter struct { - client HeadObjectAPIClient - - options ObjectNotExistsWaiterOptions -} - -// NewObjectNotExistsWaiter constructs a ObjectNotExistsWaiter. -func NewObjectNotExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectNotExistsWaiterOptions)) *ObjectNotExistsWaiter { - options := ObjectNotExistsWaiterOptions{} - options.MinDelay = 5 * time.Second - options.MaxDelay = 120 * time.Second - options.Retryable = objectNotExistsStateRetryable - - for _, fn := range optFns { - fn(&options) - } - return &ObjectNotExistsWaiter{ - client: client, - options: options, - } -} - -// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is -// the maximum wait duration the waiter will wait. The maxWaitDur is required and -// must be greater than zero. -func (w *ObjectNotExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) error { - _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) - return err -} - -// WaitForOutput calls the waiter function for ObjectNotExists waiter and returns -// the output of the successful operation. The maxWaitDur is the maximum wait -// duration the waiter will wait. The maxWaitDur is required and must be greater -// than zero. 
-func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) (*HeadObjectOutput, error) { - if maxWaitDur <= 0 { - return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") - } - - options := w.options - for _, fn := range optFns { - fn(&options) - } - - if options.MaxDelay <= 0 { - options.MaxDelay = 120 * time.Second - } - - if options.MinDelay > options.MaxDelay { - return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) - } - - ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) - defer cancelFn() - - logger := smithywaiter.Logger{} - remainingTime := maxWaitDur - - var attempt int64 - for { - - attempt++ - apiOptions := options.APIOptions - start := time.Now() - - if options.LogWaitAttempts { - logger.Attempt = attempt - apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) - apiOptions = append(apiOptions, logger.AddLogger) - } - - out, err := w.client.HeadObject(ctx, params, func(o *Options) { - baseOpts := []func(*Options){ - addIsWaiterUserAgent, - } - o.APIOptions = append(o.APIOptions, apiOptions...) - for _, opt := range baseOpts { - opt(o) - } - for _, opt := range options.ClientOptions { - opt(o) - } - }) - - retryable, err := options.Retryable(ctx, params, out, err) - if err != nil { - return nil, err - } - if !retryable { - return out, nil - } - - remainingTime -= time.Since(start) - if remainingTime < options.MinDelay || remainingTime <= 0 { - break - } - - // compute exponential backoff between waiter retries - delay, err := smithywaiter.ComputeDelay( - attempt, options.MinDelay, options.MaxDelay, remainingTime, - ) - if err != nil { - return nil, fmt.Errorf("error computing waiter delay, %w", err) - } - - remainingTime -= delay - // sleep for the delay amount before invoking a request - if err := smithytime.SleepWithContext(ctx, delay); err != nil { - return nil, fmt.Errorf("request cancelled while waiting, %w", err) - } - } - return nil, fmt.Errorf("exceeded max wait time for ObjectNotExists waiter") -} - -func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { - - if err != nil { - var errorType *types.NotFound - if errors.As(err, &errorType) { - return false, nil - } - } - - if err != nil { - return false, err - } - return true, nil -} - -func (v *HeadObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// HeadObjectAPIClient is a client that implements the HeadObject operation. 
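// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch only; not part of the vendored SDK
// file being deleted above, nor of the original patch. It uses the
// PresignHeadObject helper that appears a little further down in this file.
// s3.NewPresignClient and s3.WithPresignExpires are assumed to be the standard
// presign helpers from this package; the bucket and key are hypothetical.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg))

	req, err := presigner.PresignHeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	}, s3.WithPresignExpires(15*time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	// req.URL can be handed to any HTTP client; the payload is left unsigned,
	// as arranged by addHeadObjectPayloadAsUnsigned below.
	log.Println(req.Method, req.URL)
}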
-type HeadObjectAPIClient interface { - HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) -} - -var _ HeadObjectAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "HeadObject", - } -} - -// getHeadObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getHeadObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*HeadObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addHeadObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getHeadObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignHeadObject is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignHeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &HeadObjectInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "HeadObject", params, clientOptFns, - c.client.addOperationHeadObjectMiddlewares, - presignConverter(options).convertToPresignMiddleware, - addHeadObjectPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addHeadObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go deleted file mode 100644 index dd35f119c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Lists the analytics configurations for the bucket. You can have up to 1,000 -// analytics configurations per bucket. 
-// -// This action supports list pagination and does not return more than 100 -// configurations at a time. You should always check the IsTruncated element in -// the response. If there are no more configurations to list, IsTruncated is set -// to false. If there are more configurations to list, IsTruncated is set to true, -// and there will be a value in NextContinuationToken . You use the -// NextContinuationToken value to continue the pagination of the list by passing -// the value in continuation-token in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the -// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. -// -// The following operations are related to ListBucketAnalyticsConfigurations : -// -// [GetBucketAnalyticsConfiguration] -// -// [DeleteBucketAnalyticsConfiguration] -// -// [PutBucketAnalyticsConfiguration] -// -// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html -// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html -// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { - if params == nil { - params = &ListBucketAnalyticsConfigurationsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBucketAnalyticsConfigurations", params, optFns, c.addOperationListBucketAnalyticsConfigurationsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketAnalyticsConfigurationsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketAnalyticsConfigurationsInput struct { - - // The name of the bucket from which analytics configurations are retrieved. - // - // This member is required. - Bucket *string - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). 
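// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch only; not part of the vendored SDK
// file being deleted above, nor of the original patch. It follows the
// IsTruncated / NextContinuationToken pagination contract described in the
// operation comment above; the bucket name is hypothetical.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	var token *string // nil selects the first page
	for {
		out, err := client.ListBucketAnalyticsConfigurations(ctx, &s3.ListBucketAnalyticsConfigurationsInput{
			Bucket:            aws.String("example-bucket"),
			ContinuationToken: token,
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("page with %d analytics configurations", len(out.AnalyticsConfigurationList))
		if !aws.ToBool(out.IsTruncated) {
			break // no more pages (at most 100 configurations per page)
		}
		token = out.NextContinuationToken
	}
}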
- ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type ListBucketAnalyticsConfigurationsOutput struct { - - // The list of analytics configurations for a bucket. - AnalyticsConfigurationList []types.AnalyticsConfiguration - - // The marker that is used as a starting point for this analytics configuration - // list response. This value is present if it was sent in the request. - ContinuationToken *string - - // Indicates whether the returned list of analytics configurations is complete. A - // value of true indicates that the list is not complete and the - // NextContinuationToken will be provided for a subsequent request. - IsTruncated *bool - - // NextContinuationToken is sent when isTruncated is true, which indicates that - // there are more analytics configurations to list. The next request must include - // this NextContinuationToken . The token is obfuscated and is not a usable value. - NextContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketAnalyticsConfigurations{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketAnalyticsConfigurations{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketAnalyticsConfigurations"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { - return err - } - if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListBucketAnalyticsConfigurationsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBucketAnalyticsConfigurations", - } -} - -// getListBucketAnalyticsConfigurationsBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getListBucketAnalyticsConfigurationsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListBucketAnalyticsConfigurationsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListBucketAnalyticsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListBucketAnalyticsConfigurationsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go deleted file mode 100644 index 3f6d19a34..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ /dev/null @@ -1,267 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Lists the S3 Intelligent-Tiering configuration from the specified bucket. -// -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput access -// tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of object -// size or retention period. If the size of an object is less than 128 KB, it is -// not monitored and not eligible for auto-tiering. Smaller objects can be stored, -// but they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. -// -// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// Operations related to ListBucketIntelligentTieringConfigurations include: -// -// [DeleteBucketIntelligentTieringConfiguration] -// -// [PutBucketIntelligentTieringConfiguration] -// -// [GetBucketIntelligentTieringConfiguration] -// -// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html -// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html -func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) { - if params == nil { - params = &ListBucketIntelligentTieringConfigurationsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBucketIntelligentTieringConfigurations", params, optFns, c.addOperationListBucketIntelligentTieringConfigurationsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketIntelligentTieringConfigurationsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketIntelligentTieringConfigurationsInput struct { - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. - // - // This member is required. - Bucket *string - - // The ContinuationToken that represents a placeholder from where this request - // should begin. 
- ContinuationToken *string - - noSmithyDocumentSerde -} - -func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type ListBucketIntelligentTieringConfigurationsOutput struct { - - // The ContinuationToken that represents a placeholder from where this request - // should begin. - ContinuationToken *string - - // The list of S3 Intelligent-Tiering configurations for a bucket. - IntelligentTieringConfigurationList []types.IntelligentTieringConfiguration - - // Indicates whether the returned list of analytics configurations is complete. A - // value of true indicates that the list is not complete and the - // NextContinuationToken will be provided for a subsequent request. - IsTruncated *bool - - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. - NextContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketIntelligentTieringConfigurations{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketIntelligentTieringConfigurations"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { - return err - } - if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListBucketIntelligentTieringConfigurationsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBucketIntelligentTieringConfigurations", - } -} - -// getListBucketIntelligentTieringConfigurationsBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getListBucketIntelligentTieringConfigurationsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListBucketIntelligentTieringConfigurationsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListBucketIntelligentTieringConfigurationsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go deleted file mode 100644 index 5a675d2a9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go +++ /dev/null @@ -1,275 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
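The addListBucket*UpdateEndpoint helpers at the tail of each of these deleted files feed UsePathStyle, UseAccelerate, and the endpoint resolver into the SDK's endpoint customization. From the caller's side those knobs are plain fields on s3.Options; a minimal sketch, assuming a hypothetical S3-compatible endpoint that needs path-style addressing:

// Illustrative sketch (not part of the patch): the UsePathStyle and endpoint
// settings consumed by the endpoint-update middleware above are configured on
// s3.Options at client construction. Endpoint and region are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion("us-east-1")) // placeholder region
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true                                      // path-style addressing for S3-compatible storage
		o.BaseEndpoint = aws.String("https://minio.example:9000") // hypothetical custom endpoint
	})
	_ = client
}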
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns a list of inventory configurations for the bucket. You can have up to -// 1,000 analytics configurations per bucket. -// -// This action supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. If -// there are more configurations to list, IsTruncated is set to true, and there is -// a value in NextContinuationToken . You use the NextContinuationToken value to -// continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the -// s3:GetInventoryConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory] -// -// The following operations are related to ListBucketInventoryConfigurations : -// -// [GetBucketInventoryConfiguration] -// -// [DeleteBucketInventoryConfiguration] -// -// [PutBucketInventoryConfiguration] -// -// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html -// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html -func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { - if params == nil { - params = &ListBucketInventoryConfigurationsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBucketInventoryConfigurations", params, optFns, c.addOperationListBucketInventoryConfigurationsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketInventoryConfigurationsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketInventoryConfigurationsInput struct { - - // The name of the bucket containing the inventory configurations to retrieve. - // - // This member is required. - Bucket *string - - // The marker used to continue an inventory configuration listing that has been - // truncated. 
Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value that - // Amazon S3 understands. - ContinuationToken *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type ListBucketInventoryConfigurationsOutput struct { - - // If sent in the request, the marker that is used as a starting point for this - // inventory configuration list response. - ContinuationToken *string - - // The list of inventory configurations for a bucket. - InventoryConfigurationList []types.InventoryConfiguration - - // Tells whether the returned list of inventory configurations is complete. A - // value of true indicates that the list is not complete and the - // NextContinuationToken is provided for a subsequent request. - IsTruncated *bool - - // The marker used to continue this inventory configuration listing. Use the - // NextContinuationToken from this response to continue the listing in a subsequent - // request. The continuation token is an opaque value that Amazon S3 understands. - NextContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketInventoryConfigurations{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketInventoryConfigurations{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketInventoryConfigurations"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = 
addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketInventoryConfigurations(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListBucketInventoryConfigurationsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListBucketInventoryConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBucketInventoryConfigurations", - } -} - -// getListBucketInventoryConfigurationsBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getListBucketInventoryConfigurationsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListBucketInventoryConfigurationsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListBucketInventoryConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListBucketInventoryConfigurationsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go deleted file mode 100644 index e6484f4b0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go +++ /dev/null @@ -1,277 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Lists the metrics configurations for the bucket. The metrics configurations are -// only for the request metrics of the bucket and do not provide information on -// daily storage metrics. You can have up to 1,000 configurations per bucket. -// -// This action supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. If -// there are more configurations to list, IsTruncated is set to true, and there is -// a value in NextContinuationToken . You use the NextContinuationToken value to -// continue the pagination of the list by passing the value in continuation-token -// in the request to GET the next page. -// -// To use this operation, you must have permissions to perform the -// s3:GetMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For more information about metrics configurations and CloudWatch request -// metrics, see [Monitoring Metrics with Amazon CloudWatch]. -// -// The following operations are related to ListBucketMetricsConfigurations : -// -// [PutBucketMetricsConfiguration] -// -// [GetBucketMetricsConfiguration] -// -// [DeleteBucketMetricsConfiguration] -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html -// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html -// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { - if params == nil { - params = &ListBucketMetricsConfigurationsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBucketMetricsConfigurations", params, optFns, c.addOperationListBucketMetricsConfigurationsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketMetricsConfigurationsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketMetricsConfigurationsInput struct { - - // The name of the bucket containing the metrics configurations to retrieve. - // - // This member is required. 
- Bucket *string - - // The marker that is used to continue a metrics configuration listing that has - // been truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value that - // Amazon S3 understands. - ContinuationToken *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type ListBucketMetricsConfigurationsOutput struct { - - // The marker that is used as a starting point for this metrics configuration list - // response. This value is present if it was sent in the request. - ContinuationToken *string - - // Indicates whether the returned list of metrics configurations is complete. A - // value of true indicates that the list is not complete and the - // NextContinuationToken will be provided for a subsequent request. - IsTruncated *bool - - // The list of metrics configurations for a bucket. - MetricsConfigurationList []types.MetricsConfiguration - - // The marker used to continue a metrics configuration listing that has been - // truncated. Use the NextContinuationToken from a previously truncated list - // response to continue the listing. The continuation token is an opaque value that - // Amazon S3 understands. - NextContinuationToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketMetricsConfigurations{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketMetricsConfigurations{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketMetricsConfigurations"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return 
err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketMetricsConfigurations(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListBucketMetricsConfigurationsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListBucketMetricsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBucketMetricsConfigurations", - } -} - -// getListBucketMetricsConfigurationsBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getListBucketMetricsConfigurationsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListBucketMetricsConfigurationsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListBucketMetricsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListBucketMetricsConfigurationsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go deleted file mode 100644 index d87540829..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go +++ /dev/null @@ -1,346 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns a list of all buckets owned by the authenticated sender of the request. -// To grant IAM permission to use this operation, you must add the -// s3:ListAllMyBuckets policy action. -// -// For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets]. -// -// We strongly recommend using only paginated ListBuckets requests. Unpaginated -// ListBuckets requests are only supported for Amazon Web Services accounts set to -// the default general purpose bucket quota of 10,000. If you have an approved -// general purpose bucket quota above 10,000, you must send paginated ListBuckets -// requests to list your account’s buckets. All unpaginated ListBuckets requests -// will be rejected for Amazon Web Services accounts with a general purpose bucket -// quota greater than 10,000. -// -// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html -func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { - if params == nil { - params = &ListBucketsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListBuckets", params, optFns, c.addOperationListBucketsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListBucketsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListBucketsInput struct { - - // Limits the response to buckets that are located in the specified Amazon Web - // Services Region. The Amazon Web Services Region must be expressed according to - // the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) - // Region. For a list of the valid values for all of the Amazon Web Services - // Regions, see [Regions and Endpoints]. - // - // Requests made to a Regional endpoint that is different from the bucket-region - // parameter are not supported. For example, if you want to limit the response to - // your buckets in Region us-west-2 , the request must be made to an endpoint in - // Region us-west-2 . - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - BucketRegion *string - - // ContinuationToken indicates to Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. You can use this ContinuationToken for pagination of the list results. - // - // Length Constraints: Minimum length of 0. Maximum length of 1024. - // - // Required: No. - // - // If you specify the bucket-region , prefix , or continuation-token query - // parameters without using max-buckets to set the maximum number of buckets - // returned in the response, Amazon S3 applies a default page size of 10,000 and - // provides a continuation token if there are more buckets. - ContinuationToken *string - - // Maximum number of buckets to be returned in response. 
When the number is more - // than the count of buckets that are owned by an Amazon Web Services account, - // return all the buckets in response. - MaxBuckets *int32 - - // Limits the response to bucket names that begin with the specified bucket name - // prefix. - Prefix *string - - noSmithyDocumentSerde -} - -type ListBucketsOutput struct { - - // The list of buckets owned by the requester. - Buckets []types.Bucket - - // ContinuationToken is included in the response when there are more buckets that - // can be listed with pagination. The next ListBuckets request to Amazon S3 can be - // continued with this ContinuationToken . ContinuationToken is obfuscated and is - // not a real bucket. - ContinuationToken *string - - // The owner of the buckets listed. - Owner *types.Owner - - // If Prefix was sent with the request, it is included in the response. - // - // All bucket names in the response begin with the specified bucket name prefix. - Prefix *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListBuckets{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBuckets{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListBuckets"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListBucketsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err 
!= nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListBucketsPaginatorOptions is the paginator options for ListBuckets -type ListBucketsPaginatorOptions struct { - // Maximum number of buckets to be returned in response. When the number is more - // than the count of buckets that are owned by an Amazon Web Services account, - // return all the buckets in response. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListBucketsPaginator is a paginator for ListBuckets -type ListBucketsPaginator struct { - options ListBucketsPaginatorOptions - client ListBucketsAPIClient - params *ListBucketsInput - nextToken *string - firstPage bool -} - -// NewListBucketsPaginator returns a new ListBucketsPaginator -func NewListBucketsPaginator(client ListBucketsAPIClient, params *ListBucketsInput, optFns ...func(*ListBucketsPaginatorOptions)) *ListBucketsPaginator { - if params == nil { - params = &ListBucketsInput{} - } - - options := ListBucketsPaginatorOptions{} - if params.MaxBuckets != nil { - options.Limit = *params.MaxBuckets - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListBucketsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.ContinuationToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListBucketsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListBuckets page. -func (p *ListBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListBucketsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.ContinuationToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxBuckets = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListBuckets(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.ContinuationToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ListBucketsAPIClient is a client that implements the ListBuckets operation. 
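Given the recommendation above to use only paginated ListBuckets requests, typical callers drive the ListBucketsPaginator defined just above with a HasMorePages/NextPage loop rather than tracking ContinuationToken by hand. A minimal sketch; the region and page size are placeholders.

// Illustrative sketch (not part of the patch): paginated ListBuckets using
// the ListBucketsPaginator defined above.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1")) // placeholder region
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	p := s3.NewListBucketsPaginator(client, &s3.ListBucketsInput{
		MaxBuckets: aws.Int32(100), // placeholder page size; the paginator carries ContinuationToken between pages
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range page.Buckets {
			fmt.Println(aws.ToString(b.Name))
		}
	}
}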
-type ListBucketsAPIClient interface { - ListBuckets(context.Context, *ListBucketsInput, ...func(*Options)) (*ListBucketsOutput, error) -} - -var _ ListBucketsAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListBuckets", - } -} - -func addListBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: nopGetBucketAccessor, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: false, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go deleted file mode 100644 index ca9ccb587..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go +++ /dev/null @@ -1,328 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a list of all Amazon S3 directory buckets owned by the authenticated -// sender of the request. For more information about directory buckets, see [Directory buckets]in the -// Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in -// an IAM identity-based policy instead of a bucket policy. Cross-account access to -// this API operation isn't supported. This operation can only be performed by the -// Amazon Web Services account that owns the resource. For more information about -// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . -// -// The BucketRegion response element is not part of the ListDirectoryBuckets -// Response Syntax. 
-// -// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { - if params == nil { - params = &ListDirectoryBucketsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListDirectoryBucketsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListDirectoryBucketsInput struct { - - // ContinuationToken indicates to Amazon S3 that the list is being continued on - // buckets in this account with a token. ContinuationToken is obfuscated and is - // not a real bucket name. You can use this ContinuationToken for the pagination - // of the list results. - ContinuationToken *string - - // Maximum number of buckets to be returned in response. When the number is more - // than the count of buckets that are owned by an Amazon Web Services account, - // return all the buckets in response. - MaxDirectoryBuckets *int32 - - noSmithyDocumentSerde -} - -func (in *ListDirectoryBucketsInput) bindEndpointParams(p *EndpointParameters) { - - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type ListDirectoryBucketsOutput struct { - - // The list of buckets owned by the requester. - Buckets []types.Bucket - - // If ContinuationToken was sent with the request, it is included in the response. - // You can use the returned ContinuationToken for pagination of the list response. - ContinuationToken *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListDirectoryBuckets{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListDirectoryBuckets{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListDirectoryBuckets"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListDirectoryBucketsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListDirectoryBucketsPaginatorOptions is the paginator options for -// ListDirectoryBuckets -type ListDirectoryBucketsPaginatorOptions struct { - // Maximum number of buckets to be returned in response. 
When the number is more - // than the count of buckets that are owned by an Amazon Web Services account, - // return all the buckets in response. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListDirectoryBucketsPaginator is a paginator for ListDirectoryBuckets -type ListDirectoryBucketsPaginator struct { - options ListDirectoryBucketsPaginatorOptions - client ListDirectoryBucketsAPIClient - params *ListDirectoryBucketsInput - nextToken *string - firstPage bool -} - -// NewListDirectoryBucketsPaginator returns a new ListDirectoryBucketsPaginator -func NewListDirectoryBucketsPaginator(client ListDirectoryBucketsAPIClient, params *ListDirectoryBucketsInput, optFns ...func(*ListDirectoryBucketsPaginatorOptions)) *ListDirectoryBucketsPaginator { - if params == nil { - params = &ListDirectoryBucketsInput{} - } - - options := ListDirectoryBucketsPaginatorOptions{} - if params.MaxDirectoryBuckets != nil { - options.Limit = *params.MaxDirectoryBuckets - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListDirectoryBucketsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.ContinuationToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListDirectoryBucketsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListDirectoryBuckets page. -func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.ContinuationToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxDirectoryBuckets = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListDirectoryBuckets(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.ContinuationToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ListDirectoryBucketsAPIClient is a client that implements the -// ListDirectoryBuckets operation. 
-type ListDirectoryBucketsAPIClient interface { - ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) -} - -var _ ListDirectoryBucketsAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListDirectoryBuckets", - } -} - -func addListDirectoryBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: nopGetBucketAccessor, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go deleted file mode 100644 index 1c2ecf1db..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go +++ /dev/null @@ -1,508 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation lists in-progress multipart uploads in a bucket. An in-progress -// multipart upload is a multipart upload that has been initiated by the -// CreateMultipartUpload request, but has not yet been completed or aborted. -// -// Directory buckets - If multipart uploads in a directory bucket are in progress, -// you can't delete the bucket until all the in-progress multipart uploads are -// aborted or completed. To delete these in-progress multipart uploads, use the -// ListMultipartUploads operation to list the in-progress multipart uploads in the -// bucket and use the AbortMultipartUpload operation to abort all the in-progress -// multipart uploads. -// -// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads -// in the response. The limit of 1,000 multipart uploads is also the default value. -// You can further limit the number of uploads in a response by specifying the -// max-uploads request parameter. If there are more than 1,000 multipart uploads -// that satisfy your ListMultipartUploads request, the response returns an -// IsTruncated element with the value of true , a NextKeyMarker element, and a -// NextUploadIdMarker element. To list the remaining multipart uploads, you need to -// make subsequent ListMultipartUploads requests. In these requests, include two -// query parameters: key-marker and upload-id-marker . Set the value of key-marker -// to the NextKeyMarker value from the previous response. Similarly, set the value -// of upload-id-marker to the NextUploadIdMarker value from the previous response. 
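The ListMultipartUploads documentation above describes its two-marker pagination: when IsTruncated is true, set key-marker to NextKeyMarker and upload-id-marker to NextUploadIdMarker on the next request (directory buckets carry only the key marker, per the note that follows). A minimal sketch of that loop; the bucket name is a placeholder.

// Illustrative sketch (not part of the patch): listing all in-progress
// multipart uploads by following NextKeyMarker/NextUploadIdMarker, as the
// documentation above describes.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	in := &s3.ListMultipartUploadsInput{Bucket: aws.String("example-bucket")} // placeholder bucket name
	for {
		out, err := client.ListMultipartUploads(ctx, in)
		if err != nil {
			log.Fatal(err)
		}
		for _, u := range out.Uploads {
			fmt.Println(aws.ToString(u.Key), aws.ToString(u.UploadId))
		}
		if !aws.ToBool(out.IsTruncated) {
			break
		}
		// General purpose buckets: carry both markers forward for the next page.
		in.KeyMarker = out.NextKeyMarker
		in.UploadIdMarker = out.NextUploadIdMarker
	}
}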
-// -// Directory buckets - The upload-id-marker element and the NextUploadIdMarker -// element aren't supported by directory buckets. To list the additional multipart -// uploads, you only need to set the value of key-marker to the NextKeyMarker -// value from the previous response. -// -// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about -// endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// Sorting of multipart uploads in response -// -// - General purpose bucket - In the ListMultipartUploads response, the multipart -// uploads are sorted based on two criteria: -// -// - Key-based sorting - Multipart uploads are initially sorted in ascending -// order based on their object keys. -// -// - Time-based sorting - For uploads that share the same object key, they are -// further sorted in ascending order based on the upload initiation time. Among -// uploads with the same key, the one that was initiated first will appear before -// the ones that were initiated later. -// -// - Directory bucket - In the ListMultipartUploads response, the multipart -// uploads aren't sorted lexicographically based on the object keys. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
-// -// The following operations are related to ListMultipartUploads : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [ListParts] -// -// [AbortMultipartUpload] -// -// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { - if params == nil { - params = &ListMultipartUploadsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListMultipartUploads", params, optFns, c.addOperationListMultipartUploadsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListMultipartUploadsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListMultipartUploadsInput struct { - - // The name of the bucket to which the multipart upload was initiated. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Character you use to group keys. - // - // All keys that contain the same string between the prefix, if specified, and the - // first occurrence of the delimiter after the prefix are grouped under a single - // result element, CommonPrefixes . If you don't specify the prefix parameter, then - // the substring starts at the beginning of the key. The keys that are grouped - // under CommonPrefixes result element are not returned elsewhere in the response. - // - // Directory buckets - For directory buckets, / is the only supported delimiter. - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Specifies the multipart upload after which listing should begin. - // - // - General purpose buckets - For general purpose buckets, key-marker is an - // object key. Together with upload-id-marker , this parameter specifies the - // multipart upload after which listing should begin. - // - // If upload-id-marker is not specified, only the keys lexicographically greater - // than the specified key-marker will be included in the list. - // - // If upload-id-marker is specified, any multipart uploads for a key equal to the - // key-marker might also be included, provided those multipart uploads have - // upload IDs lexicographically greater than the specified upload-id-marker . - // - // - Directory buckets - For directory buckets, key-marker is obfuscated and - // isn't a real object key. The upload-id-marker parameter isn't supported by - // directory buckets. 
To list the additional multipart uploads, you only need to - // set the value of key-marker to the NextKeyMarker value from the previous - // response. - // - // In the ListMultipartUploads response, the multipart uploads aren't sorted - // lexicographically based on the object keys. - KeyMarker *string - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the - // response body. 1,000 is the maximum number of uploads that can be returned in a - // response. - MaxUploads *int32 - - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. You can use prefixes to separate a bucket into different grouping of - // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - Prefix *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter is - // ignored. Otherwise, any multipart uploads for a key equal to the key-marker - // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . - // - // This functionality is not supported for directory buckets. - UploadIdMarker *string - - noSmithyDocumentSerde -} - -func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Prefix = in.Prefix - -} - -type ListMultipartUploadsOutput struct { - - // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. - Bucket *string - - // If you specify a delimiter in the request, then the result returns each - // distinct key prefix containing the delimiter in a CommonPrefixes element. The - // distinct key prefixes are returned in the Prefix child element. - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - CommonPrefixes []types.CommonPrefix - - // Contains the delimiter you specified in the request. If you don't specify a - // delimiter in your request, this element is absent from the response. - // - // Directory buckets - For directory buckets, / is the only supported delimiter. - Delimiter *string - - // Encoding type used by Amazon S3 to encode object keys in the response. - // - // If you specify the encoding-type request parameter, Amazon S3 includes this - // element in the response, and returns encoded key name values in the following - // response elements: - // - // Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . 
- EncodingType types.EncodingType - - // Indicates whether the returned list of multipart uploads is truncated. A value - // of true indicates that the list was truncated. The list can be truncated if the - // number of multipart uploads exceeds the limit allowed or specified by max - // uploads. - IsTruncated *bool - - // The key at or after which the listing began. - KeyMarker *string - - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int32 - - // When a list is truncated, this element specifies the value that should be used - // for the key-marker request parameter in a subsequent request. - NextKeyMarker *string - - // When a list is truncated, this element specifies the value that should be used - // for the upload-id-marker request parameter in a subsequent request. - // - // This functionality is not supported for directory buckets. - NextUploadIdMarker *string - - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - Prefix *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter is - // ignored. Otherwise, any multipart uploads for a key equal to the key-marker - // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . - // - // This functionality is not supported for directory buckets. - UploadIdMarker *string - - // Container for elements related to a particular multipart upload. A response can - // contain zero or more Upload elements. - Uploads []types.MultipartUpload - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListMultipartUploads{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListMultipartUploads{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListMultipartUploads"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMultipartUploads(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListMultipartUploadsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - 
return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListMultipartUploads", - } -} - -// getListMultipartUploadsBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getListMultipartUploadsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListMultipartUploadsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListMultipartUploadsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListMultipartUploadsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go deleted file mode 100644 index 32898706e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go +++ /dev/null @@ -1,370 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns metadata about all versions of the objects in a bucket. You can also -// use request parameters as selection criteria to return metadata about a subset -// of all the object versions. -// -// To use this operation, you must have permission to perform the -// s3:ListBucketVersions action. Be aware of the name difference. -// -// A 200 OK response can contain valid or invalid XML. Make sure to design your -// application to parse the contents of the response and handle it appropriately. -// -// To use this operation, you must have READ access to the bucket. 
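// --- Illustrative sketch (added for clarity; not part of the vendored file
// being deleted). It walks every object version and delete marker using the
// key-marker / version-id-marker pagination this operation exposes. The
// package and helper names are hypothetical.
package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listAllVersions(ctx context.Context, client *s3.Client, bucket, prefix string) error {
	in := &s3.ListObjectVersionsInput{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}
	for {
		out, err := client.ListObjectVersions(ctx, in)
		if err != nil {
			return err
		}
		for _, v := range out.Versions {
			fmt.Printf("version: key=%s id=%s latest=%t\n",
				aws.ToString(v.Key), aws.ToString(v.VersionId), aws.ToBool(v.IsLatest))
		}
		for _, d := range out.DeleteMarkers {
			fmt.Printf("delete marker: key=%s id=%s\n",
				aws.ToString(d.Key), aws.ToString(d.VersionId))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		// Resume from the first key/version that was not returned on this page.
		in.KeyMarker = out.NextKeyMarker
		in.VersionIdMarker = out.NextVersionIdMarker
	}
}
// --- end of sketch ---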
-// -// The following operations are related to ListObjectVersions : -// -// [ListObjectsV2] -// -// [GetObject] -// -// [PutObject] -// -// [DeleteObject] -// -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html -func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { - if params == nil { - params = &ListObjectVersionsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListObjectVersions", params, optFns, c.addOperationListObjectVersionsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListObjectVersionsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListObjectVersionsInput struct { - - // The bucket name that contains the objects. - // - // This member is required. - Bucket *string - - // A delimiter is a character that you specify to group keys. All keys that - // contain the same string between the prefix and the first occurrence of the - // delimiter are grouped under a single result element in CommonPrefixes . These - // groups are counted as one result against the max-keys limitation. These keys - // are not returned elsewhere in the response. - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string - - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. If additional keys satisfy the search criteria, but - // were not returned because max-keys was exceeded, the response contains true . To - // return the additional keys, see key-marker and version-id-marker . - MaxKeys *int32 - - // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. 
- OptionalObjectAttributes []types.OptionalObjectAttributes - - // Use this parameter to select only those keys that begin with the specified - // prefix. You can use prefixes to separate a bucket into different groupings of - // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) You can use prefix with delimiter to roll up - // numerous objects into a single result under CommonPrefixes . - Prefix *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the object version you want to start listing from. - VersionIdMarker *string - - noSmithyDocumentSerde -} - -func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Prefix = in.Prefix - -} - -type ListObjectVersionsOutput struct { - - // All of the keys rolled up into a common prefix count as a single return when - // calculating the number of returns. - CommonPrefixes []types.CommonPrefix - - // Container for an object that is a delete marker. - DeleteMarkers []types.DeleteMarkerEntry - - // The delimiter grouping the included keys. A delimiter is a character that you - // specify to group keys. All keys that contain the same string between the prefix - // and the first occurrence of the delimiter are grouped under a single result - // element in CommonPrefixes . These groups are counted as one result against the - // max-keys limitation. These keys are not returned elsewhere in the response. - Delimiter *string - - // Encoding type used by Amazon S3 to encode object key names in the XML response. - // - // If you specify the encoding-type request parameter, Amazon S3 includes this - // element in the response, and returns encoded key name values in the following - // response elements: - // - // KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . - EncodingType types.EncodingType - - // A flag that indicates whether Amazon S3 returned all of the results that - // satisfied the search criteria. If your results were truncated, you can make a - // follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the rest of - // the results. - IsTruncated *bool - - // Marks the last key returned in a truncated response. - KeyMarker *string - - // Specifies the maximum number of objects to return. - MaxKeys *int32 - - // The bucket name. - Name *string - - // When the number of responses exceeds the value of MaxKeys , NextKeyMarker - // specifies the first key not returned that satisfies the search criteria. Use - // this value for the key-marker request parameter in a subsequent request. 
- NextKeyMarker *string - - // When the number of responses exceeds the value of MaxKeys , NextVersionIdMarker - // specifies the first object version not returned that satisfies the search - // criteria. Use this value for the version-id-marker request parameter in a - // subsequent request. - NextVersionIdMarker *string - - // Selects objects that start with the value supplied by this parameter. - Prefix *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Marks the last version of the key returned in a truncated response. - VersionIdMarker *string - - // Container for version information. - Versions []types.ObjectVersion - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectVersions{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectVersions{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectVersions"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectVersions(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = 
v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListObjectVersionsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opListObjectVersions(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListObjectVersions", - } -} - -// getListObjectVersionsBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getListObjectVersionsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListObjectVersionsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListObjectVersionsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListObjectVersionsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go deleted file mode 100644 index de91cd3b5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go +++ /dev/null @@ -1,397 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Returns some or all (up to 1,000) of the objects in a bucket. You can use the -// request parameters as selection criteria to return a subset of the objects in a -// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design -// your application to parse the contents of the response and handle it -// appropriately. -// -// This action has been revised. We recommend that you use the newer version, [ListObjectsV2], -// when developing applications. For backward compatibility, Amazon S3 continues to -// support ListObjects . 
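// --- Illustrative sketch (added for clarity; not part of the vendored file
// being deleted). A single legacy ListObjects (v1) call that groups keys with
// a "/" delimiter; new code should prefer ListObjectsV2. The package and
// helper names are hypothetical.
package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listTopLevel(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.ListObjects(ctx, &s3.ListObjectsInput{
		Bucket:    aws.String(bucket),
		Delimiter: aws.String("/"),
	})
	if err != nil {
		return err
	}
	for _, p := range out.CommonPrefixes {
		fmt.Println("prefix:", aws.ToString(p.Prefix)) // "folder"-like groupings
	}
	for _, o := range out.Contents {
		fmt.Println("object:", aws.ToString(o.Key))
	}
	if aws.ToBool(out.IsTruncated) {
		// With a delimiter set, NextMarker seeds the Marker parameter of the
		// follow-up request; without one, fall back to the last returned Key.
		fmt.Println("more results after:", aws.ToString(out.NextMarker))
	}
	return nil
}
// --- end of sketch ---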
-// -// The following operations are related to ListObjects : -// -// [ListObjectsV2] -// -// [GetObject] -// -// [PutObject] -// -// [CreateBucket] -// -// [ListBuckets] -// -// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html -func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { - if params == nil { - params = &ListObjectsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListObjects", params, optFns, c.addOperationListObjectsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListObjectsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListObjectsInput struct { - - // The name of the bucket containing the objects. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // A delimiter is a character that you use to group keys. 
- Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. Marker can be any key in the bucket. - Marker *string - - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. - MaxKeys *int32 - - // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. - OptionalObjectAttributes []types.OptionalObjectAttributes - - // Limits the response to keys that begin with the specified prefix. - Prefix *string - - // Confirms that the requester knows that she or he will be charged for the list - // objects request. Bucket owners need not specify this parameter in their - // requests. - RequestPayer types.RequestPayer - - noSmithyDocumentSerde -} - -func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Prefix = in.Prefix - -} - -type ListObjectsOutput struct { - - // All of the keys (up to 1,000) rolled up in a common prefix count as a single - // return when calculating the number of returns. - // - // A response can contain CommonPrefixes only if you specify a delimiter. - // - // CommonPrefixes contains all (if there are any) keys between Prefix and the next - // occurrence of the string specified by the delimiter. - // - // CommonPrefixes lists keys that act like subdirectories in the directory - // specified by Prefix . - // - // For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in - // notes/summer/july , the common prefix is notes/summer/ . All of the keys that - // roll up into a common prefix count as a single return when calculating the - // number of returns. - CommonPrefixes []types.CommonPrefix - - // Metadata about each object returned. - Contents []types.Object - - // Causes keys that contain the same string between the prefix and the first - // occurrence of the delimiter to be rolled up into a single result element in the - // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in - // the response. 
Each rolled-up result counts as only one return against the - // MaxKeys value. - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // A flag that indicates whether Amazon S3 returned all of the results that - // satisfied the search criteria. - IsTruncated *bool - - // Indicates where in the bucket listing begins. Marker is included in the - // response if it was sent with the request. - Marker *string - - // The maximum number of keys returned in the response body. - MaxKeys *int32 - - // The bucket name. - Name *string - - // When the response is truncated (the IsTruncated element value in the response - // is true ), you can use the key name in this field as the marker parameter in - // the subsequent request to get the next set of objects. Amazon S3 lists objects - // in alphabetical order. - // - // This element is returned only if you have the delimiter request parameter - // specified. If the response does not include the NextMarker element and it is - // truncated, you can use the value of the last Key element in the response as the - // marker parameter in the subsequent request to get the next set of object keys. - NextMarker *string - - // Keys that begin with the indicated prefix. - Prefix *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListObjects{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjects{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjects"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListObjectsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjects(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListObjectsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *ListObjectsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opListObjects(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListObjects", - } -} - -// getListObjectsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getListObjectsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListObjectsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListObjectsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go deleted file mode 100644 index 6ba1083d5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go +++ /dev/null @@ -1,589 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns some or all (up to 1,000) of the objects in a bucket with each request. -// You can use the request parameters as selection criteria to return a subset of -// the objects in a bucket. A 200 OK response can contain valid or invalid XML. -// Make sure to design your application to parse the contents of the response and -// handle it appropriately. For more information about listing objects, see [Listing object keys programmatically]in the -// Amazon S3 User Guide. To get a list of your buckets, see [ListBuckets]. -// -// - General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't -// return prefixes that are related only to in-progress multipart uploads. -// -// - Directory buckets - For directory buckets, ListObjectsV2 response includes -// the prefixes that are related only to in-progress multipart uploads. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. 
-// -// Permissions -// -// - General purpose bucket permissions - To use this operation, you must have -// READ access to the bucket. You must have permission to perform the -// s3:ListBucket action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] -// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// Sorting order of returned objects -// -// - General purpose bucket - For general purpose buckets, ListObjectsV2 returns -// objects in lexicographical order based on their key names. -// -// - Directory bucket - For directory buckets, ListObjectsV2 does not return -// objects in lexicographical order. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// This section describes the latest revision of this action. We recommend that -// you use this revised API operation for application development. For backward -// compatibility, Amazon S3 continues to support the prior version of this API -// operation, [ListObjects]. 
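// --- Illustrative sketch (added for clarity; not part of the vendored file
// being deleted). It pages through ListObjectsV2 with the SDK's generated
// paginator instead of tracking ContinuationToken by hand; the "/" delimiter
// also surfaces CommonPrefixes. The package and helper names are hypothetical.
package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listAllObjectsV2(ctx context.Context, client *s3.Client, bucket string) error {
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket:    aws.String(bucket),
		Delimiter: aws.String("/"),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx) // fetches the next page via ContinuationToken
		if err != nil {
			return err
		}
		for _, cp := range page.CommonPrefixes {
			fmt.Println("prefix:", aws.ToString(cp.Prefix))
		}
		for _, obj := range page.Contents {
			fmt.Println("object:", aws.ToString(obj.Key))
		}
	}
	return nil
}
// --- end of sketch ---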
-// -// The following operations are related to ListObjectsV2 : -// -// [GetObject] -// -// [PutObject] -// -// [CreateBucket] -// -// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html -// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { - if params == nil { - params = &ListObjectsV2Input{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListObjectsV2", params, optFns, c.addOperationListObjectsV2Middlewares) - if err != nil { - return nil, err - } - - out := result.(*ListObjectsV2Output) - out.ResultMetadata = metadata - return out, nil -} - -type ListObjectsV2Input struct { - - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // ContinuationToken indicates to Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. You can use this ContinuationToken for pagination of the list results. - ContinuationToken *string - - // A delimiter is a character that you use to group keys. - // - // - Directory buckets - For directory buckets, / is the only supported delimiter. - // - // - Directory buckets - When you query ListObjectsV2 with a delimiter during - // in-progress multipart uploads, the CommonPrefixes response parameter contains - // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. - // - // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html - Delimiter *string - - // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are - // encoded only in UTF-8. An object key can contain any Unicode character. However, - // the XML 1.0 parser can't parse certain characters, such as characters with an - // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the - // response. For more information about characters to avoid in object key names, - // see [Object key naming guidelines]. - // - // When using the URL encoding type, non-ASCII characters that are used in an - // object's key name will be percent-encoded according to UTF-8 code values. For - // example, the object test_file(3).png will appear as test_file%283%29.png . - // - // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines - // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html - EncodingType types.EncodingType - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The owner field is not present in ListObjectsV2 by default. If you want to - // return the owner field with each key in the result, then set the FetchOwner - // field to true . - // - // Directory buckets - For directory buckets, the bucket owner is returned as the - // object owner for all objects. - FetchOwner *bool - - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. - MaxKeys *int32 - - // Specifies the optional fields that you want returned in the response. 
Fields - // that you do not specify are not returned. - // - // This functionality is not supported for directory buckets. - OptionalObjectAttributes []types.OptionalObjectAttributes - - // Limits the response to keys that begin with the specified prefix. - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - Prefix *string - - // Confirms that the requester knows that she or he will be charged for the list - // objects request in V2 style. Bucket owners need not specify this parameter in - // their requests. - // - // This functionality is not supported for directory buckets. - RequestPayer types.RequestPayer - - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. - // - // This functionality is not supported for directory buckets. - StartAfter *string - - noSmithyDocumentSerde -} - -func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Prefix = in.Prefix - -} - -type ListObjectsV2Output struct { - - // All of the keys (up to 1,000) that share the same prefix are grouped together. - // When counting the total numbers of returns by this API operation, this group of - // keys is considered as one item. - // - // A response can contain CommonPrefixes only if you specify a delimiter. - // - // CommonPrefixes contains all (if there are any) keys between Prefix and the next - // occurrence of the string specified by a delimiter. - // - // CommonPrefixes lists keys that act like subdirectories in the directory - // specified by Prefix . - // - // For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in - // notes/summer/july , the common prefix is notes/summer/ . All of the keys that - // roll up into a common prefix count as a single return when calculating the - // number of returns. - // - // - Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - // - // - Directory buckets - When you query ListObjectsV2 with a delimiter during - // in-progress multipart uploads, the CommonPrefixes response parameter contains - // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. - // - // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html - CommonPrefixes []types.CommonPrefix - - // Metadata about each object returned. - Contents []types.Object - - // If ContinuationToken was sent with the request, it is included in the - // response. You can use the returned ContinuationToken for pagination of the list - // response. You can use this ContinuationToken for pagination of the list - // results. - ContinuationToken *string - - // Causes keys that contain the same string between the prefix and the first - // occurrence of the delimiter to be rolled up into a single result element in the - // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in - // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. - // - // Directory buckets - For directory buckets, / is the only supported delimiter. - Delimiter *string - - // Encoding type used by Amazon S3 to encode object key names in the XML response. 
- // - // If you specify the encoding-type request parameter, Amazon S3 includes this - // element in the response, and returns encoded key name values in the following - // response elements: - // - // Delimiter, Prefix, Key, and StartAfter . - EncodingType types.EncodingType - - // Set to false if all of the results were returned. Set to true if more keys are - // available to return. If the number of results exceeds that specified by MaxKeys - // , all of the results might not be returned. - IsTruncated *bool - - // KeyCount is the number of keys returned with this request. KeyCount will always - // be less than or equal to the MaxKeys field. For example, if you ask for 50 - // keys, your result will include 50 keys or fewer. - KeyCount *int32 - - // Sets the maximum number of keys returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. - MaxKeys *int32 - - // The bucket name. - Name *string - - // NextContinuationToken is sent when isTruncated is true, which means there are - // more keys in the bucket that can be listed. The next list requests to Amazon S3 - // can be continued with this NextContinuationToken . NextContinuationToken is - // obfuscated and is not a real key - NextContinuationToken *string - - // Keys that begin with the indicated prefix. - // - // Directory buckets - For directory buckets, only prefixes that end in a - // delimiter ( / ) are supported. - Prefix *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // If StartAfter was sent with the request, it is included in the response. - // - // This functionality is not supported for directory buckets. - StartAfter *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectsV2{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectsV2{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectsV2"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectsV2(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 -type ListObjectsV2PaginatorOptions struct { - // Sets the maximum number of keys 
returned in the response. By default, the - // action returns up to 1,000 key names. The response might contain fewer keys but - // will never contain more. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListObjectsV2Paginator is a paginator for ListObjectsV2 -type ListObjectsV2Paginator struct { - options ListObjectsV2PaginatorOptions - client ListObjectsV2APIClient - params *ListObjectsV2Input - nextToken *string - firstPage bool -} - -// NewListObjectsV2Paginator returns a new ListObjectsV2Paginator -func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObjectsV2Input, optFns ...func(*ListObjectsV2PaginatorOptions)) *ListObjectsV2Paginator { - if params == nil { - params = &ListObjectsV2Input{} - } - - options := ListObjectsV2PaginatorOptions{} - if params.MaxKeys != nil { - options.Limit = *params.MaxKeys - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListObjectsV2Paginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.ContinuationToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListObjectsV2Paginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListObjectsV2 page. -func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Output, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.ContinuationToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxKeys = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListObjectsV2(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = nil - if result.IsTruncated != nil && *result.IsTruncated { - p.nextToken = result.NextContinuationToken - } - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func (v *ListObjectsV2Input) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. 
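A minimal sketch of how the ListObjectsV2 paginator above is typically driven, assuming default credential configuration; the bucket name and prefix are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Page through all keys under a prefix; Delimiter groups "subdirectory"
	// style keys into CommonPrefixes as described above.
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket:    aws.String("example-bucket"), // placeholder bucket name
		Prefix:    aws.String("backups/"),       // placeholder prefix
		Delimiter: aws.String("/"),
	}, func(o *s3.ListObjectsV2PaginatorOptions) {
		o.Limit = 200 // forwarded as MaxKeys on each request
	})

	// HasMorePages is true for the first page and whenever the previous page
	// returned a non-empty NextContinuationToken.
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, cp := range page.CommonPrefixes {
			fmt.Println("prefix:", aws.ToString(cp.Prefix))
		}
		for _, obj := range page.Contents {
			fmt.Println("key:", aws.ToString(obj.Key))
		}
	}
}

Setting StopOnDuplicateToken in the paginator options is a guard against a service returning the same continuation token twice, which would otherwise loop forever.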
-type ListObjectsV2APIClient interface { - ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) -} - -var _ ListObjectsV2APIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListObjectsV2", - } -} - -// getListObjectsV2BucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getListObjectsV2BucketMember(input interface{}) (*string, bool) { - in := input.(*ListObjectsV2Input) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListObjectsV2UpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListObjectsV2BucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go deleted file mode 100644 index 575ab00af..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go +++ /dev/null @@ -1,567 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Lists the parts that have been uploaded for a specific multipart upload. -// -// To use this operation, you must provide the upload ID in the request. You -// obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload]. -// -// The ListParts request returns a maximum of 1,000 uploaded parts. The limit of -// 1,000 parts is also the default value. You can restrict the number of parts in a -// response by specifying the max-parts request parameter. If your multipart -// upload consists of more than 1,000 parts, the response returns an IsTruncated -// field with the value of true , and a NextPartNumberMarker element. To list -// remaining uploaded parts, in subsequent ListParts requests, include the -// part-number-marker query string parameter and set its value to the -// NextPartNumberMarker field value from the previous response. -// -// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. 
For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about -// endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. -// -// If the upload was created using server-side encryption with Key Management -// -// Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon -// Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt -// action for the ListParts request to succeed. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
-// -// The following operations are related to ListParts : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [AbortMultipartUpload] -// -// [GetObjectAttributes] -// -// [ListMultipartUploads] -// -// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { - if params == nil { - params = &ListPartsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListParts", params, optFns, c.addOperationListPartsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListPartsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListPartsInput struct { - - // The name of the bucket to which the parts are being uploaded. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // Upload ID identifying the multipart upload whose parts are being listed. - // - // This member is required. - UploadId *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Sets the maximum number of parts to return. - MaxParts *int32 - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerAlgorithm *string - - // The server-side encryption (SSE) customer managed key. This parameter is needed - // only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKey *string - - // The MD5 server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. 
- // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - noSmithyDocumentSerde -} - -func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type ListPartsOutput struct { - - // If the bucket has a lifecycle rule configured with an action to abort - // incomplete multipart uploads and the prefix in the lifecycle rule matches the - // object name in the request, then the response includes this header indicating - // when the initiated multipart upload will become eligible for abort operation. - // For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. - // - // The response will also include the x-amz-abort-rule-id header that will provide - // the ID of the lifecycle configuration rule that defines this action. - // - // This functionality is not supported for directory buckets. - // - // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config - AbortDate *time.Time - - // This header is returned along with the x-amz-abort-date header. It identifies - // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. - // - // This functionality is not supported for directory buckets. - AbortRuleId *string - - // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. - Bucket *string - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm types.ChecksumAlgorithm - - // The checksum type, which determines how part-level checksums are combined to - // create an object-level checksum for multipart objects. You can use this header - // response to verify that the checksum type that is received is the same checksum - // type that was specified in CreateMultipartUpload request. For more information, - // see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Container element that identifies who initiated the multipart upload. If the - // initiator is an Amazon Web Services account, this element provides the same - // information as the Owner element. If the initiator is an IAM User, this element - // provides the user ARN and display name. - Initiator *types.Initiator - - // Indicates whether the returned list of parts is truncated. A true value - // indicates that the list was truncated. A list can be truncated if the number of - // parts exceeds the limit returned in the MaxParts element. - IsTruncated *bool - - // Object key for which the multipart upload was initiated. - Key *string - - // Maximum number of parts that were allowed in the response. - MaxParts *int32 - - // When a list is truncated, this element specifies the last part in the list, as - // well as the value to use for the part-number-marker request parameter in a - // subsequent request. - NextPartNumberMarker *string - - // Container element that identifies the object owner, after the object is - // created. If multipart upload is initiated by an IAM user, this element provides - // the parent account ID and display name. 
- // - // Directory buckets - The bucket owner is returned as the object owner for all - // the parts. - Owner *types.Owner - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *string - - // Container for elements related to a particular part. A response can contain - // zero or more Part elements. - Parts []types.Part - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // The class of storage used to store the uploaded object. - // - // Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. - StorageClass types.StorageClass - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpListParts{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpListParts{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListParts"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpListPartsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListParts(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addListPartsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = 
addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListPartsPaginatorOptions is the paginator options for ListParts -type ListPartsPaginatorOptions struct { - // Sets the maximum number of parts to return. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListPartsPaginator is a paginator for ListParts -type ListPartsPaginator struct { - options ListPartsPaginatorOptions - client ListPartsAPIClient - params *ListPartsInput - nextToken *string - firstPage bool -} - -// NewListPartsPaginator returns a new ListPartsPaginator -func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, optFns ...func(*ListPartsPaginatorOptions)) *ListPartsPaginator { - if params == nil { - params = &ListPartsInput{} - } - - options := ListPartsPaginatorOptions{} - if params.MaxParts != nil { - options.Limit = *params.MaxParts - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListPartsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.PartNumberMarker, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListPartsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListParts page. -func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.PartNumberMarker = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxParts = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListParts(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = nil - if result.IsTruncated != nil && *result.IsTruncated { - p.nextToken = result.NextPartNumberMarker - } - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -func (v *ListPartsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// ListPartsAPIClient is a client that implements the ListParts operation. 
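A minimal sketch of listing parts with the ListParts paginator above, assuming default credential configuration; the bucket, key, and upload ID are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Walk the uploaded parts of one in-progress multipart upload; the
	// paginator resumes from NextPartNumberMarker until IsTruncated is false.
	p := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),      // placeholder
		Key:      aws.String("backups/dump.tar.gz"), // placeholder
		UploadId: aws.String("EXAMPLE-UPLOAD-ID"),   // placeholder upload ID
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, part := range page.Parts {
			fmt.Printf("part %d: %d bytes, etag %s\n",
				aws.ToInt32(part.PartNumber), aws.ToInt64(part.Size), aws.ToString(part.ETag))
		}
	}
}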
-type ListPartsAPIClient interface { - ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) -} - -var _ ListPartsAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListParts", - } -} - -// getListPartsBucketMember returns a pointer to string denoting a provided bucket -// member valueand a boolean indicating if the input has a modeled bucket name, -func getListPartsBucketMember(input interface{}) (*string, bool) { - in := input.(*ListPartsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addListPartsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getListPartsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go deleted file mode 100644 index 0165e30f6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ /dev/null @@ -1,302 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer -// Acceleration is a bucket-level feature that enables you to perform faster data -// transfers to Amazon S3. -// -// To use this operation, you must have permission to perform the -// s3:PutAccelerateConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// The Transfer Acceleration state of a bucket can be set to one of the following -// two values: -// -// - Enabled – Enables accelerated data transfers to the bucket. -// -// - Suspended – Disables accelerated data transfers to the bucket. -// -// The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket. -// -// After setting the Transfer Acceleration state of a bucket to Enabled, it might -// take up to thirty minutes before the data transfer rates to the bucket increase. 
-// -// The name of the bucket used for Transfer Acceleration must be DNS-compliant and -// must not contain periods ("."). -// -// For more information about transfer acceleration, see [Transfer Acceleration]. -// -// The following operations are related to PutBucketAccelerateConfiguration : -// -// [GetBucketAccelerateConfiguration] -// -// [CreateBucket] -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -// [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { - if params == nil { - params = &PutBucketAccelerateConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketAccelerateConfiguration", params, optFns, c.addOperationPutBucketAccelerateConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketAccelerateConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketAccelerateConfigurationInput struct { - - // Container for setting the transfer acceleration state. - // - // This member is required. - AccelerateConfiguration *types.AccelerateConfiguration - - // The name of the bucket for which the accelerate configuration is set. - // - // This member is required. - Bucket *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketAccelerateConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
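A minimal sketch of calling PutBucketAccelerateConfiguration as documented above, assuming default credential configuration; the bucket name is a placeholder:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Enable Transfer Acceleration; use BucketAccelerateStatusSuspended to
	// turn it back off. The bucket name is a placeholder and must be
	// DNS-compliant (no periods) for acceleration to work.
	_, err = client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"),
		AccelerateConfiguration: &types.AccelerateConfiguration{
			Status: types.BucketAccelerateStatusEnabled,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}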
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAccelerateConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAccelerateConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAccelerateConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil 
{ - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketAccelerateConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketAccelerateConfiguration", - } -} - -// getPutBucketAccelerateConfigurationRequestAlgorithmMember gets the request -// checksum algorithm value provided as input. -func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketAccelerateConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketAccelerateConfigurationRequestAlgorithmMember, - RequireChecksum: false, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketAccelerateConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketAccelerateConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketAccelerateConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go deleted file mode 100644 index 4fba71be9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go +++ /dev/null @@ -1,449 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. 
-// -// Sets the permissions on an existing bucket using access control lists (ACL). -// For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the -// WRITE_ACP permission. -// -// You can use one of the following two ways to set a bucket's permissions: -// -// - Specify the ACL in the request body -// -// - Specify permissions using request headers -// -// You cannot specify access permission using both the body and the request -// headers. -// -// Depending on your application needs, you may choose to set the ACL on a bucket -// using either the request body or the headers. For example, if you have an -// existing application that updates a bucket ACL using the request body, then you -// can continue to use that approach. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// ACLs are disabled and no longer affect permissions. You must use policies to -// grant access to your bucket and the objects in it. Requests to set ACLs or -// update ACLs fail and return the AccessControlListNotSupported error code. -// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the -// Amazon S3 User Guide. -// -// Permissions You can set access permissions by using one of the following -// methods: -// -// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a -// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined -// set of grantees and permissions. Specify the canned ACL name as the value of -// x-amz-acl . If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see [Canned ACL]. -// -// - Specify access permissions explicitly with the x-amz-grant-read , -// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control -// headers. When using these headers, you specify explicit access permissions and -// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the -// permission. If you use these ACL-specific headers, you cannot use the -// x-amz-acl header to set a canned ACL. These parameters map to the set of -// permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. -// -// You specify each grantee as a type=value pair, where the type is one of the -// -// following: -// -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// -// - uri – if you are granting permissions to a predefined group -// -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account -// -// Using email addresses to specify a grantee is only supported in the following -// -// Amazon Web Services Regions: -// -// - US East (N. Virginia) -// -// - US West (N. California) -// -// - US West (Oregon) -// -// - Asia Pacific (Singapore) -// -// - Asia Pacific (Sydney) -// -// - Asia Pacific (Tokyo) -// -// - Europe (Ireland) -// -// - South America (São Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the -// -// Amazon Web Services General Reference. -// -// For example, the following x-amz-grant-write header grants create, overwrite, -// -// and delete objects permission to LogDelivery group predefined by Amazon S3 and -// two Amazon Web Services accounts identified by their email addresses. 
-// -// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", -// -// id="111122223333", id="555566667777" -// -// You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. -// -// Grantee Values You can specify the person (grantee) to whom you're assigning -// access rights (using request elements) in the following ways: -// -// - By the person's ID: -// -// <>ID<><>GranteesEmail<> -// -// DisplayName is optional and ignored in the request -// -// - By URI: -// -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// - By Email address: -// -// <>Grantees@email.com<>& -// -// The grantee is resolved to the CanonicalUser and, in a response to a GET Object -// -// acl request, appears as the CanonicalUser. -// -// Using email addresses to specify a grantee is only supported in the following -// -// Amazon Web Services Regions: -// -// - US East (N. Virginia) -// -// - US West (N. California) -// -// - US West (Oregon) -// -// - Asia Pacific (Singapore) -// -// - Asia Pacific (Sydney) -// -// - Asia Pacific (Tokyo) -// -// - Europe (Ireland) -// -// - South America (São Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the -// -// Amazon Web Services General Reference. -// -// The following operations are related to PutBucketAcl : -// -// [CreateBucket] -// -// [DeleteBucket] -// -// [GetObjectAcl] -// -// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region -// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html -// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html -// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL -// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) { - if params == nil { - params = &PutBucketAclInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketAcl", params, optFns, c.addOperationPutBucketAclMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketAclOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketAclInput struct { - - // The bucket to which to apply the ACL. - // - // This member is required. - Bucket *string - - // The canned ACL to apply to the bucket. - ACL types.BucketCannedACL - - // Contains the elements that set the ACL permissions for an object per grantee. - AccessControlPolicy *types.AccessControlPolicy - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. 
- // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to [RFC 1864.] - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string - - // Allows grantee to list the objects in the bucket. - GrantRead *string - - // Allows grantee to read the bucket ACL. - GrantReadACP *string - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions and - // overwrites of those objects. - GrantWrite *string - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string - - noSmithyDocumentSerde -} - -func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketAclOutput struct { - // Metadata pertaining to the operation's result. 
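A minimal sketch of the canned-ACL path for PutBucketAcl described above, assuming default credential configuration; the bucket name is a placeholder:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Canned-ACL form of the request (x-amz-acl header). Explicit grants via
	// the Grant* fields are the alternative; the two cannot be combined, and
	// neither works on buckets with the bucket-owner-enforced ownership setting.
	_, err = client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ACL:    types.BucketCannedACLPrivate,
	})
	if err != nil {
		log.Fatal(err)
	}
}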
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAcl{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAcl{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAcl"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAcl(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketAclUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err 
!= nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketAclInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketAcl", - } -} - -// getPutBucketAclRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. -func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketAclInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketAclRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketAclBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketAclBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketAclInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketAclBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go deleted file mode 100644 index ff457d603..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ /dev/null @@ -1,288 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets an analytics configuration for the bucket (specified by the analytics -// configuration ID). You can have up to 1,000 analytics configurations per bucket. -// -// You can choose to have storage class analysis export analysis reports sent to a -// comma-separated values (CSV) flat file. See the DataExport request element. 
-// Reports are updated daily and are based on the object filters that you -// configure. When selecting data export, you specify a destination bucket and an -// optional destination prefix where the file is written. You can export the data -// to a destination bucket in a different account. However, the destination bucket -// must be in the same Region as the bucket that you are making the PUT analytics -// configuration to. For more information, see [Amazon S3 Analytics – Storage Class Analysis]. -// -// You must create a bucket policy on the destination bucket where the exported -// file is written to grant permissions to Amazon S3 to write objects to the -// bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. -// -// To use this operation, you must have permissions to perform the -// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// PutBucketAnalyticsConfiguration has the following special errors: -// -// - HTTP Error: HTTP 400 Bad Request -// -// - Code: InvalidArgument -// -// - Cause: Invalid argument. -// -// - HTTP Error: HTTP 400 Bad Request -// -// - Code: TooManyConfigurations -// -// - Cause: You are attempting to create a new configuration but have already -// reached the 1,000-configuration limit. -// -// - HTTP Error: HTTP 403 Forbidden -// -// - Code: AccessDenied -// -// - Cause: You are not the owner of the specified bucket, or you do not have -// the s3:PutAnalyticsConfiguration bucket permission to set the configuration on -// the bucket. 
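A hedged sketch of the analytics PUT described above, exporting a whole-bucket storage class analysis report to a destination bucket given by ARN. The nested types.* names are assumed from the SDK's types package (they do not appear in this hunk), and the configuration ID and ARN are placeholders:

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putStorageClassAnalysis configures a daily CSV export of storage class
// analysis data to destARN, which must be in the same Region as bucket.
func putStorageClassAnalysis(ctx context.Context, client *s3.Client, bucket, destARN string) error {
	_, err := client.PutBucketAnalyticsConfiguration(ctx, &s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String("whole-bucket"), // configuration ID; up to 1,000 per bucket
		AnalyticsConfiguration: &types.AnalyticsConfiguration{
			Id: aws.String("whole-bucket"),
			StorageClassAnalysis: &types.StorageClassAnalysis{
				DataExport: &types.StorageClassAnalysisDataExport{
					OutputSchemaVersion: types.StorageClassAnalysisSchemaVersionV1,
					Destination: &types.AnalyticsExportDestination{
						S3BucketDestination: &types.AnalyticsS3BucketDestination{
							Bucket: aws.String(destARN), // e.g. "arn:aws:s3:::analysis-destination" (placeholder)
							Format: types.AnalyticsS3ExportFileFormatCsv,
						},
					},
				},
			},
		},
	})
	return err
}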
-// -// The following operations are related to PutBucketAnalyticsConfiguration : -// -// [GetBucketAnalyticsConfiguration] -// -// [DeleteBucketAnalyticsConfiguration] -// -// [ListBucketAnalyticsConfigurations] -// -// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html -// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 -// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html -// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { - if params == nil { - params = &PutBucketAnalyticsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketAnalyticsConfiguration", params, optFns, c.addOperationPutBucketAnalyticsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketAnalyticsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketAnalyticsConfigurationInput struct { - - // The configuration and any analyses for the analytics filter. - // - // This member is required. - AnalyticsConfiguration *types.AnalyticsConfiguration - - // The name of the bucket to which an analytics configuration is stored. - // - // This member is required. - Bucket *string - - // The ID that identifies the analytics configuration. - // - // This member is required. - Id *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketAnalyticsConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAnalyticsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAnalyticsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*PutBucketAnalyticsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketAnalyticsConfiguration", - } -} - -// getPutBucketAnalyticsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketAnalyticsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketAnalyticsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go deleted file mode 100644 index abe692442..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go +++ /dev/null @@ -1,327 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the cors configuration for your bucket. If the configuration exists, -// Amazon S3 replaces it. -// -// To use this operation, you must be allowed to perform the s3:PutBucketCORS -// action. By default, the bucket owner has this permission and can grant it to -// others. -// -// You set this configuration on a bucket so that the bucket can service -// cross-origin requests. For example, you might want to enable a request whose -// origin is http://www.example.com to access your Amazon S3 bucket at -// my.example.bucket.com by using the browser's XMLHttpRequest capability. -// -// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors -// subresource to the bucket. The cors subresource is an XML document in which you -// configure rules that identify origins and the HTTP methods that can be executed -// on your bucket. The document is limited to 64 KB in size. 
-// -// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS -// request) against a bucket, it evaluates the cors configuration on the bucket -// and uses the first CORSRule rule that matches the incoming browser request to -// enable a cross-origin request. For a rule to match, the following conditions -// must be met: -// -// - The request's Origin header must match AllowedOrigin elements. -// -// - The request method (for example, GET, PUT, HEAD, and so on) or the -// Access-Control-Request-Method header in case of a pre-flight OPTIONS request -// must be one of the AllowedMethod elements. -// -// - Every header specified in the Access-Control-Request-Headers request header -// of a pre-flight request must match an AllowedHeader element. -// -// For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. -// -// The following operations are related to PutBucketCors : -// -// [GetBucketCors] -// -// [DeleteBucketCors] -// -// [RESTOPTIONSobject] -// -// [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html -// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html -// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html -// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html -func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { - if params == nil { - params = &PutBucketCorsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketCors", params, optFns, c.addOperationPutBucketCorsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketCorsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketCorsInput struct { - - // Specifies the bucket impacted by the cors configuration. - // - // This member is required. - Bucket *string - - // Describes the cross-origin access configuration for objects in an Amazon S3 - // bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. - // - // [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html - // - // This member is required. - CORSConfiguration *types.CORSConfiguration - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to [RFC 1864.] - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. 
- // - // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketCorsOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketCors{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketCors{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketCors"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketCors(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketCorsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = 
disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketCorsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketCors", - } -} - -// getPutBucketCorsRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketCorsInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketCorsRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketCorsBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketCorsBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketCorsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketCorsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go deleted file mode 100644 index 0f9db2a25..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go +++ /dev/null @@ -1,404 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation configures default encryption and Amazon S3 Bucket Keys for an -// existing bucket. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// By default, all buckets have a default encryption configuration that uses -// server-side encryption with Amazon S3 managed keys (SSE-S3). -// -// - General purpose buckets -// -// - You can optionally configure default encryption for a bucket by using -// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or -// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). -// If you specify default encryption by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys]. -// For information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption]in the -// Amazon S3 User Guide. -// -// - If you use PutBucketEncryption to set your [default bucket encryption]to SSE-KMS, you should verify -// that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID -// provided in PutBucketEncryption requests. -// -// - Directory buckets - You can optionally configure default encryption for a -// bucket by using server-side encryption with Key Management Service (KMS) keys -// (SSE-KMS). -// -// - We recommend that the bucket's default encryption uses the desired -// encryption configuration and you don't override the bucket default encryption in -// your CreateSession requests or PUT object requests. Then, new objects are -// automatically encrypted with the desired encryption settings. For more -// information about the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads] -// . -// -// - Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket for the -// lifetime of the bucket. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. -// -// - S3 Bucket Keys are always enabled for GET and PUT operations in a directory -// bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy -// SSE-KMS encrypted objects from general purpose buckets to directory buckets, -// from directory buckets to general purpose buckets, or between directory buckets, -// through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a -// copy request is made for a KMS-encrypted object. 
-// -// - When you specify an [KMS customer managed key]for encryption in your directory bucket, only use the -// key ID or key ARN. The key alias format of the KMS key isn't supported. -// -// - For directory buckets, if you use PutBucketEncryption to set your [default bucket encryption]to -// SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption -// requests. -// -// If you're specifying a customer managed KMS key, we recommend using a fully -// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the -// key within the requester’s account. This behavior can result in data that's -// encrypted with a KMS key that belongs to the requester, and not the bucket -// owner. -// -// Also, this action requires Amazon Web Services Signature Version 4. For more -// information, see [Authenticating Requests (Amazon Web Services Signature Version 4)]. -// -// Permissions -// -// - General purpose bucket permissions - The s3:PutEncryptionConfiguration -// permission is required in a policy. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:PutEncryptionConfiguration permission in an IAM -// identity-based policy instead of a bucket policy. Cross-account access to this -// API operation isn't supported. This operation can only be performed by the -// Amazon Web Services account that owns the resource. For more information about -// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// To set a directory bucket default encryption with SSE-KMS, you must also have -// -// the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based -// policies and KMS key policies for the target KMS key. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . 
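A sketch of setting a general purpose bucket's default encryption to SSE-S3, the baseline configuration the doc comment above says every bucket starts with. The rule and enum names are assumed from the SDK's types package:

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putDefaultSSE configures default bucket encryption with Amazon S3 managed
// keys (SSE-S3, AES-256).
func putDefaultSSE(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String(bucket),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm: types.ServerSideEncryptionAes256,
					},
				},
			},
		},
	})
	return err
}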
-// -// The following operations are related to PutBucketEncryption : -// -// [GetBucketEncryption] -// -// [DeleteBucketEncryption] -// -// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html -// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk -// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html -// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html -// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html -// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -// [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html -// [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job -// [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { - if params == nil { - params = &PutBucketEncryptionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketEncryption", params, optFns, c.addOperationPutBucketEncryptionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketEncryptionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketEncryptionInput struct { - - // Specifies default encryption for a bucket using server-side encryption with - // different key options. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). 
Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // Specifies the default server-side-encryption configuration. - // - // This member is required. - ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the - // default checksum algorithm that's used for performance. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the server-side encryption - // configuration. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // This functionality is not supported for directory buckets. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketEncryptionOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketEncryption{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketEncryption{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketEncryption"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketEncryption(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketEncryptionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return 
err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketEncryptionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketEncryption", - } -} - -// getPutBucketEncryptionRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketEncryptionInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketEncryptionRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketEncryptionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutBucketEncryptionBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketEncryptionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketEncryptionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go deleted file mode 100644 index 6a11a0946..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go +++ /dev/null @@ -1,275 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can -// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. 
-// -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without performance impact or operational overhead. S3 Intelligent-Tiering -// delivers automatic cost savings in three low latency and high throughput access -// tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. -// -// The S3 Intelligent-Tiering storage class is the ideal storage class for data -// with unknown, changing, or unpredictable access patterns, independent of object -// size or retention period. If the size of an object is less than 128 KB, it is -// not monitored and not eligible for auto-tiering. Smaller objects can be stored, -// but they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. -// -// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// Operations related to PutBucketIntelligentTieringConfiguration include: -// -// [DeleteBucketIntelligentTieringConfiguration] -// -// [GetBucketIntelligentTieringConfiguration] -// -// [ListBucketIntelligentTieringConfigurations] -// -// You only need S3 Intelligent-Tiering enabled on a bucket if you want to -// automatically move objects stored in the S3 Intelligent-Tiering storage class to -// the Archive Access or Deep Archive Access tier. -// -// PutBucketIntelligentTieringConfiguration has the following special errors: -// -// HTTP 400 Bad Request Error Code: InvalidArgument -// -// Cause: Invalid Argument -// -// HTTP 400 Bad Request Error Code: TooManyConfigurations -// -// Cause: You are attempting to create a new configuration but have already -// reached the 1,000-configuration limit. -// -// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, -// or you do not have the s3:PutIntelligentTieringConfiguration bucket permission -// to set the configuration on the bucket. -// -// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html -// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html -func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { - if params == nil { - params = &PutBucketIntelligentTieringConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketIntelligentTieringConfiguration", params, optFns, c.addOperationPutBucketIntelligentTieringConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketIntelligentTieringConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketIntelligentTieringConfigurationInput struct { - - // The name of the Amazon S3 bucket whose configuration you want to modify or - // retrieve. 
- // - // This member is required. - Bucket *string - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // This member is required. - Id *string - - // Container for S3 Intelligent-Tiering configuration. - // - // This member is required. - IntelligentTieringConfiguration *types.IntelligentTieringConfiguration - - noSmithyDocumentSerde -} - -func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketIntelligentTieringConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketIntelligentTieringConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = 
v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketIntelligentTieringConfiguration", - } -} - -// getPutBucketIntelligentTieringConfigurationBucketMember returns a pointer to -// string denoting a provided bucket member valueand a boolean indicating if the -// input has a modeled bucket name, -func getPutBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketIntelligentTieringConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketIntelligentTieringConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go deleted file mode 100644 index d4ea335a4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// This implementation of the PUT action adds an inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. -// -// Amazon S3 inventory generates inventories of the objects in the bucket on a -// daily or weekly basis, and the results are published to a flat file. 
The bucket -// that is inventoried is called the source bucket, and the bucket where the -// inventory flat file is stored is called the destination bucket. The destination -// bucket must be in the same Amazon Web Services Region as the source bucket. -// -// When you configure an inventory for a source bucket, you specify the -// destination bucket where you want the inventory to be stored, and whether to -// generate the inventory daily or weekly. You can also configure what object -// metadata to include and whether to inventory all object versions or only current -// versions. For more information, see [Amazon S3 Inventory]in the Amazon S3 User Guide. -// -// You must create a bucket policy on the destination bucket to grant permissions -// to Amazon S3 to write objects to the bucket in the defined location. For an -// example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. -// -// Permissions To use this operation, you must have permission to perform the -// s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. -// -// The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report -// that includes all object metadata fields available and to specify the -// destination bucket to store the inventory. A user with read access to objects in -// the destination bucket can also access all object metadata fields that are -// available in the inventory report. -// -// To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide. -// For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists] -// in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations]and [Identity and access management in Amazon S3] -// in the Amazon S3 User Guide. -// -// PutBucketInventoryConfiguration has the following special errors: -// -// HTTP 400 Bad Request Error Code: InvalidArgument -// -// Cause: Invalid Argument -// -// HTTP 400 Bad Request Error Code: TooManyConfigurations -// -// Cause: You are attempting to create a new configuration but have already -// reached the 1,000-configuration limit. -// -// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, -// or you do not have the s3:PutInventoryConfiguration bucket permission to set -// the configuration on the bucket. 
-// -// The following operations are related to PutBucketInventoryConfiguration : -// -// [GetBucketInventoryConfiguration] -// -// [DeleteBucketInventoryConfiguration] -// -// [ListBucketInventoryConfigurations] -// -// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 -// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html -// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html -// [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html -// [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html -// [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10 -// [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents -// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html -func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { - if params == nil { - params = &PutBucketInventoryConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketInventoryConfiguration", params, optFns, c.addOperationPutBucketInventoryConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketInventoryConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketInventoryConfigurationInput struct { - - // The name of the bucket where the inventory configuration will be stored. - // - // This member is required. - Bucket *string - - // The ID used to identify the inventory configuration. - // - // This member is required. - Id *string - - // Specifies the inventory configuration. - // - // This member is required. - InventoryConfiguration *types.InventoryConfiguration - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketInventoryConfigurationOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketInventoryConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketInventoryConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v 
*PutBucketInventoryConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketInventoryConfiguration", - } -} - -// getPutBucketInventoryConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketInventoryConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketInventoryConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go deleted file mode 100644 index 5d51b6567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ /dev/null @@ -1,413 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. Keep in mind that this will overwrite an existing -// lifecycle configuration, so if you want to retain any configuration details, -// they must be included in the new lifecycle configuration. For information about -// lifecycle configuration, see [Managing your storage lifecycle]. -// -// Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, object size, or any -// combination of these. Accordingly, this section describes the latest API. The -// previous version of the API supported filtering based only on an object key name -// prefix, which is supported for backward compatibility. For the related API -// description, see [PutBucketLifecycle]. -// -// Rules Permissions HTTP Host header syntax You specify the lifecycle -// configuration in your request body. The lifecycle configuration is specified as -// XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can -// have up to 1,000 rules. This limit is not adjustable. 
-// -// Bucket lifecycle configuration supports specifying a lifecycle rule using an -// object key name prefix, one or more object tags, object size, or any combination -// of these. Accordingly, this section describes the latest API. The previous -// version of the API supported filtering based only on an object key name prefix, -// which is supported for backward compatibility for general purpose buckets. For -// the related API description, see [PutBucketLifecycle]. -// -// Lifecyle configurations for directory buckets only support expiring objects and -// cancelling multipart uploads. Expiring of versioned objects,transitions and tag -// filters are not supported. -// -// A lifecycle rule consists of the following: -// -// - A filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, object size, or any -// combination of these. -// -// - A status indicating whether the rule is in effect. -// -// - One or more lifecycle transition and expiration actions that you want -// Amazon S3 to perform on the objects identified by the filter. If the state of -// your bucket is versioning-enabled or versioning-suspended, you can have many -// versions of the same object (one current version and zero or more noncurrent -// versions). Amazon S3 provides predefined actions that you can specify for -// current and noncurrent object versions. -// -// For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements]. -// -// - General purpose bucket permissions - By default, all Amazon S3 resources -// are private, including buckets, objects, and related subresources (for example, -// lifecycle configuration and website configuration). Only the resource owner -// (that is, the Amazon Web Services account that created it) can access the -// resource. The resource owner can optionally grant access permissions to others -// by writing an access policy. For this operation, a user must have the -// s3:PutLifecycleConfiguration permission. -// -// You can also explicitly deny permissions. An explicit deny also supersedes any -// -// other permissions. If you want to block users or accounts from removing or -// deleting objects from your bucket, you must deny them permissions for the -// following actions: -// -// - s3:DeleteObject -// -// - s3:DeleteObjectVersion -// -// - s3:PutLifecycleConfiguration -// -// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. -// -// - Directory bucket permissions - You must have the -// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy -// to use this operation. Cross-account access to this API operation isn't -// supported. The resource owner can optionally grant access permissions to others -// by creating a role or user for them as long as they are within the same account -// as the owner and resource. -// -// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in -// -// the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name -// . Virtual-hosted-style requests aren't supported. 
For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . -// -// The following operations are related to PutBucketLifecycleConfiguration : -// -// [GetBucketLifecycleConfiguration] -// -// [DeleteBucketLifecycle] -// -// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html -// [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html -// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html -// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html -// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html -// -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { - if params == nil { - params = &PutBucketLifecycleConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketLifecycleConfiguration", params, optFns, c.addOperationPutBucketLifecycleConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketLifecycleConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketLifecycleConfigurationInput struct { - - // The name of the bucket for which to set the configuration. - // - // This member is required. - Bucket *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The account ID of the expected bucket owner. 
If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - ExpectedBucketOwner *string - - // Container for lifecycle rules. You can add as many as 1,000 rules. - LifecycleConfiguration *types.BucketLifecycleConfiguration - - // Indicates which default minimum object size behavior is applied to the - // lifecycle configuration. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - // - // - all_storage_classes_128K - Objects smaller than 128 KB will not transition - // to any storage class by default. - // - // - varies_by_storage_class - Objects smaller than 128 KB will transition to - // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, - // all other storage classes will prevent transitions smaller than 128 KB. - // - // To customize the minimum object size for any transition you can add a filter - // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body - // of your transition rule. Custom filters always take precedence over the default - // transition behavior. - TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize - - noSmithyDocumentSerde -} - -func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketLifecycleConfigurationOutput struct { - - // Indicates which default minimum object size behavior is applied to the - // lifecycle configuration. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - // - // - all_storage_classes_128K - Objects smaller than 128 KB will not transition - // to any storage class by default. - // - // - varies_by_storage_class - Objects smaller than 128 KB will transition to - // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, - // all other storage classes will prevent transitions smaller than 128 KB. - // - // To customize the minimum object size for any transition you can add a filter - // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body - // of your transition rule. Custom filters always take precedence over the default - // transition behavior. - TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLifecycleConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLifecycleConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLifecycleConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = 
addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketLifecycleConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketLifecycleConfiguration", - } -} - -// getPutBucketLifecycleConfigurationRequestAlgorithmMember gets the request -// checksum algorithm value provided as input. -func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketLifecycleConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketLifecycleConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketLifecycleConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketLifecycleConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketLifecycleConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go deleted file mode 100644 index bd2492cdd..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go +++ /dev/null @@ -1,334 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Set the logging parameters for a bucket and to specify permissions for who can -// view and modify the logging parameters. All logs are saved to buckets in the -// same Amazon Web Services Region as the source bucket. To set the logging status -// of a bucket, you must be the bucket owner. -// -// The bucket owner is automatically granted FULL_CONTROL to all logs. You use the -// Grantee request element to grant access to other people. The Permissions -// request element specifies the kind of access the grantee has to the logs. -// -// If the target bucket for log delivery uses the bucket owner enforced setting -// for S3 Object Ownership, you can't use the Grantee request element to grant -// access to others. Permissions can only be granted using policies. For more -// information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. -// -// Grantee Values You can specify the person (grantee) to whom you're assigning -// access rights (by using request elements) in the following ways: -// -// - By the person's ID: -// -// <>ID<><>GranteesEmail<> -// -// DisplayName is optional and ignored in the request. -// -// - By Email address: -// -// <>Grantees@email.com<> -// -// The grantee is resolved to the CanonicalUser and, in a response to a -// -// GETObjectAcl request, appears as the CanonicalUser. -// -// - By URI: -// -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// To enable logging, you use LoggingEnabled and its children request elements. To -// disable logging, you use an empty BucketLoggingStatus request element: -// -// For more information about server access logging, see [Server Access Logging] in the Amazon S3 User -// Guide. -// -// For more information about creating a bucket, see [CreateBucket]. For more information about -// returning the logging status of a bucket, see [GetBucketLogging]. 
-// -// The following operations are related to PutBucketLogging : -// -// [PutObject] -// -// [DeleteBucket] -// -// [CreateBucket] -// -// [GetBucketLogging] -// -// [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html -func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { - if params == nil { - params = &PutBucketLoggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketLogging", params, optFns, c.addOperationPutBucketLoggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketLoggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketLoggingInput struct { - - // The name of the bucket for which to set the logging parameters. - // - // This member is required. - Bucket *string - - // Container for logging status information. - // - // This member is required. - BucketLoggingStatus *types.BucketLoggingStatus - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash of the PutBucketLogging request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketLoggingOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLogging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLogging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLogging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLogging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketLoggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = 
addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketLoggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketLogging", - } -} - -// getPutBucketLoggingRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketLoggingInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketLoggingRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketLoggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketLoggingBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketLoggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketLoggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go deleted file mode 100644 index 0cd04f456..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go +++ /dev/null @@ -1,266 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets a metrics configuration (specified by the metrics configuration ID) for -// the bucket. You can have up to 1,000 metrics configurations per bucket. If -// you're updating an existing metrics configuration, note that this is a full -// replacement of the existing metrics configuration. 
If you don't include the -// elements you want to keep, they are erased. -// -// To use this operation, you must have permissions to perform the -// s3:PutMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. -// -// The following operations are related to PutBucketMetricsConfiguration : -// -// [DeleteBucketMetricsConfiguration] -// -// [GetBucketMetricsConfiguration] -// -// [ListBucketMetricsConfigurations] -// -// PutBucketMetricsConfiguration has the following special error: -// -// - Error code: TooManyConfigurations -// -// - Description: You are attempting to create a new configuration but have -// already reached the 1,000-configuration limit. -// -// - HTTP Status Code: HTTP 400 Bad Request -// -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html -// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html -// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html -// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { - if params == nil { - params = &PutBucketMetricsConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketMetricsConfiguration", params, optFns, c.addOperationPutBucketMetricsConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketMetricsConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketMetricsConfigurationInput struct { - - // The name of the bucket for which the metrics configuration is set. - // - // This member is required. - Bucket *string - - // The ID used to identify the metrics configuration. The ID has a 64 character - // limit and can only contain letters, numbers, periods, dashes, and underscores. - // - // This member is required. - Id *string - - // Specifies the metrics configuration. - // - // This member is required. - MetricsConfiguration *types.MetricsConfiguration - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). 
- ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketMetricsConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketMetricsConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketMetricsConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = 
addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketMetricsConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketMetricsConfiguration", - } -} - -// getPutBucketMetricsConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketMetricsConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketMetricsConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go deleted file mode 100644 index cbc39609f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go +++ /dev/null @@ -1,280 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Enables notifications of specified events for a bucket. For more information -// about event notifications, see [Configuring Event Notifications]. -// -// Using this API, you can replace an existing notification configuration. The -// configuration is an XML file that defines the event types that you want Amazon -// S3 to publish and the destination where you want Amazon S3 to publish an event -// notification when it detects an event of the specified type. -// -// By default, your bucket has no event notifications configured. That is, the -// notification configuration will be an empty NotificationConfiguration . -// -// This action replaces the existing notification configuration with the -// configuration you include in the request body. 
-// -// After Amazon S3 receives this request, it first verifies that any Amazon Simple -// Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) -// destination exists, and that the bucket owner has permission to publish to it by -// sending a test notification. In the case of Lambda destinations, Amazon S3 -// verifies that the Lambda function permissions grant Amazon S3 permission to -// invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events]. -// -// You can disable notifications by adding the empty NotificationConfiguration -// element. -// -// For more information about the number of event notification configurations that -// you can create per bucket, see [Amazon S3 service quotas]in Amazon Web Services General Reference. -// -// By default, only the bucket owner can configure notifications on a bucket. -// However, bucket owners can use a bucket policy to grant permission to other -// users to set this configuration with the required s3:PutBucketNotification -// permission. -// -// The PUT notification is an atomic operation. For example, suppose your -// notification configuration includes SNS topic, SQS queue, and Lambda function -// configurations. When you send a PUT request with this configuration, Amazon S3 -// sends test messages to your SNS topic. If the message fails, the entire PUT -// action will fail, and Amazon S3 will not add the configuration to your bucket. -// -// If the configuration in the request body includes only one TopicConfiguration -// specifying only the s3:ReducedRedundancyLostObject event type, the response -// will also include the x-amz-sns-test-message-id header containing the message -// ID of the test notification sent to the topic. -// -// The following action is related to PutBucketNotificationConfiguration : -// -// [GetBucketNotificationConfiguration] -// -// [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -// [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3 -// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html -// [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { - if params == nil { - params = &PutBucketNotificationConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketNotificationConfiguration", params, optFns, c.addOperationPutBucketNotificationConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketNotificationConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketNotificationConfigurationInput struct { - - // The name of the bucket. - // - // This member is required. - Bucket *string - - // A container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off for the bucket. - // - // This member is required. - NotificationConfiguration *types.NotificationConfiguration - - // The account ID of the expected bucket owner. 
If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or - // false value. - SkipDestinationValidation *bool - - noSmithyDocumentSerde -} - -func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketNotificationConfigurationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketNotificationConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketNotificationConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketNotificationConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = 
disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketNotificationConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketNotificationConfiguration", - } -} - -// getPutBucketNotificationConfigurationBucketMember returns a pointer to string -// denoting a provided bucket member valueand a boolean indicating if the input has -// a modeled bucket name, -func getPutBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketNotificationConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketNotificationConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go deleted file mode 100644 index 47263aa2a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go +++ /dev/null @@ -1,266 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this -// operation, you must have the s3:PutBucketOwnershipControls permission. For more -// information about Amazon S3 permissions, see [Specifying permissions in a policy]. -// -// For information about Amazon S3 Object Ownership, see [Using object ownership]. 
-// -// The following operations are related to PutBucketOwnershipControls : -// -// # GetBucketOwnershipControls -// -// # DeleteBucketOwnershipControls -// -// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html -// [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html -func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { - if params == nil { - params = &PutBucketOwnershipControlsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketOwnershipControls", params, optFns, c.addOperationPutBucketOwnershipControlsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketOwnershipControlsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketOwnershipControlsInput struct { - - // The name of the Amazon S3 bucket whose OwnershipControls you want to set. - // - // This member is required. - Bucket *string - - // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or - // ObjectWriter) that you want to apply to this Amazon S3 bucket. - // - // This member is required. - OwnershipControls *types.OwnershipControls - - // The MD5 hash of the OwnershipControls request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketOwnershipControlsOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketOwnershipControls{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketOwnershipControls"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketOwnershipControls(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if 
err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketOwnershipControlsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketOwnershipControls", - } -} - -func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: nil, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketOwnershipControlsBucketMember returns a pointer to string denoting -// a provided bucket member valueand a boolean indicating if the input has a -// modeled bucket name, -func getPutBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketOwnershipControlsInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketOwnershipControlsBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go deleted file mode 100644 index 3dc7f6db3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go +++ /dev/null @@ -1,377 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Regional endpoint. These endpoints support path-style requests -// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information about -// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. 
For more -// information about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions If you are using an identity other than the root user of the Amazon -// Web Services account that owns the bucket, the calling identity must both have -// the PutBucketPolicy permissions on the specified bucket and belong to the -// bucket owner's account in order to use this operation. -// -// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. -// -// To ensure that bucket owners don't inadvertently lock themselves out of their -// own buckets, the root principal in a bucket owner's Amazon Web Services account -// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API -// actions, even if their bucket policy explicitly denies the root principal's -// access. Bucket owner root principals can only be blocked from performing these -// API actions by VPC endpoint policies and Amazon Web Services Organizations -// policies. -// -// - General purpose bucket permissions - The s3:PutBucketPolicy permission is -// required in a policy. For more information about general purpose buckets bucket -// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation, you -// must have the s3express:PutBucketPolicy permission in an IAM identity-based -// policy instead of a bucket policy. Cross-account access to this API operation -// isn't supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. -// -// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] -// in the Amazon S3 User Guide. -// -// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region-code.amazonaws.com . 
-// -// The following operations are related to PutBucketPolicy : -// -// [CreateBucket] -// -// [DeleteBucket] -// -// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html -func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { - if params == nil { - params = &PutBucketPolicyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketPolicy", params, optFns, c.addOperationPutBucketPolicyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketPolicyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketPolicyInput struct { - - // The name of the bucket. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use path-style requests in the format - // https://s3express-control.region-code.amazonaws.com/bucket-name . - // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must - // also follow the format bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // - // This member is required. - Bucket *string - - // The bucket policy as a JSON document. - // - // For directory buckets, the only IAM action supported in the bucket policy is - // s3express:CreateSession . - // - // This member is required. - Policy *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . - // - // For the x-amz-checksum-algorithm header, replace algorithm with the - // supported algorithm from the following list: - // - // - CRC-32 - // - // - CRC-32C - // - // - CRC-64NVME - // - // - SHA-1 - // - // - SHA-256 - // - // For more information, see [Checking object integrity] in the Amazon S3 User Guide. 
- // - // If the individual checksum value you provide through x-amz-checksum-algorithm - // doesn't match the checksum algorithm you set through - // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest - // error. - // - // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the - // default checksum algorithm that's used for performance. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. - // - // This functionality is not supported for directory buckets. - ConfirmRemoveSelfBucketAccess *bool - - // The MD5 hash of the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // This functionality is not supported for directory buckets. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - // - // For directory buckets, this header is not supported in this API operation. If - // you specify this header, the request fails with the HTTP status code 501 Not - // Implemented . - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketPolicyOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketPolicy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketPolicy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketPolicy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketPolicy(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketPolicyUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = 
addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketPolicyInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketPolicy", - } -} - -// getPutBucketPolicyRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketPolicyInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketPolicyRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketPolicyBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketPolicyBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketPolicyInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketPolicyBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go deleted file mode 100644 index 40456d2ba..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go +++ /dev/null @@ -1,345 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Creates a replication configuration or replaces an existing one. For more -// information, see [Replication]in the Amazon S3 User Guide. -// -// Specify the replication configuration in the request body. 
In the replication -// configuration, you provide the name of the destination bucket or buckets where -// you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume -// to replicate objects on your behalf, and other relevant information. You can -// invoke this request for a specific Amazon Web Services Region by using the [aws:RequestedRegion] -// aws:RequestedRegion condition key. -// -// A replication configuration must include at least one rule, and can contain a -// maximum of 1,000. Each rule identifies a subset of objects to replicate by -// filtering the objects in the source bucket. To choose additional subsets of -// objects to replicate, add a rule for each subset. -// -// To specify a subset of the objects in the source bucket to apply a replication -// rule to, add the Filter element as a child of the Rule element. You can filter -// objects based on an object key prefix, one or more object tags, or both. When -// you add the Filter element in the configuration, you must also add the following -// elements: DeleteMarkerReplication , Status , and Priority . -// -// If you are using an earlier version of the replication configuration, Amazon S3 -// handles replication of delete markers differently. For more information, see [Backward Compatibility]. -// -// For information about enabling versioning on a bucket, see [Using Versioning]. -// -// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't -// replicate objects that are stored at rest using server-side encryption with KMS -// keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: -// SourceSelectionCriteria , SseKmsEncryptedObjects , Status , -// EncryptionConfiguration , and ReplicaKmsKeyID . For information about -// replication configuration, see [Replicating Objects Created with SSE Using KMS keys]. -// -// For information on PutBucketReplication errors, see [List of replication-related error codes] -// -// Permissions To create a PutBucketReplication request, you must have -// s3:PutReplicationConfiguration permissions for the bucket. -// -// By default, a resource owner, in this case the Amazon Web Services account that -// created the bucket, can perform this operation. The resource owner can also -// grant others permissions to perform the operation. For more information about -// permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// To perform this operation, the user or role performing the action must have the [iam:PassRole] -// permission. 
-// -// The following operations are related to PutBucketReplication : -// -// [GetBucketReplication] -// -// [DeleteBucketReplication] -// -// [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html -// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html -// [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion -// [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html -// [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html -// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html -// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList -// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations -// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { - if params == nil { - params = &PutBucketReplicationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketReplication", params, optFns, c.addOperationPutBucketReplicationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketReplicationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketReplicationInput struct { - - // The name of the bucket - // - // This member is required. - Bucket *string - - // A container for replication rules. You can add up to 1,000 rules. The maximum - // size of a replication configuration is 2 MB. - // - // This member is required. - ReplicationConfiguration *types.ReplicationConfiguration - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. 
- // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // A token to allow Object Lock to be enabled for an existing bucket. - Token *string - - noSmithyDocumentSerde -} - -func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketReplicationOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketReplication{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketReplication{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketReplication"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketReplication(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketReplicationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = 
addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketReplicationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketReplication", - } -} - -// getPutBucketReplicationRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketReplicationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketReplicationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketReplicationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutBucketReplicationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketReplicationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketReplicationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go deleted file mode 100644 index a920e1be2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go +++ /dev/null @@ -1,292 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download will -// be charged for the download. For more information, see [Requester Pays Buckets]. -// -// The following operations are related to PutBucketRequestPayment : -// -// [CreateBucket] -// -// [GetBucketRequestPayment] -// -// [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html -// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { - if params == nil { - params = &PutBucketRequestPaymentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketRequestPayment", params, optFns, c.addOperationPutBucketRequestPaymentMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketRequestPaymentOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketRequestPaymentInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // Container for Payer. - // - // This member is required. - RequestPaymentConfiguration *types.RequestPaymentConfiguration - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). 
- ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketRequestPaymentOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketRequestPayment{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketRequestPayment{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketRequestPayment"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketRequestPayment(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != 
nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketRequestPaymentInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketRequestPayment", - } -} - -// getPutBucketRequestPaymentRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketRequestPaymentInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketRequestPaymentRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketRequestPaymentBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutBucketRequestPaymentBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketRequestPaymentInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketRequestPaymentBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go deleted file mode 100644 index 130021d73..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go +++ /dev/null @@ -1,324 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the tags for a bucket. -// -// Use tags to organize your Amazon Web Services bill to reflect your own cost -// structure. To do this, sign up to get your Amazon Web Services account bill with -// tag key values included. Then, to see the cost of combined resources, organize -// your billing information according to resources with the same tag key values. -// For example, you can tag several resources with a specific application name, and -// then organize your billing information to see the total cost of that application -// across several services. For more information, see [Cost Allocation and Tagging]and [Using Cost Allocation in Amazon S3 Bucket Tags]. -// -// When this operation sets the tags for a bucket, it will overwrite any current -// tags the bucket already has. You cannot use this operation to add tags to an -// existing list of tags. -// -// To use this operation, you must have permissions to perform the -// s3:PutBucketTagging action. The bucket owner has this permission by default and -// can grant this permission to others. For more information about permissions, see -// [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. -// -// PutBucketTagging has the following special errors. For more Amazon S3 errors -// see, [Error Responses]. -// -// - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags]. -// -// - MalformedXML - The XML provided does not match the schema. -// -// - OperationAborted - A conflicting conditional action is currently in progress -// against this resource. Please try again. -// -// - InternalError - The service was unable to apply the provided tag to the -// bucket. 
-// -// The following operations are related to PutBucketTagging : -// -// [GetBucketTagging] -// -// [DeleteBucketTagging] -// -// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html -// [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html -// [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { - if params == nil { - params = &PutBucketTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketTagging", params, optFns, c.addOperationPutBucketTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketTaggingInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // Container for the TagSet and Tag elements. - // - // This member is required. - Tagging *types.Tagging - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketTaggingOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = 
addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketTagging", - } -} - -// getPutBucketTaggingRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketTaggingInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketTaggingRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketTaggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go deleted file mode 100644 index 70961e9a3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go +++ /dev/null @@ -1,328 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// When you enable versioning on a bucket for the first time, it might take a -// short amount of time for the change to be fully propagated. 
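// Usage sketch for the PutBucketTagging operation whose vendored wrapper is
// deleted above: a minimal, self-contained example built on the AWS SDK for
// Go v2 as now pulled through the module cache instead of vendor/. The bucket
// name and tag values are assumptions for illustration only.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the default sources (env, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Bucket and Tagging are the two required members of PutBucketTaggingInput.
	_, err = client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // assumed bucket name
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("project"), Value: aws.String("pbm-backups")},
			},
		},
	})
	if err != nil {
		log.Fatalf("PutBucketTagging failed: %v", err)
	}
}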
While this change is -// propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for -// requests to objects created or updated after enabling versioning. We recommend -// that you wait for 15 minutes after enabling versioning before issuing write -// operations ( PUT or DELETE ) on objects in the bucket. -// -// Sets the versioning state of an existing bucket. -// -// You can set the versioning state with one of the following values: -// -// Enabled—Enables versioning for the objects in the bucket. All objects added to -// the bucket receive a unique version ID. -// -// Suspended—Disables versioning for the objects in the bucket. All objects added -// to the bucket receive the version ID null. -// -// If the versioning state has never been set on a bucket, it has no versioning -// state; a [GetBucketVersioning]request does not return a versioning state value. -// -// In order to enable MFA Delete, you must be the bucket owner. If you are the -// bucket owner and want to enable MFA Delete in the bucket versioning -// configuration, you must include the x-amz-mfa request header and the Status and -// the MfaDelete request elements in a request to set the versioning state of the -// bucket. -// -// If you have an object expiration lifecycle configuration in your non-versioned -// bucket and you want to maintain the same permanent delete behavior when you -// enable versioning, you must add a noncurrent expiration policy. The noncurrent -// expiration lifecycle configuration will manage the deletes of the noncurrent -// object versions in the version-enabled bucket. (A version-enabled bucket -// maintains one current and zero or more noncurrent object versions.) For more -// information, see [Lifecycle and Versioning]. -// -// The following operations are related to PutBucketVersioning : -// -// [CreateBucket] -// -// [DeleteBucket] -// -// [GetBucketVersioning] -// -// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html -// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html -// [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config -// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html -func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { - if params == nil { - params = &PutBucketVersioningInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketVersioning", params, optFns, c.addOperationPutBucketVersioningMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketVersioningOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketVersioningInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // Container for setting the versioning state. - // - // This member is required. - VersioningConfiguration *types.VersioningConfiguration - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . 
For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // >The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The concatenation of the authentication device's serial number, a space, and - // the value that is displayed on your authentication device. - MFA *string - - noSmithyDocumentSerde -} - -func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketVersioningOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketVersioning{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketVersioning{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketVersioning"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, 
options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketVersioning(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketVersioningUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketVersioningInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketVersioning", - } -} - -// getPutBucketVersioningRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. 
-func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketVersioningInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketVersioningRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketVersioningBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutBucketVersioningBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketVersioningInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketVersioningBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go deleted file mode 100644 index 0b81ce2a0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go +++ /dev/null @@ -1,347 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the configuration of the website that is specified in the website -// subresource. To configure a bucket as a website, you can add this subresource on -// the bucket with website configuration information such as the file name of the -// index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3]. -// -// This PUT action requires the S3:PutBucketWebsite permission. By default, only -// the bucket owner can configure the website attached to a bucket; however, bucket -// owners can allow other users to set the website configuration by writing a -// bucket policy that grants them the S3:PutBucketWebsite permission. -// -// To redirect all website requests sent to the bucket's website endpoint, you add -// a website configuration with the following elements. 
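// Compilable sketch of the PutBucketVersioning operation documented above,
// assuming a client constructed as in the PutBucketTagging sketch; the bucket
// name is passed in by the caller and is illustrative only.
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableVersioning turns on versioning for an existing bucket. Switching the
// Status to types.BucketVersioningStatusSuspended suspends it again.
func enableVersioning(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String(bucket),
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	return err
}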
Because all requests are -// sent to another website, you don't need to provide index document name for the -// bucket. -// -// - WebsiteConfiguration -// -// - RedirectAllRequestsTo -// -// - HostName -// -// - Protocol -// -// If you want granular control over redirects, you can use the following elements -// to add routing rules that describe conditions for redirecting requests and -// information about the redirect destination. In this case, the website -// configuration must provide an index document for the bucket, because some -// requests might not be redirected. -// -// - WebsiteConfiguration -// -// - IndexDocument -// -// - Suffix -// -// - ErrorDocument -// -// - Key -// -// - RoutingRules -// -// - RoutingRule -// -// - Condition -// -// - HttpErrorCodeReturnedEquals -// -// - KeyPrefixEquals -// -// - Redirect -// -// - Protocol -// -// - HostName -// -// - ReplaceKeyPrefixWith -// -// - ReplaceKeyWith -// -// - HttpRedirectCode -// -// Amazon S3 has a limitation of 50 routing rules per website configuration. If -// you require more than 50 routing rules, you can use object redirect. For more -// information, see [Configuring an Object Redirect]in the Amazon S3 User Guide. -// -// The maximum request length is limited to 128 KB. -// -// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html -// [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html -func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { - if params == nil { - params = &PutBucketWebsiteInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutBucketWebsite", params, optFns, c.addOperationPutBucketWebsiteMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutBucketWebsiteOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutBucketWebsiteInput struct { - - // The bucket name. - // - // This member is required. - Bucket *string - - // Container for the request. - // - // This member is required. - WebsiteConfiguration *types.WebsiteConfiguration - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see [RFC 1864]. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. 
If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutBucketWebsiteOutput struct { - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketWebsite{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketWebsite"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketWebsite(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutBucketWebsiteUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, 
options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutBucketWebsiteInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutBucketWebsite", - } -} - -// getPutBucketWebsiteRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutBucketWebsiteInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutBucketWebsiteRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutBucketWebsiteBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutBucketWebsiteBucketMember(input interface{}) (*string, bool) { - in := input.(*PutBucketWebsiteInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutBucketWebsiteBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go deleted file mode 100644 index d62dd1321..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go +++ /dev/null @@ -1,1022 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
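// Compilable sketch of the PutBucketWebsite operation documented above; the
// index and error document keys are assumptions for illustration, and the
// client is assumed to be built as in the PutBucketTagging sketch.
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// configureWebsite sets an index and error document on a bucket used for
// static website hosting.
func configureWebsite(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
		Bucket: aws.String(bucket),
		WebsiteConfiguration: &types.WebsiteConfiguration{
			IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	return err
}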
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "time" -) - -// Adds an object to a bucket. -// -// - Amazon S3 never adds partial objects; if you receive a success response, -// Amazon S3 added the entire object to the bucket. You cannot use PutObject to -// only update a single piece of metadata for an existing object. You must put the -// entire object with updated metadata if you want to update some values. -// -// - If your bucket uses the bucket owner enforced setting for Object Ownership, -// ACLs are disabled and no longer affect permissions. All objects written to the -// bucket by any account will be owned by the bucket owner. -// -// - Directory buckets - For directory buckets, you must make requests for this -// API operation to the Zonal endpoint. These endpoints support -// virtual-hosted-style requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information -// about endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Amazon S3 is a distributed system. If it receives multiple write requests for -// the same object simultaneously, it overwrites all but the last object written. -// However, Amazon S3 provides features that can modify this behavior: -// -// - S3 Object Lock - To prevent objects from being deleted or overwritten, you -// can use [Amazon S3 Object Lock]in the Amazon S3 User Guide. -// -// This functionality is not supported for directory buckets. -// -// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 -// receives multiple write requests for the same object simultaneously, it stores -// all versions of the objects. For each write request that is made to the same -// object, Amazon S3 automatically generates a unique version ID of that object -// being stored in Amazon S3. You can retrieve, replace, or delete any version of -// the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User -// Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning] -// . -// -// This functionality is not supported for directory buckets. -// -// Permissions -// -// - General purpose bucket permissions - The following permissions are required -// in your policies when your PutObject request includes specific headers. -// -// - s3:PutObject - To successfully complete the PutObject request, you must -// always have the s3:PutObject permission on a bucket to add an object to it. -// -// - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject -// request, you must have the s3:PutObjectAcl . -// -// - s3:PutObjectTagging - To successfully set the tag-set with your PutObject -// request, you must have the s3:PutObjectTagging . 
-// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Data integrity with Content-MD5 -// -// - General purpose bucket - To ensure that data is not corrupted traversing -// the network, use the Content-MD5 header. When you use this header, Amazon S3 -// checks the object against the provided MD5 value and, if they do not match, -// Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 -// digest, you can calculate the MD5 while putting the object to Amazon S3 and -// compare the returned ETag to the calculated MD5 value. -// -// - Directory bucket - This functionality is not supported for directory -// buckets. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . -// -// For more information about related Amazon S3 APIs, see the following: -// -// [CopyObject] -// -// [DeleteObject] -// -// [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html -// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -// [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html -func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { - if params == nil { - params = &PutObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObject", params, optFns, c.addOperationPutObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectInput struct { - - // The bucket name to which the PUT action was initiated. 
- // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the PUT action was initiated. - // - // This member is required. - Key *string - - // The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon - // S3 User Guide. - // - // When adding a new object, you can use headers to grant ACL-based permissions to - // individual Amazon Web Services accounts or to predefined groups defined by - // Amazon S3. These permissions are then added to the ACL on the object. By - // default, all objects are private. Only the owner has full access control. For - // more information, see [Access Control List (ACL) Overview]and [Managing ACLs Using the REST API] in the Amazon S3 User Guide. - // - // If the bucket that you're uploading objects to uses the bucket owner enforced - // setting for S3 Object Ownership, ACLs are disabled and no longer affect - // permissions. Buckets that use this setting only accept PUT requests that don't - // specify an ACL or PUT requests that specify bucket owner full control ACLs, such - // as the bucket-owner-full-control canned ACL or an equivalent form of this ACL - // expressed in the XML format. 
PUT requests that contain other ACLs (for example, - // custom grants to certain Amazon Web Services accounts) fail and return a 400 - // error with the error code AccessControlListNotSupported . For more information, - // see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 User Guide. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - // - // [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html - // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html - // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL - // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html - ACL types.ObjectCannedACL - - // Object data. - Body io.Reader - - // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // - // General purpose buckets - Setting this header to true causes Amazon S3 to use - // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this - // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. - // - // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT - // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't - // supported, when you copy SSE-KMS encrypted objects from general purpose buckets - // to directory buckets, from directory buckets to general purpose buckets, or - // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a - // call to KMS every time a copy request is made for a KMS-encrypted object. - // - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops - BucketKeyEnabled *bool - - // Can be used to specify caching behavior along the request/reply chain. For more - // information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]. - // - // [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 - CacheControl *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . - // - // For the x-amz-checksum-algorithm header, replace algorithm with the - // supported algorithm from the following list: - // - // - CRC-32 - // - // - CRC-32C - // - // - CRC-64NVME - // - // - SHA-1 - // - // - SHA-256 - // - // For more information, see [Checking object integrity] in the Amazon S3 User Guide. 
- // - // If the individual checksum value you provide through x-amz-checksum-algorithm - // doesn't match the checksum algorithm you set through - // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest - // error. - // - // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any - // request to upload an object with a retention period configured using Amazon S3 - // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. - // - // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the - // default checksum algorithm that's used for performance. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object - ChecksumAlgorithm types.ChecksumAlgorithm - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC-64NVME checksum of the object. The CRC-64NVME - // checksum is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4]. 
- // - // [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4 - ContentDisposition *string - - // Specifies what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]. - // - // [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // Size of the body in bytes. This parameter is useful when the size of the body - // cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]. - // - // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length - ContentLength *int64 - - // The Base64 encoded 128-bit MD5 digest of the message (without the headers) - // according to RFC 1864. This header can be used as a message integrity check to - // verify that the data is the same data that was originally sent. Although it is - // optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, see [REST Authentication]. - // - // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any - // request to upload an object with a retention period configured using Amazon S3 - // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html - // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object - ContentMD5 *string - - // A standard MIME type describing the format of the contents. For more - // information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]. - // - // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type - ContentType *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The date and time at which the object is no longer cacheable. For more - // information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]. - // - // [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 - Expires *time.Time - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantFullControl *string - - // Allows grantee to read the object data and its metadata. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantRead *string - - // Allows grantee to read the object ACL. 
- // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantReadACP *string - - // Allows grantee to write the ACL for the applicable object. - // - // - This functionality is not supported for directory buckets. - // - // - This functionality is not supported for Amazon S3 on Outposts. - GrantWriteACP *string - - // Uploads the object only if the ETag (entity tag) value provided during the - // WRITE operation matches the ETag of the object in S3. If the ETag values do not - // match, the operation returns a 412 Precondition Failed error. - // - // If a conflicting operation occurs during the upload S3 returns a 409 - // ConditionalRequestConflict response. On a 409 failure you should fetch the - // object's ETag and retry the upload. - // - // Expects the ETag value as a string. - // - // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 - // User Guide. - // - // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfMatch *string - - // Uploads the object only if the object key name does not already exist in the - // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. - // - // If a conflicting operation occurs during the upload S3 returns a 409 - // ConditionalRequestConflict response. On a 409 failure you should retry the - // upload. - // - // Expects the '*' (asterisk) character. - // - // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 - // User Guide. - // - // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html - // [RFC 7232]: https://tools.ietf.org/html/rfc7232 - IfNoneMatch *string - - // A map of metadata to store with the object in S3. - Metadata map[string]string - - // Specifies whether a legal hold will be applied to this object. For more - // information about S3 Object Lock, see [Object Lock]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // The Object Lock mode that you want to apply to this object. - // - // This functionality is not supported for directory buckets. - ObjectLockMode types.ObjectLockMode - - // The date and time when you want this object's Object Lock to expire. Must be - // formatted as a timestamp parameter. - // - // This functionality is not supported for directory buckets. - ObjectLockRetainUntilDate *time.Time - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. 
- // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256 ). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // Specifies the Amazon Web Services KMS Encryption Context as an additional - // encryption context to use for object encryption. The value of this header is a - // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption - // context as key-value pairs. This value is stored as object metadata and - // automatically gets passed on to Amazon Web Services KMS for future GetObject - // operations on this object. - // - // General purpose buckets - This value must be explicitly added during CopyObject - // operations if you want an additional encryption context for your object. For - // more information, see [Encryption context]in the Amazon S3 User Guide. - // - // Directory buckets - You can optionally provide an explicit encryption context - // value. The value must match the default encryption context - the bucket Amazon - // Resource Name (ARN). An additional encryption context value is not supported. - // - // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context - SSEKMSEncryptionContext *string - - // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object - // encryption. If the KMS key doesn't exist in the same account that's issuing the - // command, you must use the full Key ARN not the Key ID. - // - // General purpose buckets - If you specify x-amz-server-side-encryption with - // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key - // Alias) of the KMS key to use. If you specify - // x-amz-server-side-encryption:aws:kms or - // x-amz-server-side-encryption:aws:kms:dsse , but do not provide - // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web - // Services managed key ( aws/s3 ) to protect the data. - // - // Directory buckets - If you specify x-amz-server-side-encryption with aws:kms , - // the x-amz-server-side-encryption-aws-kms-key-id header is implicitly assigned - // the ID of the KMS symmetric encryption customer managed key that's configured - // for your directory bucket's default encryption setting. If you want to specify - // the x-amz-server-side-encryption-aws-kms-key-id header explicitly, you can only - // specify it with the ID (Key ID or Key ARN) of the KMS customer managed key - // that's configured for your directory bucket's default encryption setting. 
- // Otherwise, you get an HTTP 400 Bad Request error. Only use the key ID or key - // ARN. The key alias format of the KMS key isn't supported. Your SSE-KMS - // configuration can only support 1 [customer managed key]per directory bucket for the lifetime of the - // bucket. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. - // - // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk - SSEKMSKeyId *string - - // The server-side encryption algorithm that was used when you store this object - // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). - // - // - General purpose buckets - You have four mutually exclusive options to - // protect data using server-side encryption in Amazon S3, depending on how you - // choose to manage the encryption keys. Specifically, the encryption key options - // are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or - // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with - // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You - // can optionally tell Amazon S3 to encrypt data at rest by using server-side - // encryption with other key options. For more information, see [Using Server-Side Encryption]in the Amazon S3 - // User Guide. - // - // - Directory buckets - For directory buckets, there are only two supported - // options for server-side encryption: server-side encryption with Amazon S3 - // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys - // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses - // the desired encryption configuration and you don't override the bucket default - // encryption in your CreateSession requests or PUT object requests. Then, new - // objects are automatically encrypted with the desired encryption settings. For - // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about - // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. - // - // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the - // encryption request headers must match the encryption settings that are specified - // in the CreateSession request. You can't override the values of the encryption - // settings ( x-amz-server-side-encryption , - // x-amz-server-side-encryption-aws-kms-key-id , - // x-amz-server-side-encryption-context , and - // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the - // CreateSession request. You don't need to explicitly specify these encryption - // settings values in Zonal endpoint API calls, and Amazon S3 will use the - // encryption settings values from the CreateSession request to protect new - // objects in the directory bucket. - // - // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the - // session token refreshes automatically to avoid service interruptions when a - // session expires. The CLI or the Amazon Web Services SDKs use the bucket's - // default encryption configuration for the CreateSession request. It's not - // supported to override the encryption settings values in the CreateSession - // request. 
So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption - // request headers must match the default encryption configuration of the directory - // bucket. - // - // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html - // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html - // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - ServerSideEncryption types.ServerSideEncryption - - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high - // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. - // - // - For directory buckets, only the S3 Express One Zone storage class is - // supported to store newly created objects. - // - // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. (For example, "Key1=Value1") - // - // This functionality is not supported for directory buckets. - Tagging *string - - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. For information about object - // metadata, see [Object Key and Metadata]in the Amazon S3 User Guide. - // - // In the following example, the request header sets the redirect to an object - // (anotherPage.html) in the same bucket: - // - // x-amz-website-redirect-location: /anotherPage.html - // - // In the following example, the request header sets the object redirect to - // another website: - // - // x-amz-website-redirect-location: http://www.example.com/ - // - // For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the - // Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. - // - // [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html - // [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html - // [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html - WebsiteRedirectLocation *string - - // Specifies the offset for appending data to existing objects in bytes. The - // offset must be equal to the size of the existing object being appended to. If no - // object exists, setting this header to 0 will create a new object. - // - // This functionality is only supported for objects in the Amazon S3 Express One - // Zone storage class in directory buckets. 
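The encryption inputs documented above (ServerSideEncryption, SSEKMSKeyId, SSEKMSEncryptionContext) are ordinary fields on PutObjectInput. A minimal sketch of an SSE-KMS upload with aws-sdk-go-v2, assuming a caller-supplied client and placeholder bucket, key, and KMS key ARN values (none of this is taken from the PBM sources):

package example

import (
	"context"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithKMS uploads a small object encrypted with a customer managed KMS key.
// bucket, key, and kmsKeyARN are placeholders supplied by the caller.
func putWithKMS(ctx context.Context, client *s3.Client, bucket, key, kmsKeyARN string) (string, error) {
	out, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		Body:                 strings.NewReader("example payload"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String(kmsKeyARN),
	})
	if err != nil {
		return "", err
	}
	// Note: the ETag of an SSE-KMS object is not an MD5 digest of the data.
	return aws.ToString(out.ETag), nil
}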
- WriteOffsetBytes *int64 - - noSmithyDocumentSerde -} - -func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type PutObjectOutput struct { - - // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is - // only present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This header is - // present if the object was uploaded with the CRC-64NVME checksum algorithm, or - // if it was uploaded without a checksum (and Amazon S3 added the default checksum, - // CRC-64NVME , to the uploaded object). For more information about how checksums - // are calculated with multipart uploads, see [Checking object integrity in the Amazon S3 User Guide]. - // - // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be - // present if the object was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. 
For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // This header specifies the checksum type of the object, which determines how - // part-level checksums are combined to create an object-level checksum for - // multipart objects. For PutObject uploads, the checksum type is always - // FULL_OBJECT . You can use this header as a data integrity check to verify that - // the checksum type that is received is the same checksum that was specified. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType types.ChecksumType - - // Entity tag for the uploaded object. - // - // General purpose buckets - To ensure that data is not corrupted traversing the - // network, for objects where the ETag is the MD5 digest of the object, you can - // calculate the MD5 while putting an object to Amazon S3 and compare the returned - // ETag to the calculated MD5 value. - // - // Directory buckets - The ETag for the object in a directory bucket isn't the MD5 - // digest of the object. - ETag *string - - // If the expiration is configured for the object (see [PutBucketLifecycleConfiguration]) in the Amazon S3 User - // Guide, the response includes this header. It includes the expiry-date and - // rule-id key-value pairs that provide information about object expiration. The - // value of the rule-id is URL-encoded. - // - // Object expiration information is not returned in directory buckets and this - // header returns the value " NotImplemented " in all responses for directory - // buckets. - // - // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html - Expiration *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a Base64 encoded string of a - // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. - // This value is stored as object metadata and automatically gets passed on to - // Amazon Web Services KMS for future GetObject operations on this object. - SSEKMSEncryptionContext *string - - // If present, indicates the ID of the KMS key that was used for object encryption. 
- SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3. - ServerSideEncryption types.ServerSideEncryption - - // The size of the object in bytes. This value is only be present if you append - // to an object. - // - // This functionality is only supported for objects in the Amazon S3 Express One - // Zone storage class in directory buckets. - Size *int64 - - // Version ID of the object. - // - // If you enable versioning for a bucket, Amazon S3 automatically generates a - // unique version ID for the object being stored. Amazon S3 returns this ID in the - // response. When you enable versioning for a bucket, if Amazon S3 receives - // multiple write requests for the same object simultaneously, it stores all of the - // objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User - // Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning]. - // - // This functionality is not supported for directory buckets. - // - // [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html - // [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = 
addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = add100Continue(stack, options); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObject", - } -} - -// getPutObjectRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. 
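The checksum plumbing that follows (getPutObjectRequestAlgorithmMember and addPutObjectInputChecksumMiddlewares) feeds the optional ChecksumAlgorithm input into the SDK's flexible-checksum middleware, and the computed digest is echoed back on PutObjectOutput (ChecksumCRC32 and the related fields documented above). A minimal sketch, assuming placeholder bucket and key values supplied by the caller:

package example

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithCRC32 asks the checksum middleware to add a CRC32 trailer to the upload
// and returns the checksum S3 reports back for the stored object.
func putWithCRC32(ctx context.Context, client *s3.Client, bucket, key string, payload []byte) (string, error) {
	out, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String(bucket), // placeholder
		Key:               aws.String(key),    // placeholder
		Body:              bytes.NewReader(payload),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.ChecksumCRC32), nil
}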
-func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectRequestAlgorithmMember, - RequireChecksum: false, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: true, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectBucketMember returns a pointer to string denoting a provided bucket -// member valueand a boolean indicating if the input has a modeled bucket name, -func getPutObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignPutObject is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &PutObjectInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns, - c.client.addOperationPutObjectMiddlewares, - presignConverter(options).convertToPresignMiddleware, - func(stack *middleware.Stack, options Options) error { - return awshttp.RemoveContentTypeHeader(stack) - }, - addPutObjectPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addPutObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go deleted file mode 100644 index 58e081b4c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go +++ /dev/null @@ -1,501 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
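PresignPutObject above signs a PUT request without sending it and returns the URL, HTTP method, and signed headers. A self-contained sketch of generating a short-lived upload URL, assuming default credential resolution and placeholder bucket/key names; WithPresignExpires bounds the URL's lifetime:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg))

	// Bucket and key are placeholders; the presigned PUT is valid for 15 minutes.
	req, err := presigner.PresignPutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("uploads/example.bin"),
	}, s3.WithPresignExpires(15*time.Minute))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL)
}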
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Uses the acl subresource to set the access control list (ACL) permissions for a -// new or existing object in an S3 bucket. You must have the WRITE_ACP permission -// to set the ACL of an object. For more information, see [What permissions can I grant?]in the Amazon S3 User -// Guide. -// -// This functionality is not supported for Amazon S3 on Outposts. -// -// Depending on your application needs, you can choose to set the ACL on an object -// using either the request body or the headers. For example, if you have an -// existing application that updates a bucket ACL using the request body, you can -// continue to use that approach. For more information, see [Access Control List (ACL) Overview]in the Amazon S3 User -// Guide. -// -// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, -// ACLs are disabled and no longer affect permissions. You must use policies to -// grant access to your bucket and the objects in it. Requests to set ACLs or -// update ACLs fail and return the AccessControlListNotSupported error code. -// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the -// Amazon S3 User Guide. -// -// Permissions You can set access permissions using one of the following methods: -// -// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a -// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined -// set of grantees and permissions. Specify the canned ACL name as the value of -// x-amz-ac l. If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see [Canned ACL]. -// -// - Specify access permissions explicitly with the x-amz-grant-read , -// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control -// headers. When using these headers, you specify explicit access permissions and -// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the -// permission. If you use these ACL-specific headers, you cannot use x-amz-acl -// header to set a canned ACL. These parameters map to the set of permissions that -// Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. -// -// You specify each grantee as a type=value pair, where the type is one of the -// -// following: -// -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// -// - uri – if you are granting permissions to a predefined group -// -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account -// -// Using email addresses to specify a grantee is only supported in the following -// -// Amazon Web Services Regions: -// -// - US East (N. Virginia) -// -// - US West (N. 
California) -// -// - US West (Oregon) -// -// - Asia Pacific (Singapore) -// -// - Asia Pacific (Sydney) -// -// - Asia Pacific (Tokyo) -// -// - Europe (Ireland) -// -// - South America (São Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the -// -// Amazon Web Services General Reference. -// -// For example, the following x-amz-grant-read header grants list objects -// -// permission to the two Amazon Web Services accounts identified by their email -// addresses. -// -// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" -// -// You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. -// -// Grantee Values You can specify the person (grantee) to whom you're assigning -// access rights (using request elements) in the following ways: -// -// - By the person's ID: -// -// <>ID<><>GranteesEmail<> -// -// DisplayName is optional and ignored in the request. -// -// - By URI: -// -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// - By Email address: -// -// <>Grantees@email.com<>lt;/Grantee> -// -// The grantee is resolved to the CanonicalUser and, in a response to a GET Object -// -// acl request, appears as the CanonicalUser. -// -// Using email addresses to specify a grantee is only supported in the following -// -// Amazon Web Services Regions: -// -// - US East (N. Virginia) -// -// - US West (N. California) -// -// - US West (Oregon) -// -// - Asia Pacific (Singapore) -// -// - Asia Pacific (Sydney) -// -// - Asia Pacific (Tokyo) -// -// - Europe (Ireland) -// -// - South America (São Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the -// -// Amazon Web Services General Reference. -// -// Versioning The ACL of an object is set at the object version level. By default, -// PUT sets the ACL of the current version of an object. To set the ACL of a -// different version, use the versionId subresource. -// -// The following operations are related to PutObjectAcl : -// -// [CopyObject] -// -// [GetObject] -// -// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region -// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html -// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html -// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { - if params == nil { - params = &PutObjectAclInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectAcl", params, optFns, c.addOperationPutObjectAclMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectAclOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectAclInput struct { - - // The bucket name that contains the object to which you want to attach the ACL. 
- // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Key for which the PUT action was initiated. - // - // This member is required. - Key *string - - // The canned ACL to apply to the object. For more information, see [Canned ACL]. - // - // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL - ACL types.ObjectCannedACL - - // Contains the elements that set the ACL permissions for an object per grantee. - AccessControlPolicy *types.AccessControlPolicy - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as - // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to [RFC 1864.>] - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - // - // [RFC 1864.>]: http://www.ietf.org/rfc/rfc1864.txt - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - // - // This functionality is not supported for Amazon S3 on Outposts. 
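The ACL field above carries a canned ACL name. A minimal sketch of applying bucket-owner-full-control to an existing object, assuming a caller-supplied client and placeholder bucket/key names:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// grantOwnerFullControl applies the bucket-owner-full-control canned ACL to an
// existing object. bucket and key are caller-supplied placeholders.
func grantOwnerFullControl(ctx context.Context, client *s3.Client, bucket, key string) error {
	_, err := client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ACL:    types.ObjectCannedACLBucketOwnerFullControl,
	})
	return err
}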
- GrantFullControl *string - - // Allows grantee to list the objects in the bucket. - // - // This functionality is not supported for Amazon S3 on Outposts. - GrantRead *string - - // Allows grantee to read the bucket ACL. - // - // This functionality is not supported for Amazon S3 on Outposts. - GrantReadACP *string - - // Allows grantee to create new objects in the bucket. - // - // For the bucket and object owners of existing objects, also allows deletions and - // overwrites of those objects. - GrantWrite *string - - // Allows grantee to write the ACL for the applicable bucket. - // - // This functionality is not supported for Amazon S3 on Outposts. - GrantWriteACP *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Version ID used to reference a specific version of the object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type PutObjectAclOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectAcl{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectAcl{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectAcl"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectAcl(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectAclUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err 
!= nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectAclInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectAcl", - } -} - -// getPutObjectAclRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. -func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectAclInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectAclRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectAclBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutObjectAclBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectAclInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectAclBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go deleted file mode 100644 index 691aa6c19..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go +++ /dev/null @@ -1,316 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Applies a legal hold configuration to the specified object. For more -// information, see [Locking Objects]. -// -// This functionality is not supported for Amazon S3 on Outposts. 
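As documented above, the alternative to a canned ACL is the explicit x-amz-grant-* headers, and the two approaches cannot be combined in one request. A sketch of granting READ on one object to a single account via GrantRead, assuming a placeholder canonical user ID and following the type=value grantee grammar shown earlier:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// grantReadToAccount grants READ on a single object to one Amazon Web Services
// account, identified by a placeholder canonical user ID. Do not combine this
// with the canned ACL field in the same request.
func grantReadToAccount(ctx context.Context, client *s3.Client, bucket, key, canonicalID string) error {
	_, err := client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		GrantRead: aws.String(`id="` + canonicalID + `"`),
	})
	return err
}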
-// -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { - if params == nil { - params = &PutObjectLegalHoldInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectLegalHold", params, optFns, c.addOperationPutObjectLegalHoldMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectLegalHoldOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectLegalHoldInput struct { - - // The bucket name containing the object that you want to place a legal hold on. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key name for the object that you want to place a legal hold on. - // - // This member is required. - Key *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash for the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Container element for the legal hold configuration you want to apply to the - // specified object. - LegalHold *types.ObjectLockLegalHold - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. 
- // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The version ID of the object that you want to place a legal hold on. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type PutObjectLegalHoldOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLegalHold{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLegalHold{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLegalHold"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLegalHold(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectLegalHoldUpdateEndpoint(stack, 
options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectLegalHoldInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectLegalHold", - } -} - -// getPutObjectLegalHoldRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectLegalHoldInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectLegalHoldBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutObjectLegalHoldBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectLegalHoldInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectLegalHoldBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go deleted file mode 100644 index 82b0dc9de..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go +++ /dev/null @@ -1,307 +0,0 @@ -// Code generated by smithy-go-codegen 
DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Places an Object Lock configuration on the specified bucket. The rule specified -// in the Object Lock configuration will be applied by default to every new object -// placed in the specified bucket. For more information, see [Locking Objects]. -// -// - The DefaultRetention settings require both a mode and a period. -// -// - The DefaultRetention period can be either Days or Years but you must select -// one. You cannot specify Days and Years at the same time. -// -// - You can enable Object Lock for new or existing buckets. For more -// information, see [Configuring Object Lock]. -// -// [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { - if params == nil { - params = &PutObjectLockConfigurationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectLockConfiguration", params, optFns, c.addOperationPutObjectLockConfigurationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectLockConfigurationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectLockConfigurationInput struct { - - // The bucket whose Object Lock configuration you want to create or replace. - // - // This member is required. - Bucket *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash for the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The Object Lock configuration that you want to apply to the specified bucket. - ObjectLockConfiguration *types.ObjectLockConfiguration - - // Confirms that the requester knows that they will be charged for the request. 
- // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // A token to allow Object Lock to be enabled for an existing bucket. - Token *string - - noSmithyDocumentSerde -} - -func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type PutObjectLockConfigurationOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLockConfiguration{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLockConfiguration{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLockConfiguration"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { - return err - } - if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLockConfiguration(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectLockConfigurationInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectLockConfiguration", - } -} - -// getPutObjectLockConfigurationRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. 
-func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectLockConfigurationInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting -// a provided bucket member valueand a boolean indicating if the input has a -// modeled bucket name, -func getPutObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectLockConfigurationInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectLockConfigurationBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go deleted file mode 100644 index 0887dda7c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go +++ /dev/null @@ -1,323 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Places an Object Retention configuration on an object. For more information, -// see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order -// to place an Object Retention configuration on objects. Bypassing a Governance -// Retention configuration requires the s3:BypassGovernanceRetention permission. -// -// This functionality is not supported for Amazon S3 on Outposts. 
-// -// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html -func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { - if params == nil { - params = &PutObjectRetentionInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectRetention", params, optFns, c.addOperationPutObjectRetentionMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectRetentionOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectRetentionInput struct { - - // The bucket name that contains the object you want to apply this Object - // Retention configuration to. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // The key name for the object that you want to apply this Object Retention - // configuration to. - // - // This member is required. - Key *string - - // Indicates whether this action should bypass Governance-mode restrictions. - BypassGovernanceRetention *bool - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash for the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. 
- // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The container element for the Object Retention configuration. - Retention *types.ObjectLockRetention - - // The version ID for the object that you want to apply this Object Retention - // configuration to. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type PutObjectRetentionOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectRetention{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectRetention{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectRetention"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectRetention(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = 
addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectRetentionUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectRetentionInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectRetention", - } -} - -// getPutObjectRetentionRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectRetentionInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectRetentionBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutObjectRetentionBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectRetentionInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectRetentionBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go deleted file mode 100644 index 0877ae0d8..000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go +++ /dev/null @@ -1,359 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Sets the supplied tag-set to an object that already exists in a bucket. A tag -// is a key-value pair. For more information, see [Object Tagging]. -// -// You can associate tags with an object by sending a PUT request against the -// tagging subresource that is associated with the object. You can retrieve tags by -// sending a GET request. For more information, see [GetObjectTagging]. -// -// For tagging-related restrictions related to characters and encodings, see [Tag Restrictions]. -// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. -// -// To use this operation, you must have permission to perform the -// s3:PutObjectTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. -// -// To put tags of any other version, use the versionId query parameter. You also -// need permission for the s3:PutObjectVersionTagging action. -// -// PutObjectTagging has the following special errors. For more Amazon S3 errors -// see, [Error Responses]. -// -// - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see [Object Tagging]. -// -// - MalformedXML - The XML provided does not match the schema. -// -// - OperationAborted - A conflicting conditional action is currently in progress -// against this resource. Please try again. -// -// - InternalError - The service was unable to apply the provided tag to the -// object. -// -// The following operations are related to PutObjectTagging : -// -// [GetObjectTagging] -// -// [DeleteObjectTagging] -// -// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html -// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html -// [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html -// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html -func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { - if params == nil { - params = &PutObjectTaggingInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutObjectTagging", params, optFns, c.addOperationPutObjectTaggingMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutObjectTaggingOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutObjectTaggingInput struct { - - // The bucket name containing the object. 
- // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Name of the object key. - // - // This member is required. - Key *string - - // Container for the TagSet and Tag elements - // - // This member is required. - Tagging *types.Tagging - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash for the request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. 
- // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // The versionId of the object that the tag-set will be added to. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type PutObjectTaggingOutput struct { - - // The versionId of the object the tag-set was added to. - VersionId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectTagging{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectTagging{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectTagging"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectTagging(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutObjectTaggingUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = 
disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutObjectTaggingInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutObjectTagging", - } -} - -// getPutObjectTaggingRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutObjectTaggingInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutObjectTaggingBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getPutObjectTaggingBucketMember(input interface{}) (*string, bool) { - in := input.(*PutObjectTaggingInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutObjectTaggingBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go deleted file mode 100644 index d4d05ffee..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go +++ /dev/null @@ -1,310 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. -// -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an -// object, it checks the PublicAccessBlock configuration for both the bucket (or -// the bucket that contains the object) and the bucket owner's account. If the -// PublicAccessBlock configurations are different between the bucket and the -// account, Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. -// -// For more information about when Amazon S3 considers a bucket or an object -// public, see [The Meaning of "Public"]. -// -// The following operations are related to PutPublicAccessBlock : -// -// [GetPublicAccessBlock] -// -// [DeletePublicAccessBlock] -// -// [GetBucketPolicyStatus] -// -// [Using Amazon S3 Block Public Access] -// -// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html -// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html -// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html -// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status -func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { - if params == nil { - params = &PutPublicAccessBlockInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "PutPublicAccessBlock", params, optFns, c.addOperationPutPublicAccessBlockMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*PutPublicAccessBlockOutput) - out.ResultMetadata = metadata - return out, nil -} - -type PutPublicAccessBlockInput struct { - - // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want - // to set. - // - // This member is required. - Bucket *string - - // The PublicAccessBlock configuration that you want to apply to this Amazon S3 - // bucket. You can enable the configuration options in any combination. For more - // information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in - // the Amazon S3 User Guide. 
- // - // [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status - // - // This member is required. - PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The MD5 hash of the PutPublicAccessBlock request body. - // - // For requests made using the Amazon Web Services Command Line Interface (CLI) or - // Amazon Web Services SDKs, this field is calculated automatically. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - noSmithyDocumentSerde -} - -func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.UseS3ExpressControlEndpoint = ptr.Bool(true) -} - -type PutPublicAccessBlockOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpPutPublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutPublicAccessBlock{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "PutPublicAccessBlock"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutPublicAccessBlock(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addPutPublicAccessBlockUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - 
return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *PutPublicAccessBlockInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opPutPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "PutPublicAccessBlock", - } -} - -// getPutPublicAccessBlockRequestAlgorithmMember gets the request checksum -// algorithm value provided as input. -func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*PutPublicAccessBlockInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember, - RequireChecksum: true, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getPutPublicAccessBlockBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getPutPublicAccessBlockBucketMember(input interface{}) (*string, bool) { - in := input.(*PutPublicAccessBlockInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addPutPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getPutPublicAccessBlockBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go deleted file mode 100644 index 697b621c4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go +++ /dev/null @@ -1,458 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// This operation is not supported for directory buckets. -// -// # Restores an archived copy of an object back into Amazon S3 -// -// This functionality is not supported for Amazon S3 on Outposts. 
-// -// This action performs the following types of requests: -// -// - restore an archive - Restore an archived object -// -// For more information about the S3 structure in the request body, see the -// following: -// -// [PutObject] -// -// [Managing Access with ACLs] -// - in the Amazon S3 User Guide -// -// [Protecting Data Using Server-Side Encryption] -// - in the Amazon S3 User Guide -// -// Permissions To use this operation, you must have permissions to perform the -// s3:RestoreObject action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] -// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. -// -// Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval -// Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 -// Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are -// not accessible in real time. For objects in the S3 Glacier Flexible Retrieval -// Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first -// initiate a restore request, and then wait until a temporary copy of the object -// is available. If you want a permanent copy of the object, create a copy of it in -// the Amazon S3 Standard storage class in your S3 bucket. To access an archived -// object, you must restore the object for the duration (number of days) that you -// specify. For objects in the Archive Access or Deep Archive Access tiers of S3 -// Intelligent-Tiering, you must first initiate a restore request, and then wait -// until the object is moved into the Frequent Access tier. -// -// To restore a specific object version, you can provide a version ID. If you -// don't provide a version ID, Amazon S3 restores the current version. -// -// When restoring an archived object, you can specify one of the following data -// access tier options in the Tier element of the request body: -// -// - Expedited - Expedited retrievals allow you to quickly access your data -// stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or -// S3 Intelligent-Tiering Archive tier when occasional urgent requests for -// restoring archives are required. For all but the largest archived objects (250 -// MB+), data accessed using Expedited retrievals is typically made available -// within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for -// Expedited retrievals is available when you need it. Expedited retrievals and -// provisioned capacity are not available for objects stored in the S3 Glacier Deep -// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. -// -// - Standard - Standard retrievals allow you to access any of your archived -// objects within several hours. This is the default option for retrieval requests -// that do not specify the retrieval option. Standard retrievals typically finish -// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval -// Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They -// typically finish within 12 hours for objects stored in the S3 Glacier Deep -// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard -// retrievals are free for objects stored in S3 Intelligent-Tiering. 
-// -// - Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible -// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve -// large amounts, even petabytes, of data at no cost. Bulk retrievals typically -// finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval -// Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk -// retrievals are also the lowest-cost retrieval option when restoring objects from -// S3 Glacier Deep Archive. They typically finish within 48 hours for objects -// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering -// Deep Archive tier. -// -// For more information about archive retrieval options and provisioned capacity -// for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide. -// -// You can use Amazon S3 restore speed upgrade to change the restore speed to a -// faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore]in the Amazon -// S3 User Guide. -// -// To get the status of object restoration, you can send a HEAD request. -// Operations return the x-amz-restore header, which provides information about -// the restoration status, in the response. You can use Amazon S3 event -// notifications to notify you when a restore is initiated or completed. For more -// information, see [Configuring Amazon S3 Event Notifications]in the Amazon S3 User Guide. -// -// After restoring an archived object, you can update the restoration period by -// reissuing the request with a new period. Amazon S3 updates the restoration -// period relative to the current time and charges only for the request-there are -// no data transfer charges. You cannot update the restoration period when Amazon -// S3 is actively processing your current restore request for the object. -// -// If your bucket has a lifecycle configuration with a rule that includes an -// expiration action, the object expiration overrides the life span that you -// specify in a restore request. For example, if you restore an object copy for 10 -// days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the -// object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration]and [Object Lifecycle Management] -// in Amazon S3 User Guide. -// -// Responses A successful action returns either the 200 OK or 202 Accepted status -// code. -// -// - If the object is not previously restored, then Amazon S3 returns 202 -// Accepted in the response. -// -// - If the object is previously restored, Amazon S3 returns 200 OK in the -// response. -// -// - Special errors: -// -// - Code: RestoreAlreadyInProgress -// -// - Cause: Object restore is already in progress. -// -// - HTTP Status Code: 409 Conflict -// -// - SOAP Fault Code Prefix: Client -// -// - Code: GlacierExpeditedRetrievalNotAvailable -// -// - Cause: expedited retrievals are currently not available. Try again later. -// (Returned if there is insufficient capacity to process the Expedited request. -// This error applies only to Expedited retrievals and not to S3 Standard or Bulk -// retrievals.) 
-// -// - HTTP Status Code: 503 -// -// - SOAP Fault Code Prefix: N/A -// -// The following operations are related to RestoreObject : -// -// [PutBucketLifecycleConfiguration] -// -// [GetBucketNotificationConfiguration] -// -// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html -// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html -// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources -// [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -// [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html -// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html -// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html -// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html -// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html -// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html -// [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html -func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { - if params == nil { - params = &RestoreObjectInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "RestoreObject", params, optFns, c.addOperationRestoreObjectMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*RestoreObjectOutput) - out.ResultMetadata = metadata - return out, nil -} - -type RestoreObjectInput struct { - - // The bucket name containing the object to restore. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
- // - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the action was initiated. - // - // This member is required. - Key *string - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Container for restore job parameters. - RestoreRequest *types.RestoreRequest - - // VersionId used to reference a specific version of the object. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type RestoreObjectOutput struct { - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // Indicates the path in the provided S3 output location where Select results will - // be restored to. - RestoreOutputPath *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpRestoreObject{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpRestoreObject{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreObject"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreObject(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addRestoreObjectUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - 
return err - } - return nil -} - -func (v *RestoreObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opRestoreObject(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "RestoreObject", - } -} - -// getRestoreObjectRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. -func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*RestoreObjectInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getRestoreObjectRequestAlgorithmMember, - RequireChecksum: false, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: false, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getRestoreObjectBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getRestoreObjectBucketMember(input interface{}) (*string, bool) { - in := input.(*RestoreObjectInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addRestoreObjectUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getRestoreObjectBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go deleted file mode 100644 index b22b0dd0e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go +++ /dev/null @@ -1,485 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithysync "github.com/aws/smithy-go/sync" - "sync" -) - -// This operation is not supported for directory buckets. -// -// This action filters the contents of an Amazon S3 object based on a simple -// structured query language (SQL) statement. In the request, along with the SQL -// expression, you must also specify a data serialization format (JSON, CSV, or -// Apache Parquet) of the object. Amazon S3 uses this format to parse object data -// into records, and returns only records that match the specified SQL expression. -// You must also specify the data serialization format for the response. 
-// -// This functionality is not supported for Amazon S3 on Outposts. -// -// For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User -// Guide. -// -// Permissions You must have the s3:GetObject permission for this operation. -// Amazon S3 Select does not support anonymous access. For more information about -// permissions, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. -// -// Object Data Formats You can use Amazon S3 Select to query objects that have the -// following format properties: -// -// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. -// -// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. -// -// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. -// GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports -// for CSV and JSON files. Amazon S3 Select supports columnar compression for -// Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object -// compression for Parquet objects. -// -// - Server-side encryption - Amazon S3 Select supports querying objects that -// are protected with server-side encryption. -// -// For objects that are encrypted with customer-provided encryption keys (SSE-C), -// -// you must use HTTPS, and you must use the headers that are documented in the [GetObject]. -// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)]in the Amazon S3 User Guide. -// -// For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon -// -// Web Services KMS keys (SSE-KMS), server-side encryption is handled -// transparently, so you don't need to specify anything. For more information about -// server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption]in the Amazon S3 -// User Guide. -// -// Working with the Response Body Given the response size is unknown, Amazon S3 -// Select streams the response as a series of messages and includes a -// Transfer-Encoding header with chunked as its value in the response. For more -// information, see [Appendix: SelectObjectContent Response]. -// -// GetObject Support The SelectObjectContent action does not support the following -// GetObject functionality. For more information, see [GetObject]. -// -// - Range : Although you can specify a scan range for an Amazon S3 Select -// request (see [SelectObjectContentRequest - ScanRange]in the request parameters), you cannot specify the range of -// bytes of an object to return. -// -// - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the -// ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING -// storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or -// REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or -// DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For -// more information about storage classes, see [Using Amazon S3 storage classes]in the Amazon S3 User Guide. 
-// -// Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes] -// -// The following operations are related to SelectObjectContent : -// -// [GetObject] -// -// [GetBucketLifecycleConfiguration] -// -// [PutBucketLifecycleConfiguration] -// -// [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html -// [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html -// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html -// [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange -// [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList -// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html -// [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html -// [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html -// -// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html -// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html -func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { - if params == nil { - params = &SelectObjectContentInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "SelectObjectContent", params, optFns, c.addOperationSelectObjectContentMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*SelectObjectContentOutput) - out.ResultMetadata = metadata - return out, nil -} - -// Learn Amazon S3 Select is no longer available to new customers. Existing -// customers of Amazon S3 Select can continue to use the feature as usual. [Learn more] -// -// Request to filter the contents of an Amazon S3 object based on a simple -// Structured Query Language (SQL) statement. In the request, along with the SQL -// expression, you must specify a data serialization format (JSON or CSV) of the -// object. Amazon S3 uses this to parse object data into records. It returns only -// records that match the specified SQL expression. You must also specify the data -// serialization format for the response. For more information, see [S3Select API Documentation]. -// -// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ -// [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html -type SelectObjectContentInput struct { - - // The S3 bucket. - // - // This member is required. - Bucket *string - - // The expression that is used to query the object. - // - // This member is required. - Expression *string - - // The type of the provided expression (for example, SQL). 
- // - // This member is required. - ExpressionType types.ExpressionType - - // Describes the format of the data in the object that is being queried. - // - // This member is required. - InputSerialization *types.InputSerialization - - // The object key. - // - // This member is required. - Key *string - - // Describes the format of the data that you want Amazon S3 to return in response. - // - // This member is required. - OutputSerialization *types.OutputSerialization - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Specifies if periodic request progress information should be enabled. - RequestProgress *types.RequestProgress - - // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerAlgorithm *string - - // The server-side encryption (SSE) customer managed key. This parameter is needed - // only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKey *string - - // The MD5 server-side encryption (SSE) customer managed key. This parameter is - // needed only when the object was created using a checksum algorithm. For more - // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. - // - // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - // Specifies the byte range of the object to get the records from. A record is - // processed when its first byte is contained by the range. This parameter is - // optional, but when specified, it must not be empty. See RFC 2616, Section - // 14.35.1 about how to specify the start and end of the range. - // - // ScanRange may be used in the following ways: - // - // - 50100 - process only the records starting between the bytes 50 and 100 - // (inclusive, counting from zero) - // - // - 50 - process only the records starting after the byte 50 - // - // - 50 - process only the records within the last 50 bytes of the file. - ScanRange *types.ScanRange - - noSmithyDocumentSerde -} - -func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - -} - -type SelectObjectContentOutput struct { - eventStream *SelectObjectContentEventStream - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -// GetStream returns the type to interact with the event stream. 
-func (o *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { - return o.eventStream -} - -func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpSelectObjectContent{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpSelectObjectContent{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "SelectObjectContent"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addEventStreamSelectObjectContentMiddleware(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSelectObjectContent(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *SelectObjectContentInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func 
newServiceMetadataMiddleware_opSelectObjectContent(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "SelectObjectContent", - } -} - -// getSelectObjectContentBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, -func getSelectObjectContentBucketMember(input interface{}) (*string, bool) { - in := input.(*SelectObjectContentInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addSelectObjectContentUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getSelectObjectContentBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent operation. -// -// For testing and mocking the event stream this type should be initialized via -// the NewSelectObjectContentEventStream constructor function. Using the functional options -// to pass in nested mock behavior. -type SelectObjectContentEventStream struct { - // SelectObjectContentEventStreamReader is the EventStream reader for the - // SelectObjectContentEventStream events. This value is automatically set by the - // SDK when the API call is made Use this member when unit testing your code with - // the SDK to mock out the EventStream Reader. - // - // Must not be nil. - Reader SelectObjectContentEventStreamReader - - done chan struct{} - closeOnce sync.Once - err *smithysync.OnceErr -} - -// NewSelectObjectContentEventStream initializes an SelectObjectContentEventStream. -// This function should only be used for testing and mocking the SelectObjectContentEventStream -// stream within your application. -// -// The Reader member must be set before reading events from the stream. -func NewSelectObjectContentEventStream(optFns ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream { - es := &SelectObjectContentEventStream{ - done: make(chan struct{}), - err: smithysync.NewOnceErr(), - } - for _, fn := range optFns { - fn(es) - } - return es -} - -// Events returns a channel to read events from. -func (es *SelectObjectContentEventStream) Events() <-chan types.SelectObjectContentEventStream { - return es.Reader.Events() -} - -// Close closes the stream. This will also cause the stream to be closed. -// Close must be called when done using the stream API. Not calling Close -// may result in resource leaks. -// -// Will close the underlying EventStream writer and reader, and no more events can be -// sent or received. -func (es *SelectObjectContentEventStream) Close() error { - es.closeOnce.Do(es.safeClose) - return es.Err() -} - -func (es *SelectObjectContentEventStream) safeClose() { - close(es.done) - - es.Reader.Close() -} - -// Err returns any error that occurred while reading or writing EventStream Events -// from the service API's response. Returns nil if there were no errors. 
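// Illustrative sketch only, not part of the removed vendored file: consuming
// the SelectObjectContentEventStream defined above. It assumes default AWS
// credentials are available and uses placeholder bucket, key, and SQL values;
// the CSV serialization settings and the Records event member come from the
// s3/types package.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),
		Key:            aws.String("data/events.csv"),
		Expression:     aws.String("SELECT * FROM S3Object s LIMIT 10"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{
			CSV: &types.CSVOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// GetStream returns the event stream; Close must be called when done.
	stream := out.GetStream()
	defer stream.Close()

	for event := range stream.Events() {
		// Records events carry the matching rows as raw bytes.
		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			fmt.Print(string(records.Value.Payload))
		}
	}
	// Err reports any error raised while reading the stream.
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}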
-func (es *SelectObjectContentEventStream) Err() error { - if err := es.err.Err(); err != nil { - return err - } - - if err := es.Reader.Err(); err != nil { - return err - } - - return nil -} - -func (es *SelectObjectContentEventStream) waitStreamClose() { - type errorSet interface { - ErrorSet() <-chan struct{} - } - - var outputErrCh <-chan struct{} - if v, ok := es.Reader.(errorSet); ok { - outputErrCh = v.ErrorSet() - } - var outputClosedCh <-chan struct{} - if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { - outputClosedCh = v.Closed() - } - - select { - case <-es.done: - case <-outputErrCh: - es.err.SetError(es.Reader.Err()) - es.Close() - - case <-outputClosedCh: - if err := es.Reader.Err(); err != nil { - es.err.SetError(es.Reader.Err()) - } - es.Close() - - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go deleted file mode 100644 index 30eca28f2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go +++ /dev/null @@ -1,687 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" -) - -// Uploads a part in a multipart upload. -// -// In this operation, you provide new data as a part of an object in your request. -// However, you have an option to specify your existing Amazon S3 object as a data -// source for the part you are uploading. To upload a part from an existing object, -// you use the [UploadPartCopy]operation. -// -// You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. In -// response to your initiate request, Amazon S3 returns an upload ID, a unique -// identifier that you must include in your upload part request. -// -// Part numbers can be any number from 1 to 10,000, inclusive. A part number -// uniquely identifies a part and also defines its position within the object being -// created. If you upload a new part using the same part number that was used with -// a previous part, the previously uploaded part is overwritten. -// -// For information about maximum and minimum part sizes and other multipart upload -// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. -// -// After you initiate multipart upload and upload one or more parts, you must -// either complete or abort multipart upload in order to stop getting charged for -// storage of the uploaded parts. Only after you either complete or abort multipart -// upload, Amazon S3 frees up the parts storage and stops charging you for the -// parts storage. -// -// For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide . -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. 
For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about -// endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Permissions -// - General purpose bucket permissions - To perform a multipart upload with -// encryption using an Key Management Service key, the requester must have -// permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The -// requester must also have permissions for the kms:GenerateDataKey action for -// the CreateMultipartUpload API. Then, the requester needs permissions for the -// kms:Decrypt action on the UploadPart and UploadPartCopy APIs. -// -// These permissions are required because Amazon S3 must decrypt and read data -// -// from the encrypted file parts before it completes the multipart upload. For more -// information about KMS permissions, see [Protecting data using server-side encryption with KMS]in the Amazon S3 User Guide. For -// information about the permissions required to use the multipart upload API, see [Multipart upload and permissions] -// and [Multipart upload API and permissions]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation -// for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see [CreateSession]CreateSession . -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. -// -// Data integrity General purpose bucket - To ensure that data is not corrupted -// traversing the network, specify the Content-MD5 header in the upload part -// request. Amazon S3 checks the part data against the provided MD5 value. If they -// do not match, Amazon S3 returns an error. If the upload request is signed with -// Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 -// header as a checksum instead of Content-MD5 . For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]. -// -// Directory buckets - MD5 is not supported by directory buckets. You can use -// checksum algorithms to check object integrity. -// -// Encryption -// - General purpose bucket - Server-side encryption is for data encryption at -// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers -// and decrypts it when you access it. You have mutually exclusive options to -// protect data using server-side encryption in Amazon S3, depending on how you -// choose to manage the encryption keys. 
Specifically, the encryption key options -// are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and -// Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side -// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally -// tell Amazon S3 to encrypt data at rest using server-side encryption with other -// key options. The option you use depends on whether you want to use KMS keys -// (SSE-KMS) or provide your own encryption key (SSE-C). -// -// Server-side encryption is supported by the S3 Multipart Upload operations. -// -// Unless you are using a customer-provided encryption key (SSE-C), you don't need -// to specify the encryption parameters in each UploadPart request. Instead, you -// only need to specify the server-side encryption parameters in the initial -// Initiate Multipart request. For more information, see [CreateMultipartUpload]. -// -// If you request server-side encryption using a customer-provided encryption key -// -// (SSE-C) in your initiate multipart upload request, you must provide identical -// encryption information in each part upload using the following request headers. -// -// - x-amz-server-side-encryption-customer-algorithm -// -// - x-amz-server-side-encryption-customer-key -// -// - x-amz-server-side-encryption-customer-key-MD5 -// -// For more information, see [Using Server-Side Encryption]in the Amazon S3 User Guide. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: server-side encryption with Amazon S3 -// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys -// (SSE-KMS) ( aws:kms ). -// -// Special errors -// -// - Error Code: NoSuchUpload -// -// - Description: The specified multipart upload does not exist. The upload ID -// might be invalid, or the multipart upload might have been aborted or completed. -// -// - HTTP Status Code: 404 Not Found -// -// - SOAP Fault Code Prefix: Client -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
-// -// The following operations are related to UploadPart : -// -// [CreateMultipartUpload] -// -// [CompleteMultipartUpload] -// -// [AbortMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html -// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// -// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html -// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html -// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions -func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { - if params == nil { - params = &UploadPartInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UploadPart", params, optFns, c.addOperationUploadPartMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UploadPartOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UploadPartInput struct { - - // The name of the bucket to which the multipart upload was initiated. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. 
The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Object key for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // Part number of part being uploaded. This is a positive integer between 1 and - // 10,000. - // - // This member is required. - PartNumber *int32 - - // Upload ID identifying the multipart upload whose part is being uploaded. - // - // This member is required. - UploadId *string - - // Object data. - Body io.Reader - - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding - // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // If you provide an individual checksum, Amazon S3 ignores any provided - // ChecksumAlgorithm parameter. - // - // This checksum algorithm must be the same for all parts and it match the - // checksum value supplied in the CreateMultipartUpload request. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumAlgorithm types.ChecksumAlgorithm - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC-32 checksum of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC-32C checksum of the object. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. 
- // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC-64NVME checksum of the part. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Size of the body in bytes. This parameter is useful when the size of the body - // cannot be determined automatically. - ContentLength *int64 - - // The Base64 encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. - // - // This functionality is not supported for directory buckets. - ContentMD5 *string - - // The account ID of the expected bucket owner. If the account ID that you provide - // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header . 
This must be the same - // encryption key specified in the initiate multipart upload request. - // - // This functionality is not supported for directory buckets. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - noSmithyDocumentSerde -} - -func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.Key = in.Key - -} - -type UploadPartOutput struct { - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is - // only present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC-64NVME checksum of the part. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 digest of the object. 
This will only be - // present if the object was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // Entity tag for the uploaded object. - ETag *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). - ServerSideEncryption types.ServerSideEncryption - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPart{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPart{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPart"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addRequestChecksumMetricsTracking(stack, options); err != nil { - return err - } - if err = addOpUploadPartValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPart(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = add100Continue(stack, options); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil { - return err - } - if err = addUploadPartUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err 
- } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *UploadPartInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opUploadPart(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UploadPart", - } -} - -// getUploadPartRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. -func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) { - in := input.(*UploadPartInput) - if len(in.ChecksumAlgorithm) == 0 { - return "", false - } - return string(in.ChecksumAlgorithm), true -} - -func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: getUploadPartRequestAlgorithmMember, - RequireChecksum: false, - RequestChecksumCalculation: options.RequestChecksumCalculation, - EnableTrailingChecksum: true, - EnableComputeSHA256PayloadHash: true, - EnableDecodedContentLengthHeader: true, - }) -} - -// getUploadPartBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getUploadPartBucketMember(input interface{}) (*string, bool) { - in := input.(*UploadPartInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addUploadPartUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getUploadPartBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} - -// PresignUploadPart is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. 
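// Illustrative sketch only, not part of the removed vendored file: a single
// UploadPart call in the multipart flow documented above. It assumes default
// AWS credentials are available; the bucket, key, and payload are placeholders,
// and CreateMultipartUpload / CompleteMultipartUpload are the related
// operations referenced in the documentation rather than code shown in this
// diff.
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	bucket, key := aws.String("example-bucket"), aws.String("backups/demo.bin")

	// Initiate the multipart upload to obtain the upload ID that every
	// UploadPart request must carry.
	mpu, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Upload one part; part numbers range from 1 to 10,000. A sole (last)
	// part is exempt from the 5 MiB minimum part size.
	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int32(1),
		Body:       bytes.NewReader([]byte("example payload")),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Complete (or abort) the upload so the stored parts stop accruing charges.
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int32(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}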
-func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &UploadPartInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns, - c.client.addOperationUploadPartMiddlewares, - presignConverter(options).convertToPresignMiddleware, - func(stack *middleware.Stack, options Options) error { - return awshttp.RemoveContentTypeHeader(stack) - }, - addUploadPartPayloadAsUnsigned, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} - -func addUploadPartPayloadAsUnsigned(stack *middleware.Stack, options Options) error { - v4.RemoveContentSHA256HeaderMiddleware(stack) - v4.RemoveComputePayloadSHA256Middleware(stack) - return v4.AddUnsignedPayloadMiddleware(stack) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go deleted file mode 100644 index 10cc2e748..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go +++ /dev/null @@ -1,659 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "time" -) - -// Uploads a part by copying data from an existing object as data source. To -// specify the data source, you add the request header x-amz-copy-source in your -// request. To specify a byte range, you add the request header -// x-amz-copy-source-range in your request. -// -// For information about maximum and minimum part sizes and other multipart upload -// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. -// -// Instead of copying data from an existing object as part data, you might use the [UploadPart] -// action to upload new data as a part of an object in your request. -// -// You must initiate a multipart upload before you can upload any part. In -// response to your initiate request, Amazon S3 returns the upload ID, a unique -// identifier that you must include in your upload part request. -// -// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User -// Guide. For information about copying objects using a single atomic action vs. a -// multipart upload, see [Operations on Objects]in the Amazon S3 User Guide. -// -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . -// Path-style requests are not supported. For more information about endpoints in -// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. 
For more information about -// endpoints in Local Zones, see [Available Local Zone for directory buckets]in the Amazon S3 User Guide. -// -// Authentication and authorization All UploadPartCopy requests must be -// authenticated and signed by using IAM credentials (access key ID and secret -// access key for the IAM identities). All headers with the x-amz- prefix, -// including x-amz-copy-source , must be signed. For more information, see [REST Authentication]. -// -// Directory buckets - You must use IAM credentials to authenticate and authorize -// your access to the UploadPartCopy API operation, instead of using the temporary -// security credentials through the CreateSession API operation. -// -// Amazon Web Services CLI or SDKs handles authentication and authorization on -// your behalf. -// -// Permissions You must have READ access to the source object and WRITE access to -// the destination bucket. -// -// - General purpose bucket permissions - You must have the permissions in a -// policy based on the bucket types of your source bucket and destination bucket in -// an UploadPartCopy operation. -// -// - If the source object is in a general purpose bucket, you must have the -// s3:GetObject permission to read the source object that is being copied. -// -// - If the destination bucket is a general purpose bucket, you must have the -// s3:PutObject permission to write the object copy to the destination bucket. -// -// - To perform a multipart upload with encryption using an Key Management -// Service key, the requester must have permission to the kms:Decrypt and -// kms:GenerateDataKey actions on the key. The requester must also have -// permissions for the kms:GenerateDataKey action for the CreateMultipartUpload -// API. Then, the requester needs permissions for the kms:Decrypt action on the -// UploadPart and UploadPartCopy APIs. These permissions are required because -// Amazon S3 must decrypt and read data from the encrypted file parts before it -// completes the multipart upload. For more information about KMS permissions, see [Protecting data using server-side encryption with KMS] -// in the Amazon S3 User Guide. For information about the permissions required to -// use the multipart upload API, see [Multipart upload and permissions]and [Multipart upload API and permissions]in the Amazon S3 User Guide. -// -// - Directory bucket permissions - You must have permissions in a bucket policy -// or an IAM identity-based policy based on the source and destination bucket types -// in an UploadPartCopy operation. -// -// - If the source object that you want to copy is in a directory bucket, you -// must have the s3express:CreateSession permission in the Action element of a -// policy to read the object. By default, the session is in the ReadWrite mode. -// If you want to restrict the access, you can explicitly set the -// s3express:SessionMode condition key to ReadOnly on the copy source bucket. -// -// - If the copy destination is a directory bucket, you must have the -// s3express:CreateSession permission in the Action element of a policy to write -// the object to the destination. The s3express:SessionMode condition key cannot -// be set to ReadOnly on the copy destination. -// -// If the object is encrypted with SSE-KMS, you must also have the -// -// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies -// and KMS key policies for the KMS key. 
-// -// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. -// -// Encryption -// -// - General purpose buckets - For information about using server-side -// encryption with customer-provided encryption keys with the UploadPartCopy -// operation, see [CopyObject]and [UploadPart]. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: server-side encryption with Amazon S3 -// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys -// (SSE-KMS) ( aws:kms ). For more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. -// -// For directory buckets, when you perform a CreateMultipartUpload operation and an -// -// UploadPartCopy operation, the request headers you provide in the -// CreateMultipartUpload request must match the default encryption configuration -// of the destination bucket. -// -// S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from -// -// general purpose buckets to directory buckets, from directory buckets to general -// purpose buckets, or between directory buckets, through [UploadPartCopy]. In this case, Amazon -// S3 makes a call to KMS every time a copy request is made for a KMS-encrypted -// object. -// -// Special errors -// -// - Error Code: NoSuchUpload -// -// - Description: The specified multipart upload does not exist. The upload ID -// might be invalid, or the multipart upload might have been aborted or completed. -// -// - HTTP Status Code: 404 Not Found -// -// - Error Code: InvalidRequest -// -// - Description: The specified copy source is not supported as a byte-range -// copy source. -// -// - HTTP Status Code: 400 Bad Request -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
-// -// The following operations are related to UploadPartCopy : -// -// [CreateMultipartUpload] -// -// [UploadPart] -// -// [CompleteMultipartUpload] -// -// [AbortMultipartUpload] -// -// [ListParts] -// -// [ListMultipartUploads] -// -// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html -// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html -// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html -// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html -// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html -// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html -// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions -// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html -// [Available Local Zone for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html -// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html -// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html -// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html -// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html -// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html -// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html -// [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html -// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html -// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html -// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html -// -// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html -func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { - if params == nil { - params = &UploadPartCopyInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "UploadPartCopy", params, optFns, c.addOperationUploadPartCopyMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*UploadPartCopyOutput) - out.ResultMetadata = metadata - return out, nil -} - -type UploadPartCopyInput struct { - - // The bucket name. - // - // Directory buckets - When you use this operation with a directory bucket, you - // must use virtual-hosted-style requests in the format - // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests - // are not supported. Directory bucket names must be unique in the chosen Zone - // (Availability Zone or Local Zone). 
Bucket names must follow the format - // bucket-base-name--zone-id--x-s3 (for example, - // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming - // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. - // - // Copying objects across different Amazon Web Services Regions isn't supported - // when the source or destination bucket is in Amazon Web Services Local Zones. The - // source and destination buckets must have the same parent Amazon Web Services - // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code - // InvalidRequest . - // - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see [Using access points]in the Amazon S3 User Guide. - // - // Access points and Object Lambda access points are not supported by directory - // buckets. - // - // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must - // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname - // takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. - // - // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html - // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html - // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html - // - // This member is required. - Bucket *string - - // Specifies the source object for the copy operation. You specify the value in - // one of two formats, depending on whether you want to access the source object - // through an [access point]: - // - // - For objects not accessed through an access point, specify the name of the - // source bucket and key of the source object, separated by a slash (/). For - // example, to copy the object reports/january.pdf from the bucket - // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must - // be URL-encoded. - // - // - For objects accessed through access points, specify the Amazon Resource - // Name (ARN) of the object as accessed through the access point, in the format - // arn:aws:s3:::accesspoint//object/ . For example, to copy the object - // reports/january.pdf through access point my-access-point owned by account - // 123456789012 in Region us-west-2 , use the URL encoding of - // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf - // . The value must be URL encoded. - // - // - Amazon S3 supports copy operations using Access points only when the source - // and destination buckets are in the same Amazon Web Services Region. 
- // - // - Access points are not supported by directory buckets. - // - // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the - // ARN of the object as accessed in the format - // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object - // reports/january.pdf through outpost my-outpost owned by account 123456789012 - // in Region us-west-2 , use the URL encoding of - // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf - // . The value must be URL-encoded. - // - // If your bucket has versioning enabled, you could have multiple versions of the - // same object. By default, x-amz-copy-source identifies the current version of - // the source object to copy. To copy a specific version of the source object to - // copy, append ?versionId= to the x-amz-copy-source request header (for example, - // x-amz-copy-source: - // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). - // - // If the current version is a delete marker and you don't specify a versionId in - // the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error, - // because the object does not exist. If you specify versionId in the - // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an - // HTTP 400 Bad Request error, because you are not allowed to specify a delete - // marker as a version for the x-amz-copy-source . - // - // Directory buckets - S3 Versioning isn't enabled and supported for directory - // buckets. - // - // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html - // - // This member is required. - CopySource *string - - // Object key for which the multipart upload was initiated. - // - // This member is required. - Key *string - - // Part number of part being copied. This is a positive integer between 1 and - // 10,000. - // - // This member is required. - PartNumber *int32 - - // Upload ID identifying the multipart upload whose part is being copied. - // - // This member is required. - UploadId *string - - // Copies the object if its entity tag (ETag) matches the specified tag. - // - // If both of the x-amz-copy-source-if-match and - // x-amz-copy-source-if-unmodified-since headers are present in the request as - // follows: - // - // x-amz-copy-source-if-match condition evaluates to true , and; - // - // x-amz-copy-source-if-unmodified-since condition evaluates to false ; - // - // Amazon S3 returns 200 OK and copies the data. - CopySourceIfMatch *string - - // Copies the object if it has been modified since the specified time. - // - // If both of the x-amz-copy-source-if-none-match and - // x-amz-copy-source-if-modified-since headers are present in the request as - // follows: - // - // x-amz-copy-source-if-none-match condition evaluates to false , and; - // - // x-amz-copy-source-if-modified-since condition evaluates to true ; - // - // Amazon S3 returns 412 Precondition Failed response code. - CopySourceIfModifiedSince *time.Time - - // Copies the object if its entity tag (ETag) is different than the specified ETag. - // - // If both of the x-amz-copy-source-if-none-match and - // x-amz-copy-source-if-modified-since headers are present in the request as - // follows: - // - // x-amz-copy-source-if-none-match condition evaluates to false , and; - // - // x-amz-copy-source-if-modified-since condition evaluates to true ; - // - // Amazon S3 returns 412 Precondition Failed response code. 
- CopySourceIfNoneMatch *string - - // Copies the object if it hasn't been modified since the specified time. - // - // If both of the x-amz-copy-source-if-match and - // x-amz-copy-source-if-unmodified-since headers are present in the request as - // follows: - // - // x-amz-copy-source-if-match condition evaluates to true , and; - // - // x-amz-copy-source-if-unmodified-since condition evaluates to false ; - // - // Amazon S3 returns 200 OK and copies the data. - CopySourceIfUnmodifiedSince *time.Time - - // The range of bytes to copy from the source object. The range value must use the - // form bytes=first-last, where the first and last are the zero-based byte offsets - // to copy. For example, bytes=0-9 indicates that you want to copy the first 10 - // bytes of the source. You can copy a range only if the source object is greater - // than 5 MB. - CopySourceRange *string - - // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceSSECustomerKeyMD5 *string - - // The account ID of the expected destination bucket owner. If the account ID that - // you provide does not match the actual owner of the destination bucket, the - // request fails with the HTTP status code 403 Forbidden (access denied). - ExpectedBucketOwner *string - - // The account ID of the expected source bucket owner. If the account ID that you - // provide does not match the actual owner of the source bucket, the request fails - // with the HTTP status code 403 Forbidden (access denied). - ExpectedSourceBucketOwner *string - - // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. If either the - // source or destination S3 bucket has Requester Pays enabled, the requester will - // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User - // Guide. - // - // This functionality is not supported for directory buckets. - // - // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer types.RequestPayer - - // Specifies the algorithm to use when encrypting the object (for example, AES256). - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerAlgorithm *string - - // Specifies the customer-provided encryption key for Amazon S3 to use in - // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. 
The key must be - // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerKey *string - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. - // - // This functionality is not supported when the destination bucket is a directory - // bucket. - SSECustomerKeyMD5 *string - - noSmithyDocumentSerde -} - -func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { - - p.Bucket = in.Bucket - p.DisableS3ExpressSessionAuth = ptr.Bool(true) -} - -type UploadPartCopyOutput struct { - - // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). - BucketKeyEnabled *bool - - // Container for all response elements. - CopyPartResult *types.CopyPartResult - - // The version of the source object that was copied, if you have enabled - // versioning on the source bucket. - // - // This functionality is not supported when the source object is in a directory - // bucket. - CopySourceVersionId *string - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. - RequestCharged types.RequestCharged - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to confirm the encryption - // algorithm that's used. - // - // This functionality is not supported for directory buckets. - SSECustomerAlgorithm *string - - // If server-side encryption with a customer-provided encryption key was - // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. - // - // This functionality is not supported for directory buckets. - SSECustomerKeyMD5 *string - - // If present, indicates the ID of the KMS key that was used for object encryption. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). - ServerSideEncryption types.ServerSideEncryption - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPartCopy{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPartCopy{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPartCopy"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPartCopy(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func (v *UploadPartCopyInput) bucket() (string, bool) { - if 
v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -func newServiceMetadataMiddleware_opUploadPartCopy(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "UploadPartCopy", - } -} - -// getUploadPartCopyBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, -func getUploadPartCopyBucketMember(input interface{}) (*string, bool) { - in := input.(*UploadPartCopyInput) - if in.Bucket == nil { - return nil, false - } - return in.Bucket, true -} -func addUploadPartCopyUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: getUploadPartCopyBucketMember, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: false, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go deleted file mode 100644 index 9bce6f8ab..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go +++ /dev/null @@ -1,535 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "strings" - "time" -) - -// This operation is not supported for directory buckets. -// -// Passes transformed objects to a GetObject operation when using Object Lambda -// access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points]in the -// Amazon S3 User Guide. -// -// This operation supports metadata that can be returned by [GetObject], in addition to -// RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . The -// GetObject response metadata is supported so that the WriteGetObjectResponse -// caller, typically an Lambda function, can provide the same metadata when it -// internally invokes GetObject . When WriteGetObjectResponse is called by a -// customer-owned Lambda function, the metadata returned to the end user GetObject -// call might differ from what Amazon S3 would normally return. -// -// You can include any number of metadata headers. When including a metadata -// header, it should be prefaced with x-amz-meta . For example, -// x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to -// forward GetObject metadata. -// -// Amazon Web Services provides some prebuilt Lambda functions that you can use -// with S3 Object Lambda to detect and redact personally identifiable information -// (PII) and decompress S3 objects. 
These Lambda functions are available in the -// Amazon Web Services Serverless Application Repository, and can be selected -// through the Amazon Web Services Management Console when you create your Object -// Lambda access point. -// -// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a -// natural language processing (NLP) service using machine learning to find -// insights and relationships in text. It automatically detects personally -// identifiable information (PII) such as names, addresses, dates, credit card -// numbers, and social security numbers from documents in your Amazon S3 bucket. -// -// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a -// natural language processing (NLP) service using machine learning to find -// insights and relationships in text. It automatically redacts personally -// identifiable information (PII) such as names, addresses, dates, credit card -// numbers, and social security numbers from documents in your Amazon S3 bucket. -// -// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is -// equipped to decompress objects stored in S3 in one of six compressed file -// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. -// -// For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3 -// User Guide. -// -// [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html -// [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html -// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) { - if params == nil { - params = &WriteGetObjectResponseInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "WriteGetObjectResponse", params, optFns, c.addOperationWriteGetObjectResponseMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*WriteGetObjectResponseOutput) - out.ResultMetadata = metadata - return out, nil -} - -type WriteGetObjectResponseInput struct { - - // Route prefix to the HTTP URL generated. - // - // This member is required. - RequestRoute *string - - // A single use encrypted token that maps WriteGetObjectResponse to the end user - // GetObject request. - // - // This member is required. - RequestToken *string - - // Indicates that a range of bytes was specified. - AcceptRanges *string - - // The object data. - Body io.Reader - - // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for - // server-side encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled *bool - - // Specifies caching behavior along the request/reply chain. - CacheControl *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the Base64 - // encoded, 32-bit CRC-32 checksum of the object returned by the Object Lambda - // function. This may not match the checksum for the object stored in Amazon S3. - // Amazon S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. 
For more information about - // checksums, see [Checking object integrity]in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the Base64 - // encoded, 32-bit CRC-32C checksum of the object returned by the Object Lambda - // function. This may not match the checksum for the object stored in Amazon S3. - // Amazon S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see [Checking object integrity]in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 64-bit CRC-64NVME checksum of the part. For more information, - // see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the Base64 - // encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda - // function. This may not match the checksum for the object stored in Amazon S3. - // Amazon S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see [Checking object integrity]in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the Base64 - // encoded, 256-bit SHA-256 digest of the object returned by the Object Lambda - // function. This may not match the checksum for the object stored in Amazon S3. - // Amazon S3 will perform validation of the checksum values only when the original - // GetObject request required checksum validation. For more information about - // checksums, see [Checking object integrity]in the Amazon S3 User Guide. - // - // Only one checksum header can be specified at a time. If you supply multiple - // checksum headers, this request will fail. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Specifies presentational information for the object. 
- ContentDisposition *string - - // Specifies what content encodings have been applied to the object and thus what - // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. - ContentEncoding *string - - // The language the content is in. - ContentLanguage *string - - // The size of the content body in bytes. - ContentLength *int64 - - // The portion of the object returned in the response. - ContentRange *string - - // A standard MIME type describing the format of the object data. - ContentType *string - - // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false ) - // a delete marker. - DeleteMarker *bool - - // An opaque identifier assigned by a web server to a specific version of a - // resource found at a URL. - ETag *string - - // A string that uniquely identifies an error condition. Returned in the tag of - // the error XML response for a corresponding GetObject call. Cannot be used with - // a successful StatusCode header or when the transformed object is provided in - // the body. All error codes from S3 are sentence-cased. The regular expression - // (regex) value is "^[A-Z][a-zA-Z]+$" . - ErrorCode *string - - // Contains a generic description of the error condition. Returned in the tag of - // the error XML response for a corresponding GetObject call. Cannot be used with - // a successful StatusCode header or when the transformed object is provided in - // body. - ErrorMessage *string - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // that provide the object expiration information. The value of the rule-id is - // URL-encoded. - Expiration *string - - // The date and time at which the object is no longer cacheable. - Expires *time.Time - - // The date and time that the object was last modified. - LastModified *time.Time - - // A map of metadata to store with the object in S3. - Metadata map[string]string - - // Set to the number of metadata entries not returned in x-amz-meta headers. This - // can happen if you create metadata using an API like SOAP that supports more - // flexible metadata than the REST API. For example, using SOAP, you can create - // metadata whose values are not legal HTTP headers. - MissingMeta *int32 - - // Indicates whether an object stored in Amazon S3 has an active legal hold. - ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - - // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For - // more information about S3 Object Lock, see [Object Lock]. - // - // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html - ObjectLockMode types.ObjectLockMode - - // The date and time when Object Lock is configured to expire. - ObjectLockRetainUntilDate *time.Time - - // The count of parts this object has. - PartsCount *int32 - - // Indicates if request involves bucket that is either a source or destination in - // a Replication rule. For more information about S3 Replication, see [Replication]. - // - // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html - ReplicationStatus types.ReplicationStatus - - // If present, indicates that the requester was successfully charged for the - // request. - // - // This functionality is not supported for directory buckets. 
- RequestCharged types.RequestCharged - - // Provides information about object restoration operation and expiration time of - // the restored object copy. - Restore *string - - // Encryption algorithm used if server-side encryption with a customer-provided - // encryption key was specified for object stored in Amazon S3. - SSECustomerAlgorithm *string - - // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to - // encrypt data stored in S3. For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]. - // - // [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html - SSECustomerKeyMD5 *string - - // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web - // Services Key Management Service (Amazon Web Services KMS) symmetric encryption - // customer managed key that was used for stored in Amazon S3 object. - SSEKMSKeyId *string - - // The server-side encryption algorithm used when storing requested object in - // Amazon S3 (for example, AES256, aws:kms ). - ServerSideEncryption types.ServerSideEncryption - - // The integer status code for an HTTP response of a corresponding GetObject - // request. The following is a list of status codes. - // - // - 200 - OK - // - // - 206 - Partial Content - // - // - 304 - Not Modified - // - // - 400 - Bad Request - // - // - 401 - Unauthorized - // - // - 403 - Forbidden - // - // - 404 - Not Found - // - // - 405 - Method Not Allowed - // - // - 409 - Conflict - // - // - 411 - Length Required - // - // - 412 - Precondition Failed - // - // - 416 - Range Not Satisfiable - // - // - 500 - Internal Server Error - // - // - 503 - Service Unavailable - StatusCode *int32 - - // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. - // - // For more information, see [Storage Classes]. - // - // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - StorageClass types.StorageClass - - // The number of tags, if any, on the object. - TagCount *int32 - - // An ID used to reference a specific version of the object. - VersionId *string - - noSmithyDocumentSerde -} - -func (in *WriteGetObjectResponseInput) bindEndpointParams(p *EndpointParameters) { - - p.UseObjectLambdaEndpoint = ptr.Bool(true) -} - -type WriteGetObjectResponseOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestxml_serializeOpWriteGetObjectResponse{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestxml_deserializeOpWriteGetObjectResponse{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "WriteGetObjectResponse"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addUnsignedPayload(stack); err != nil { - return err - } - if err = addContentSHA256Header(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addPutBucketContextMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addIsExpressUserAgent(stack); err != nil { - return err - } - if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { - return err - } - if err = addOpWriteGetObjectResponseValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opWriteGetObjectResponse(options.Region), middleware.Before); err != nil { - return err - } - if err = addMetadataRetrieverMiddleware(stack); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil { - return err - } - if err = disableAcceptEncodingGzip(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = 
addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -type endpointPrefix_opWriteGetObjectResponseMiddleware struct { -} - -func (*endpointPrefix_opWriteGetObjectResponseMiddleware) ID() string { - return "EndpointHostPrefix" -} - -func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if smithyhttp.GetHostnameImmutable(ctx) || smithyhttp.IsEndpointHostPrefixDisabled(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - opaqueInput := getOperationInput(ctx) - input, ok := opaqueInput.(*WriteGetObjectResponseInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input type %T", opaqueInput) - } - - var prefix strings.Builder - if input.RequestRoute == nil { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so may not be nil")} - } else if !smithyhttp.ValidHostLabel(*input.RequestRoute) { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so must match \"[a-zA-Z0-9-]{1,63}\", but was \"%s\"", *input.RequestRoute)} - } else { - prefix.WriteString(*input.RequestRoute) - } - prefix.WriteString(".") - req.URL.Host = prefix.String() + req.URL.Host - - return next.HandleFinalize(ctx, in) -} -func addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&endpointPrefix_opWriteGetObjectResponseMiddleware{}, "ResolveEndpointV2", middleware.After) -} - -func newServiceMetadataMiddleware_opWriteGetObjectResponse(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "WriteGetObjectResponse", - } -} - -func addWriteGetObjectResponseUpdateEndpoint(stack *middleware.Stack, options Options) error { - return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ - Accessor: s3cust.UpdateEndpointParameterAccessor{ - GetBucketFromInput: nopGetBucketAccessor, - }, - UsePathStyle: options.UsePathStyle, - UseAccelerate: options.UseAccelerate, - SupportsAccelerate: true, - TargetS3ObjectLambda: true, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointOptions, - UseARNRegion: options.UseARNRegion, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go deleted file mode 100644 index 6faedcc0b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go +++ /dev/null @@ -1,347 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -func bindAuthEndpointParams(ctx context.Context, params *AuthResolverParameters, input interface{}, options Options) { - params.endpointParams = bindEndpointParams(ctx, input, options) -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The endpoint resolver parameters for this operation. This service's default - // resolver delegates to endpoint rules. - endpointParams *EndpointParameters - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthEndpointParams(ctx, params, input, options) - bindAuthParamsRegion(ctx, params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. 
-type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "WriteGetObjectResponse": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "s3") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - smithyhttp.SetIsUnsignedPayload(&props, true) - return props - }(), - }, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "s3") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - - { - SchemeID: smithyauth.SchemeIDSigV4A, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4ASigningName(&props, "s3") - smithyhttp.SetSigV4ASigningRegions(&props, []string{params.Region}) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") - defer span.End() - - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - - span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) - span.End() - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return 
&resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") - defer span.End() - - rscheme := getResolvedAuthScheme(innerCtx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", - func() (smithyauth.Identity, error) { - return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) - }, - func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - - span.End() - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { - options Options -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "SignRequest") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { - return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) - }, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go deleted file mode 100644 index 860af056a..000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go +++ /dev/null @@ -1,47 +0,0 @@ -package s3 - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - "github.com/aws/smithy-go/middleware" -) - -// putBucketContextMiddleware stores the input bucket name within the request context (if -// present) which is required for a variety of custom S3 behaviors -type putBucketContextMiddleware struct{} - -func (*putBucketContextMiddleware) ID() string { - return "putBucketContext" -} - -func (m *putBucketContextMiddleware) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if bucket, ok := m.bucketFromInput(in.Parameters); ok { - ctx = customizations.SetBucket(ctx, bucket) - } - return next.HandleSerialize(ctx, in) -} - -func (m *putBucketContextMiddleware) bucketFromInput(params interface{}) (string, bool) { - v, ok := params.(bucketer) - if !ok { - return "", false - } - - return v.bucket() -} - -func addPutBucketContextMiddleware(stack *middleware.Stack) error { - // This is essentially a post-Initialize task - only run it once the input - // has received all modifications from that phase. Therefore we add it as - // an early Serialize step. - // - // FUTURE: it would be nice to have explicit phases that only we as SDK - // authors can hook into (such as between phases like this really should - // be) - return stack.Serialize.Add(&putBucketContextMiddleware{}, middleware.Before) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go deleted file mode 100644 index 4e7f7e24e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go +++ /dev/null @@ -1,15 +0,0 @@ -package s3 - -// implemented by all S3 input structures -type bucketer interface { - bucket() (string, bool) -} - -func bucketFromInput(params interface{}) (string, bool) { - v, ok := params.(bucketer) - if !ok { - return "", false - } - - return v.bucket() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go deleted file mode 100644 index 5803b9e45..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/smithy-go/middleware" -) - -// backfills checksum algorithm onto the context for CreateMultipart upload so -// transfer manager can set a checksum header on the request accordingly for -// s3express requests -type setCreateMPUChecksumAlgorithm struct{} - -func (*setCreateMPUChecksumAlgorithm) ID() string { - return "setCreateMPUChecksumAlgorithm" -} - -func (*setCreateMPUChecksumAlgorithm) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateMultipartUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unexpected input type %T", in.Parameters) - } - - ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(input.ChecksumAlgorithm)) - return next.HandleSerialize(ctx, in) -} - -func addSetCreateMPUChecksumAlgorithm(s *middleware.Stack) error { - return 
s.Serialize.Add(&setCreateMPUChecksumAlgorithm{}, middleware.Before) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go deleted file mode 100644 index 877e4231b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go +++ /dev/null @@ -1,24189 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" - awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - smithyxml "github.com/aws/smithy-go/encoding/xml" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithytime "github.com/aws/smithy-go/time" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "io/ioutil" - "strconv" - "strings" - "time" -) - -func deserializeS3Expires(v string) (*time.Time, error) { - t, err := smithytime.ParseHTTPDate(v) - if err != nil { - return nil, nil - } - return &t, nil -} - -type awsRestxml_deserializeOpAbortMultipartUpload struct { -} - -func (*awsRestxml_deserializeOpAbortMultipartUpload) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorAbortMultipartUpload(response, &metadata) - } - output := &AbortMultipartUploadOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorAbortMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 
0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchUpload", errorCode): - return awsRestxml_deserializeErrorNoSuchUpload(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(v *AbortMultipartUploadOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpCompleteMultipartUpload struct { -} - -func (*awsRestxml_deserializeOpCompleteMultipartUpload) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCompleteMultipartUpload(response, &metadata) - } - output := &CompleteMultipartUploadOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCompleteMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer 
bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *CompleteMultipartUploadOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Expiration = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *CompleteMultipartUploadOutput - if *v == nil { - sv = &CompleteMultipartUploadOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - 
return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("Location", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Location = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpCopyObject struct { -} - -func (*awsRestxml_deserializeOpCopyObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCopyObject(response, &metadata) - } - output := &CopyObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) 
- if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentCopyObjectResult(&output.CopyObjectResult, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCopyObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ObjectNotInActiveTierError", errorCode): - return awsRestxml_deserializeErrorObjectNotInActiveTierError(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.CopySourceVersionId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Expiration = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); 
len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentCopyObjectOutput(v **CopyObjectOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *CopyObjectOutput - if *v == nil { - sv = &CopyObjectOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CopyObjectResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCopyObjectResult(&sv.CopyObjectResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpCreateBucket struct { -} - -func (*awsRestxml_deserializeOpCreateBucket) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateBucket(response, &metadata) - } - output := &CreateBucketOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil 
{ - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("BucketAlreadyExists", errorCode): - return awsRestxml_deserializeErrorBucketAlreadyExists(response, errorBody) - - case strings.EqualFold("BucketAlreadyOwnedByYou", errorCode): - return awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("Location"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Location = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response, &metadata) - } - output := &CreateBucketMetadataTableConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - 
errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpCreateMultipartUpload struct { -} - -func (*awsRestxml_deserializeOpCreateMultipartUpload) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateMultipartUpload(response, &metadata) - } - output := &CreateMultipartUploadOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - 
errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMultipartUploadOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) - if err != nil { - return err - } - v.AbortDate = ptr.Time(t) - } - - if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.AbortRuleId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-checksum-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumType = types.ChecksumType(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = 
strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMultipartUploadOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *CreateMultipartUploadOutput - if *v == nil { - sv = &CreateMultipartUploadOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("UploadId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UploadId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpCreateSession struct { -} - -func (*awsRestxml_deserializeOpCreateSession) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorCreateSession(response, &metadata) - } - output := &CreateSessionOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentCreateSessionOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: 
fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchBucket", errorCode): - return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(v *CreateSessionOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *CreateSessionOutput - if *v == nil { - sv = &CreateSessionOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Credentials", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSessionCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != 
nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpDeleteBucket struct { -} - -func (*awsRestxml_deserializeOpDeleteBucket) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucket(response, &metadata) - } - output := &DeleteBucketOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - 
- if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response, &metadata) - } - output := &DeleteBucketAnalyticsConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketCors struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketCors) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketCors(response, &metadata) - } - output := &DeleteBucketCorsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := 
s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketEncryption struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketEncryption) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketEncryption(response, &metadata) - } - output := &DeleteBucketEncryptionOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { - return 
"OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response, &metadata) - } - output := &DeleteBucketIntelligentTieringConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketInventoryConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response, &metadata) - } - output := &DeleteBucketInventoryConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketLifecycle struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketLifecycle) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response, &metadata) - } - output := &DeleteBucketLifecycleOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, 
s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response, &metadata) - } - output := &DeleteBucketMetadataTableConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) ID() string { 
- return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response, &metadata) - } - output := &DeleteBucketMetricsConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketOwnershipControls struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketOwnershipControls) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response, &metadata) - } - output := &DeleteBucketOwnershipControlsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketPolicy struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketPolicy(response, &metadata) - } - output := &DeleteBucketPolicyOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: 
true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketReplication struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketReplication) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketReplication(response, &metadata) - } - output := &DeleteBucketReplicationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketTagging struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketTagging(response, &metadata) - } - output := &DeleteBucketTaggingOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteBucketWebsite struct { -} - -func (*awsRestxml_deserializeOpDeleteBucketWebsite) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketWebsite(response, &metadata) - } - output := &DeleteBucketWebsiteOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard 
response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpDeleteObject struct { -} - -func (*awsRestxml_deserializeOpDeleteObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteObject(response, &metadata) - } - output := &DeleteObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if 
len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.DeleteMarker = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpDeleteObjects struct { -} - -func (*awsRestxml_deserializeOpDeleteObjects) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteObjects(response, &metadata) - } - output := &DeleteObjectsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentDeleteObjectsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteObjects(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(v *DeleteObjectsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentDeleteObjectsOutput(v **DeleteObjectsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *DeleteObjectsOutput - if *v == nil { - sv = &DeleteObjectsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Deleted", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(&sv.Deleted, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Error", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentErrorsUnwrapped(&sv.Errors, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpDeleteObjectTagging struct { -} - -func (*awsRestxml_deserializeOpDeleteObjectTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, 
metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeleteObjectTagging(response, &metadata) - } - output := &DeleteObjectTaggingOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeleteObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(v *DeleteObjectTaggingOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpDeletePublicAccessBlock struct { -} - -func (*awsRestxml_deserializeOpDeletePublicAccessBlock) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response, &metadata) - } - output := &DeletePublicAccessBlockOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: 
fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpGetBucketAccelerateConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketAccelerateConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response, &metadata) - } - output := &GetBucketAccelerateConfigurationOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, 
&smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(v *GetBucketAccelerateConfigurationOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(v **GetBucketAccelerateConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketAccelerateConfigurationOutput - if *v == nil { - sv = &GetBucketAccelerateConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.BucketAccelerateStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketAcl struct { -} - -func (*awsRestxml_deserializeOpGetBucketAcl) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() 
- defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketAcl(response, &metadata) - } - output := &GetBucketAclOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketAclOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketAclOutput(v **GetBucketAclOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketAclOutput - if *v == nil { - sv = &GetBucketAclOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessControlList", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag 
element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response, &metadata) - } - output := &GetBucketAnalyticsConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentAnalyticsConfiguration(&output.AnalyticsConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func 
awsRestxml_deserializeOpDocumentGetBucketAnalyticsConfigurationOutput(v **GetBucketAnalyticsConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketAnalyticsConfigurationOutput - if *v == nil { - sv = &GetBucketAnalyticsConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&sv.AnalyticsConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketCors struct { -} - -func (*awsRestxml_deserializeOpGetBucketCors) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketCors(response, &metadata) - } - output := &GetBucketCorsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketCorsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, 
s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketCorsOutput(v **GetBucketCorsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketCorsOutput - if *v == nil { - sv = &GetBucketCorsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CORSRule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCORSRulesUnwrapped(&sv.CORSRules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketEncryption struct { -} - -func (*awsRestxml_deserializeOpGetBucketEncryption) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketEncryption(response, &metadata) - } - output := &GetBucketEncryptionOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&output.ServerSideEncryptionConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, 
&smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketEncryptionOutput(v **GetBucketEncryptionOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketEncryptionOutput - if *v == nil { - sv = &GetBucketEncryptionOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ServerSideEncryptionConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&sv.ServerSideEncryptionConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response, 
&metadata) - } - output := &GetBucketIntelligentTieringConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&output.IntelligentTieringConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketIntelligentTieringConfigurationOutput(v **GetBucketIntelligentTieringConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketIntelligentTieringConfigurationOutput - if *v == nil { - sv = &GetBucketIntelligentTieringConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&sv.IntelligentTieringConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketInventoryConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketInventoryConfiguration) ID() 
string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response, &metadata) - } - output := &GetBucketInventoryConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentInventoryConfiguration(&output.InventoryConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketInventoryConfigurationOutput(v **GetBucketInventoryConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketInventoryConfigurationOutput - if *v == nil { - sv = &GetBucketInventoryConfigurationOutput{} - } else { - 
sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("InventoryConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryConfiguration(&sv.InventoryConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketLifecycleConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketLifecycleConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response, &metadata) - } - output := &GetBucketLifecycleConfigurationOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := 
s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(v *GetBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketLifecycleConfigurationOutput - if *v == nil { - sv = &GetBucketLifecycleConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketLocation struct { -} - -func (*awsRestxml_deserializeOpGetBucketLocation) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata) - } - output := &GetBucketLocationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := 
xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketLocation(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketLocationOutput(v **GetBucketLocationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketLocationOutput - if *v == nil { - sv = &GetBucketLocationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("LocationConstraint", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.LocationConstraint = types.BucketLocationConstraint(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketLogging struct { -} - -func (*awsRestxml_deserializeOpGetBucketLogging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer 
:= startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketLogging(response, &metadata) - } - output := &GetBucketLoggingOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLoggingOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketLoggingOutput - if *v == nil { - sv = &GetBucketLoggingOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("LoggingEnabled", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLoggingEnabled(&sv.LoggingEnabled, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - 
decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response, &metadata) - } - output := &GetBucketMetadataTableConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&output.GetBucketMetadataTableConfigurationResult, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketMetadataTableConfigurationOutput(v 
**GetBucketMetadataTableConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketMetadataTableConfigurationOutput - if *v == nil { - sv = &GetBucketMetadataTableConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("GetBucketMetadataTableConfigurationResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&sv.GetBucketMetadataTableConfigurationResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) - } - output := &GetBucketMetricsConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - 
errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketMetricsConfigurationOutput - if *v == nil { - sv = &GetBucketMetricsConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("MetricsConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { -} - -func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata) - } - output := &GetBucketNotificationConfigurationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := 
smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketNotificationConfigurationOutput - if *v == nil { - sv = &GetBucketNotificationConfigurationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("EventBridgeConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("QueueConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(&sv.QueueConfigurations, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("TopicConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(&sv.TopicConfigurations, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} 
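
Note (editor's illustrative sketch, not part of the vendored file being removed above): the generated deserializer for GetBucketNotificationConfiguration is normally reached only through the aws-sdk-go-v2 S3 client's middleware stack, never called directly. A minimal, hedged usage example — the bucket name is a placeholder and error handling is reduced to log.Fatal — might look like this:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Load region and credentials from the default chain (env vars, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The REST-XML response body is decoded by the deserializer middleware shown
	// above into GetBucketNotificationConfigurationOutput.
	out, err := client.GetBucketNotificationConfiguration(context.TODO(),
		&s3.GetBucketNotificationConfigurationInput{
			Bucket: aws.String("example-bucket"), // placeholder bucket name
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("topic configurations:", len(out.TopicConfigurations))
	fmt.Println("queue configurations:", len(out.QueueConfigurations))
}
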
- -type awsRestxml_deserializeOpGetBucketOwnershipControls struct { -} - -func (*awsRestxml_deserializeOpGetBucketOwnershipControls) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response, &metadata) - } - output := &GetBucketOwnershipControlsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentOwnershipControls(&output.OwnershipControls, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketOwnershipControlsOutput(v **GetBucketOwnershipControlsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv 
*GetBucketOwnershipControlsOutput - if *v == nil { - sv = &GetBucketOwnershipControlsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("OwnershipControls", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwnershipControls(&sv.OwnershipControls, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketPolicy struct { -} - -func (*awsRestxml_deserializeOpGetBucketPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicy(response, &metadata) - } - output := &GetBucketPolicyOutput{} - out.Result = output - - err = awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(output, response.Body, response.ContentLength) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(v *GetBucketPolicyOutput, body io.ReadCloser, contentLength int64) error { - if v == nil { - return 
fmt.Errorf("unsupported deserialization of nil %T", v) - } - var buf bytes.Buffer - if contentLength > 0 { - buf.Grow(int(contentLength)) - } else { - buf.Grow(512) - } - - _, err := buf.ReadFrom(body) - if err != nil { - return err - } - if buf.Len() > 0 { - v.Policy = ptr.String(buf.String()) - } - return nil -} - -type awsRestxml_deserializeOpGetBucketPolicyStatus struct { -} - -func (*awsRestxml_deserializeOpGetBucketPolicyStatus) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response, &metadata) - } - output := &GetBucketPolicyStatusOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentPolicyStatus(&output.PolicyStatus, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - 
return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketPolicyStatusOutput(v **GetBucketPolicyStatusOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketPolicyStatusOutput - if *v == nil { - sv = &GetBucketPolicyStatusOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("PolicyStatus", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentPolicyStatus(&sv.PolicyStatus, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketReplication struct { -} - -func (*awsRestxml_deserializeOpGetBucketReplication) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketReplication(response, &metadata) - } - output := &GetBucketReplicationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentReplicationConfiguration(&output.ReplicationConfiguration, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := 
s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketReplicationOutput(v **GetBucketReplicationOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketReplicationOutput - if *v == nil { - sv = &GetBucketReplicationOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ReplicationConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketRequestPayment struct { -} - -func (*awsRestxml_deserializeOpGetBucketRequestPayment) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketRequestPayment(response, &metadata) - } - output := &GetBucketRequestPaymentOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(&output, decoder) - if err != nil { - 
var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(v **GetBucketRequestPaymentOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketRequestPaymentOutput - if *v == nil { - sv = &GetBucketRequestPaymentOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Payer", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Payer = types.Payer(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketTagging struct { -} - -func (*awsRestxml_deserializeOpGetBucketTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketTagging(response, &metadata) - } - output := &GetBucketTaggingOutput{} - out.Result = output - - var buff [1024]byte 
- ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorGetBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(v **GetBucketTaggingOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *GetBucketTaggingOutput - if *v == nil { - sv = &GetBucketTaggingOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("TagSet", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpGetBucketVersioning struct { -} - -func (*awsRestxml_deserializeOpGetBucketVersioning) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, 
[... deleted lines of the vendored AWS SDK Go v2 S3 REST-XML deserializer code (`awsRestxml_deserializeOp*` handlers, their error deserializers, and HTTP-binding helpers for GetBucketVersioning, GetBucketWebsite, GetObject, GetObjectAcl, GetObjectAttributes, GetObjectLegalHold, GetObjectLockConfiguration, GetObjectRetention, GetObjectTagging, GetObjectTorrent, GetPublicAccessBlock, HeadBucket, HeadObject, ListBucketAnalyticsConfigurations) ...]
response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response, &metadata) - } - output := &ListBucketAnalyticsConfigurationsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v **ListBucketAnalyticsConfigurationsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketAnalyticsConfigurationsOutput - if *v == nil { - sv = &ListBucketAnalyticsConfigurationsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AnalyticsConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(&sv.AnalyticsConfigurationList, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case 
strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations struct { -} - -func (*awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response, &metadata) - } - output := &ListBucketIntelligentTieringConfigurationsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, 
StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(v **ListBucketIntelligentTieringConfigurationsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketIntelligentTieringConfigurationsOutput - if *v == nil { - sv = &ListBucketIntelligentTieringConfigurationsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(&sv.IntelligentTieringConfigurationList, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListBucketInventoryConfigurations struct { -} - -func (*awsRestxml_deserializeOpListBucketInventoryConfigurations) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || 
response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response, &metadata) - } - output := &ListBucketInventoryConfigurationsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v **ListBucketInventoryConfigurationsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketInventoryConfigurationsOutput - if *v == nil { - sv = &ListBucketInventoryConfigurationsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("InventoryConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(&sv.InventoryConfigurationList, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err 
:= decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListBucketMetricsConfigurations struct { -} - -func (*awsRestxml_deserializeOpListBucketMetricsConfigurations) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response, &metadata) - } - output := &ListBucketMetricsConfigurationsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - 
s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **ListBucketMetricsConfigurationsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketMetricsConfigurationsOutput - if *v == nil { - sv = &ListBucketMetricsConfigurationsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("MetricsConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(&sv.MetricsConfigurationList, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListBuckets struct { -} - -func (*awsRestxml_deserializeOpListBuckets) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListBuckets(response, &metadata) - } - output := &ListBucketsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, 
ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListBucketsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListBucketsOutput - if *v == nil { - sv = &ListBucketsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Buckets", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - 
} - *v = sv - return nil -} - -type awsRestxml_deserializeOpListDirectoryBuckets struct { -} - -func (*awsRestxml_deserializeOpListDirectoryBuckets) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListDirectoryBuckets(response, &metadata) - } - output := &ListDirectoryBucketsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListDirectoryBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(v **ListDirectoryBucketsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListDirectoryBucketsOutput - if *v == nil { - 
sv = &ListDirectoryBucketsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Buckets", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListMultipartUploads struct { -} - -func (*awsRestxml_deserializeOpListMultipartUploads) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata) - } - output := &ListMultipartUploadsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := 
bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(v *ListMultipartUploadsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListMultipartUploadsOutput - if *v == nil { - sv = &ListMultipartUploadsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("CommonPrefixes", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Delimiter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Delimiter = ptr.String(xtv) - } - - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("KeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyMarker = ptr.String(xtv) - } - - case strings.EqualFold("MaxUploads", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err 
!= nil { - return err - } - sv.MaxUploads = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("NextKeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextKeyMarker = ptr.String(xtv) - } - - case strings.EqualFold("NextUploadIdMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextUploadIdMarker = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("UploadIdMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UploadIdMarker = ptr.String(xtv) - } - - case strings.EqualFold("Upload", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(&sv.Uploads, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListObjects struct { -} - -func (*awsRestxml_deserializeOpListObjects) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListObjects(response, &metadata) - } - output := &ListObjectsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListObjectsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListObjectsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: 
snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchBucket", errorCode): - return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListObjectsOutput(v *ListObjectsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListObjectsOutput - if *v == nil { - sv = &ListObjectsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CommonPrefixes", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Contents", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Delimiter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Delimiter = ptr.String(xtv) - } - - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type 
*bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("Marker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Marker = ptr.String(xtv) - } - - case strings.EqualFold("MaxKeys", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxKeys = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = ptr.String(xtv) - } - - case strings.EqualFold("NextMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextMarker = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListObjectsV2 struct { -} - -func (*awsRestxml_deserializeOpListObjectsV2) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListObjectsV2(response, &metadata) - } - output := &ListObjectsV2Output{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListObjectsV2Output(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: 
snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListObjectsV2(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchBucket", errorCode): - return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(v *ListObjectsV2Output, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListObjectsV2Output - if *v == nil { - sv = &ListObjectsV2Output{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CommonPrefixes", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Contents", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("Delimiter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Delimiter = ptr.String(xtv) - } - - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - - case 
strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("KeyCount", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.KeyCount = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("MaxKeys", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxKeys = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = ptr.String(xtv) - } - - case strings.EqualFold("NextContinuationToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextContinuationToken = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("StartAfter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StartAfter = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListObjectVersions struct { -} - -func (*awsRestxml_deserializeOpListObjectVersions) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListObjectVersions(response, &metadata) - } - output := &ListObjectVersionsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err 
== io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListObjectVersionsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListObjectVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(v *ListObjectVersionsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVersionsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListObjectVersionsOutput - if *v == nil { - sv = &ListObjectVersionsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CommonPrefixes", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("DeleteMarker", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(&sv.DeleteMarkers, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Delimiter", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { 
- xtv := string(val) - sv.Delimiter = ptr.String(xtv) - } - - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("KeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyMarker = ptr.String(xtv) - } - - case strings.EqualFold("MaxKeys", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxKeys = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = ptr.String(xtv) - } - - case strings.EqualFold("NextKeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextKeyMarker = ptr.String(xtv) - } - - case strings.EqualFold("NextVersionIdMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextVersionIdMarker = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("VersionIdMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionIdMarker = ptr.String(xtv) - } - - case strings.EqualFold("Version", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectVersionListUnwrapped(&sv.Versions, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpListParts struct { -} - -func (*awsRestxml_deserializeOpListParts) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || 
response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListParts(response, &metadata) - } - output := &ListPartsOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsListPartsOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListPartsOutput(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorListParts(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsListPartsOutput(v *ListPartsOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) - if err != nil { - return err - } - v.AbortDate = ptr.Time(t) - } - - if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.AbortRuleId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decoder smithyxml.NodeDecoder) error { - if v 
== nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *ListPartsOutput - if *v == nil { - sv = &ListPartsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("Initiator", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = ptr.Bool(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("MaxParts", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxParts = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("NextPartNumberMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.NextPartNumberMarker = ptr.String(xtv) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("PartNumberMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.PartNumberMarker = ptr.String(xtv) - } - - case strings.EqualFold("Part", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentPartsUnwrapped(&sv.Parts, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.StorageClass(xtv) - } - - case strings.EqualFold("UploadId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.UploadId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err 
!= nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpPutBucketAccelerateConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketAccelerateConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response, &metadata) - } - output := &PutBucketAccelerateConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketAcl struct { -} - -func (*awsRestxml_deserializeOpPutBucketAcl) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: 
fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketAcl(response, &metadata) - } - output := &PutBucketAclOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response, &metadata) - } - output := &PutBucketAnalyticsConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := 
"UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketCors struct { -} - -func (*awsRestxml_deserializeOpPutBucketCors) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketCors(response, &metadata) - } - output := &PutBucketCorsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketEncryption struct { -} - -func (*awsRestxml_deserializeOpPutBucketEncryption) ID() string { - return "OperationDeserializer" -} - -func (m 
*awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketEncryption(response, &metadata) - } - output := &PutBucketEncryptionOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response, &metadata) - } - output := 
&PutBucketIntelligentTieringConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketInventoryConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketInventoryConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response, &metadata) - } - output := &PutBucketInventoryConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - 
UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketLifecycleConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketLifecycleConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response, &metadata) - } - output := &PutBucketLifecycleConfigurationOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(v *PutBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { - if v == nil { - 
return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutBucketLogging struct { -} - -func (*awsRestxml_deserializeOpPutBucketLogging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketLogging(response, &metadata) - } - output := &PutBucketLoggingOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketMetricsConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketMetricsConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := 
startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response, &metadata) - } - output := &PutBucketMetricsConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketNotificationConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutBucketNotificationConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response, &metadata) - } - output := &PutBucketNotificationConfigurationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketOwnershipControls struct { -} - -func (*awsRestxml_deserializeOpPutBucketOwnershipControls) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response, &metadata) - } - output := &PutBucketOwnershipControlsOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = 
errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketPolicy struct { -} - -func (*awsRestxml_deserializeOpPutBucketPolicy) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketPolicy(response, &metadata) - } - output := &PutBucketPolicyOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketReplication struct { -} - -func (*awsRestxml_deserializeOpPutBucketReplication) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketReplication(response, &metadata) - } - output := &PutBucketReplicationOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketRequestPayment struct { -} - -func (*awsRestxml_deserializeOpPutBucketRequestPayment) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketRequestPayment(response, &metadata) - } - output := &PutBucketRequestPaymentOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - 
errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketTagging struct { -} - -func (*awsRestxml_deserializeOpPutBucketTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketTagging(response, &metadata) - } - output := &PutBucketTaggingOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketVersioning struct { -} - -func (*awsRestxml_deserializeOpPutBucketVersioning) ID() string { - return 
"OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketVersioning(response, &metadata) - } - output := &PutBucketVersioningOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutBucketWebsite struct { -} - -func (*awsRestxml_deserializeOpPutBucketWebsite) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutBucketWebsite(response, &metadata) - } - output := &PutBucketWebsiteOutput{} - out.Result = output - - if _, err = 
io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpPutObject struct { -} - -func (*awsRestxml_deserializeOpPutObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObject(response, &metadata) - } - output := &PutObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if 
reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("EncryptionTypeMismatch", errorCode): - return awsRestxml_deserializeErrorEncryptionTypeMismatch(response, errorBody) - - case strings.EqualFold("InvalidRequest", errorCode): - return awsRestxml_deserializeErrorInvalidRequest(response, errorBody) - - case strings.EqualFold("InvalidWriteOffset", errorCode): - return awsRestxml_deserializeErrorInvalidWriteOffset(response, errorBody) - - case strings.EqualFold("TooManyParts", errorCode): - return awsRestxml_deserializeErrorTooManyParts(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32C = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC64NVME = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA1 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA256 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumType = types.ChecksumType(headerValues[0]) - } - - if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ETag = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.Expiration = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - 
if headerValues := response.Header.Values("x-amz-object-size"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseInt(headerValues[0], 0, 64) - if err != nil { - return err - } - v.Size = ptr.Int64(vv) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectAcl struct { -} - -func (*awsRestxml_deserializeOpPutObjectAcl) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectAcl(response, &metadata) - } - output := &PutObjectAclOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := 
errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("NoSuchKey", errorCode): - return awsRestxml_deserializeErrorNoSuchKey(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(v *PutObjectAclOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectLegalHold struct { -} - -func (*awsRestxml_deserializeOpPutObjectLegalHold) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectLegalHold(response, &metadata) - } - output := &PutObjectLegalHoldOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - 
Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(v *PutObjectLegalHoldOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectLockConfiguration struct { -} - -func (*awsRestxml_deserializeOpPutObjectLockConfiguration) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response, &metadata) - } - output := &PutObjectLockConfigurationOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(v *PutObjectLockConfigurationOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - 
headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectRetention struct { -} - -func (*awsRestxml_deserializeOpPutObjectRetention) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectRetention(response, &metadata) - } - output := &PutObjectRetentionOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(v *PutObjectRetentionOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutObjectTagging struct { -} - -func (*awsRestxml_deserializeOpPutObjectTagging) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutObjectTagging(response, &metadata) - } - output := &PutObjectTaggingOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(v *PutObjectTaggingOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.VersionId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpPutPublicAccessBlock struct { -} - -func (*awsRestxml_deserializeOpPutPublicAccessBlock) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown 
transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorPutPublicAccessBlock(response, &metadata) - } - output := &PutPublicAccessBlockOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorPutPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpRestoreObject struct { -} - -func (*awsRestxml_deserializeOpRestoreObject) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorRestoreObject(response, &metadata) - } - output := &RestoreObjectOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorRestoreObject(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - 
errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - case strings.EqualFold("ObjectAlreadyInActiveTierError", errorCode): - return awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(v *RestoreObjectOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-restore-output-path"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RestoreOutputPath = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpSelectObjectContent struct { -} - -func (*awsRestxml_deserializeOpSelectObjectContent) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorSelectObjectContent(response, &metadata) - } - output := &SelectObjectContentOutput{} - out.Result = output - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorSelectObjectContent(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := 
errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsRestxml_deserializeOpUploadPart struct { -} - -func (*awsRestxml_deserializeOpUploadPart) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorUploadPart(response, &metadata) - } - output := &UploadPartOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsUploadPartOutput(output, response) - if err != nil { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorUploadPart(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := 
response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC32C = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumCRC64NVME = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA1 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ChecksumSHA256 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ETag = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - return nil -} - -type awsRestxml_deserializeOpUploadPartCopy struct { -} - -func (*awsRestxml_deserializeOpUploadPartCopy) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorUploadPartCopy(response, &metadata) - } - output := &UploadPartCopyOutput{} - out.Result = output - - err = awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(output, response) - if err != nil { - return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(response.Body, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentCopyPartResult(&output.CopyPartResult, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorUploadPartCopy(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyOutput, response *smithyhttp.Response) error { - if v == nil { - return fmt.Errorf("unsupported deserialization for nil %T", v) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - vv, err := strconv.ParseBool(headerValues[0]) - if err != nil { - return err - } - v.BucketKeyEnabled = ptr.Bool(vv) - } - - if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.CopySourceVersionId = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.RequestCharged = types.RequestCharged(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 
{ - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerAlgorithm = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSECustomerKeyMD5 = ptr.String(headerValues[0]) - } - - if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - v.SSEKMSKeyId = ptr.String(headerValues[0]) - } - - return nil -} -func awsRestxml_deserializeOpDocumentUploadPartCopyOutput(v **UploadPartCopyOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *UploadPartCopyOutput - if *v == nil { - sv = &UploadPartCopyOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CopyPartResult", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCopyPartResult(&sv.CopyPartResult, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -type awsRestxml_deserializeOpWriteGetObjectResponse struct { -} - -func (*awsRestxml_deserializeOpWriteGetObjectResponse) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorWriteGetObjectResponse(response, &metadata) - } - output := &WriteGetObjectResponseOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestxml_deserializeOpErrorWriteGetObjectResponse(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ - UseStatusCode: true, StatusCode: response.StatusCode, - }) - if err != nil { - 
return err - } - if hostID := errorComponents.HostID; len(hostID) != 0 { - s3shared.SetHostIDMetadata(metadata, hostID) - } - if reqID := errorComponents.RequestID; len(reqID) != 0 { - awsmiddleware.SetRequestIDMetadata(metadata, reqID) - } - if len(errorComponents.Code) != 0 { - errorCode = errorComponents.Code - } - if len(errorComponents.Message) != 0 { - errorMessage = errorComponents.Message - } - errorBody.Seek(0, io.SeekStart) - switch { - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestxml_deserializeEventStreamSelectObjectContentEventStream(v *types.SelectObjectContentEventStream, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - eventType := msg.Headers.Get(eventstreamapi.EventTypeHeader) - if eventType == nil { - return fmt.Errorf("%s event header not present", eventstreamapi.EventTypeHeader) - } - - switch { - case strings.EqualFold("Cont", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberCont{} - if err := awsRestxml_deserializeEventMessageContinuationEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - case strings.EqualFold("End", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberEnd{} - if err := awsRestxml_deserializeEventMessageEndEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - case strings.EqualFold("Progress", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberProgress{} - if err := awsRestxml_deserializeEventMessageProgressEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - case strings.EqualFold("Records", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberRecords{} - if err := awsRestxml_deserializeEventMessageRecordsEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - case strings.EqualFold("Stats", eventType.String()): - vv := &types.SelectObjectContentEventStreamMemberStats{} - if err := awsRestxml_deserializeEventMessageStatsEvent(&vv.Value, msg); err != nil { - return err - } - *v = vv - return nil - - default: - buffer := bytes.NewBuffer(nil) - eventstream.NewEncoder().Encode(buffer, *msg) - *v = &types.UnknownUnionMember{ - Tag: eventType.String(), - Value: buffer.Bytes(), - } - return nil - - } -} - -func awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg *eventstream.Message) error { - exceptionType := msg.Headers.Get(eventstreamapi.ExceptionTypeHeader) - if exceptionType == nil { - return fmt.Errorf("%s event header not present", eventstreamapi.ExceptionTypeHeader) - } - - switch { - default: - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(br, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - errorComponents, err := awsxml.GetErrorResponseComponents(br, true) - if err != nil { - return err - } - errorCode := "UnknownError" - errorMessage := errorCode - if ev := exceptionType.String(); len(ev) > 0 { - errorCode = ev - } else if ev := errorComponents.Code; len(ev) > 0 { - errorCode = ev - } - if ev := errorComponents.Message; len(ev) > 0 { - errorMessage = ev - } - return &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - - } -} - -func awsRestxml_deserializeEventMessageRecordsEvent(v *types.RecordsEvent, msg *eventstream.Message) error { - if v 
== nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - if msg.Payload != nil { - bsv := make([]byte, len(msg.Payload)) - copy(bsv, msg.Payload) - - v.Payload = bsv - } - return nil -} - -func awsRestxml_deserializeEventMessageStatsEvent(v *types.StatsEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(br, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentStats(&v.Details, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return nil -} - -func awsRestxml_deserializeEventMessageProgressEvent(v *types.ProgressEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(br, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentProgress(&v.Details, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return nil -} - -func awsRestxml_deserializeEventMessageContinuationEvent(v *types.ContinuationEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - br := bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(br, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentContinuationEvent(&v, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return nil -} - -func awsRestxml_deserializeEventMessageEndEvent(v *types.EndEvent, msg *eventstream.Message) error { - if v == nil { - return fmt.Errorf("unexpected serialization of nil %T", v) - } - - br := 
bytes.NewReader(msg.Payload) - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(br, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return nil - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentEndEvent(&v, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return nil -} - -func awsRestxml_deserializeDocumentContinuationEvent(v **types.ContinuationEvent, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ContinuationEvent - if *v == nil { - sv = &types.ContinuationEvent{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentEndEvent(v **types.EndEvent, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.EndEvent - if *v == nil { - sv = &types.EndEvent{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Progress - if *v == nil { - sv = &types.Progress{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("BytesProcessed", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesProcessed = ptr.Int64(i64) - } - - case strings.EqualFold("BytesReturned", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesReturned = ptr.Int64(i64) - } - - case strings.EqualFold("BytesScanned", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - 
sv.BytesScanned = ptr.Int64(i64) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Stats - if *v == nil { - sv = &types.Stats{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("BytesProcessed", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesProcessed = ptr.Int64(i64) - } - - case strings.EqualFold("BytesReturned", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesReturned = ptr.Int64(i64) - } - - case strings.EqualFold("BytesScanned", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.BytesScanned = ptr.Int64(i64) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeErrorBucketAlreadyExists(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.BucketAlreadyExists{} - return output -} - -func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.BucketAlreadyOwnedByYou{} - return output -} - -func awsRestxml_deserializeErrorEncryptionTypeMismatch(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.EncryptionTypeMismatch{} - return output -} - -func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidObjectState{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentInvalidObjectState(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsRestxml_deserializeErrorInvalidRequest(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequest{} - return output -} - -func 
awsRestxml_deserializeErrorInvalidWriteOffset(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidWriteOffset{} - return output -} - -func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchBucket{} - return output -} - -func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchKey{} - return output -} - -func awsRestxml_deserializeErrorNoSuchUpload(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchUpload{} - return output -} - -func awsRestxml_deserializeErrorNotFound(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NotFound{} - return output -} - -func awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ObjectAlreadyInActiveTierError{} - return output -} - -func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ObjectNotInActiveTierError{} - return output -} - -func awsRestxml_deserializeErrorTooManyParts(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.TooManyParts{} - return output -} - -func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AbortIncompleteMultipartUpload - if *v == nil { - sv = &types.AbortIncompleteMultipartUpload{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DaysAfterInitiation", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.DaysAfterInitiation = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAccessControlTranslation(v **types.AccessControlTranslation, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AccessControlTranslation - if *v == nil { - sv = &types.AccessControlTranslation{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Owner", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Owner = types.OwnerOverride(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAllowedHeaders(v *[]string, decoder smithyxml.NodeDecoder) error { - if v == nil { - return 
fmt.Errorf("unexpected nil of type %T", v) - } - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = xtv - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - switch { - default: - var mv string - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentAllowedMethods(v *[]string, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = xtv - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - switch { - default: - var mv string - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentAllowedOrigins(v *[]string, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []string - if *v == nil { - sv = make([]string, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = xtv - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error { - var sv []string - if *v == nil { - sv = make([]string, 0) - } 
else { - sv = *v - } - - switch { - default: - var mv string - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentAnalyticsAndOperator(v **types.AnalyticsAndOperator, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AnalyticsAndOperator - if *v == nil { - sv = &types.AnalyticsAndOperator{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsConfiguration(v **types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AnalyticsConfiguration - if *v == nil { - sv = &types.AnalyticsConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("StorageClassAnalysis", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentStorageClassAnalysis(&sv.StorageClassAnalysis, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsConfigurationList(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.AnalyticsConfiguration - if *v == nil { - sv = make([]types.AnalyticsConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.AnalyticsConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := 
awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.AnalyticsConfiguration - if *v == nil { - sv = make([]types.AnalyticsConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.AnalyticsConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentAnalyticsExportDestination(v **types.AnalyticsExportDestination, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AnalyticsExportDestination - if *v == nil { - sv = &types.AnalyticsExportDestination{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("S3BucketDestination", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsFilter(v *types.AnalyticsFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var uv types.AnalyticsFilter - var memberFound bool - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("And", t.Name.Local): - var mv types.AnalyticsAndOperator - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentAnalyticsAndOperator(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - uv = &types.AnalyticsFilterMemberAnd{Value: mv} - memberFound = true - - case strings.EqualFold("Prefix", t.Name.Local): - var mv string - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = xtv - } - uv = &types.AnalyticsFilterMemberPrefix{Value: mv} - memberFound = true - - case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - uv = &types.AnalyticsFilterMemberTag{Value: mv} - memberFound = true - - default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true 
- - } - decoder = originalDecoder - } - *v = uv - return nil -} - -func awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(v **types.AnalyticsS3BucketDestination, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.AnalyticsS3BucketDestination - if *v == nil { - sv = &types.AnalyticsS3BucketDestination{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("BucketAccountId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.BucketAccountId = ptr.String(xtv) - } - - case strings.EqualFold("Format", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Format = types.AnalyticsS3ExportFileFormat(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Bucket - if *v == nil { - sv = &types.Bucket{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("BucketRegion", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.BucketRegion = ptr.String(xtv) - } - - case strings.EqualFold("CreationDate", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.CreationDate = ptr.Time(t) - } - - case strings.EqualFold("Name", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Name = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBucketAlreadyExists(v **types.BucketAlreadyExists, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.BucketAlreadyExists - if *v == nil { - sv = &types.BucketAlreadyExists{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = 
smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBucketAlreadyOwnedByYou(v **types.BucketAlreadyOwnedByYou, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.BucketAlreadyOwnedByYou - if *v == nil { - sv = &types.BucketAlreadyOwnedByYou{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBuckets(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Bucket - if *v == nil { - sv = make([]types.Bucket, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("Bucket", t.Name.Local): - var col types.Bucket - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentBucketsUnwrapped(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error { - var sv []types.Bucket - if *v == nil { - sv = make([]types.Bucket, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Bucket - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Checksum - if *v == nil { - sv = &types.Checksum{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case 
strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentChecksumAlgorithmList(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ChecksumAlgorithm - if *v == nil { - sv = make([]types.ChecksumAlgorithm, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - decoder = memberDecoder - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ChecksumAlgorithm - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - col = types.ChecksumAlgorithm(xtv) - } - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error { - var sv []types.ChecksumAlgorithm - if *v == nil { - sv = make([]types.ChecksumAlgorithm, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ChecksumAlgorithm - t := decoder.StartEl - _ = t - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - mv = types.ChecksumAlgorithm(xtv) - } - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentCommonPrefix(v **types.CommonPrefix, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.CommonPrefix - if *v == nil { - sv = &types.CommonPrefix{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCommonPrefixList(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.CommonPrefix - if *v == nil { - sv = make([]types.CommonPrefix, 0) - } else 
{ - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.CommonPrefix - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error { - var sv []types.CommonPrefix - if *v == nil { - sv = make([]types.CommonPrefix, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.CommonPrefix - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentCondition(v **types.Condition, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Condition - if *v == nil { - sv = &types.Condition{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("HttpErrorCodeReturnedEquals", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.HttpErrorCodeReturnedEquals = ptr.String(xtv) - } - - case strings.EqualFold("KeyPrefixEquals", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyPrefixEquals = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.CopyObjectResult - if *v == nil { - sv = &types.CopyObjectResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case 
strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.CopyPartResult - if *v == nil { - sv = &types.CopyPartResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if 
err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.CORSRule - if *v == nil { - sv = &types.CORSRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AllowedHeader", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(&sv.AllowedHeaders, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("AllowedMethod", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(&sv.AllowedMethods, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("AllowedOrigin", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(&sv.AllowedOrigins, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ExposeHeader", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentExposeHeadersUnwrapped(&sv.ExposeHeaders, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - case strings.EqualFold("MaxAgeSeconds", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.MaxAgeSeconds = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCORSRules(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.CORSRule - if *v == nil { - sv = make([]types.CORSRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.CORSRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentCORSRulesUnwrapped(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error { - var sv []types.CORSRule - if *v == nil { - sv = make([]types.CORSRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.CORSRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := 
awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DefaultRetention - if *v == nil { - sv = &types.DefaultRetention{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Days", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Days = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Mode", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Mode = types.ObjectLockRetentionMode(xtv) - } - - case strings.EqualFold("Years", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Years = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeletedObject(v **types.DeletedObject, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DeletedObject - if *v == nil { - sv = &types.DeletedObject{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DeleteMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val) - } - sv.DeleteMarker = ptr.Bool(xtv) - } - - case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.DeleteMarkerVersionId = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("VersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeletedObjects(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv 
[]types.DeletedObject - if *v == nil { - sv = make([]types.DeletedObject, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.DeletedObject - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error { - var sv []types.DeletedObject - if *v == nil { - sv = make([]types.DeletedObject, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.DeletedObject - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DeleteMarkerEntry - if *v == nil { - sv = &types.DeleteMarkerEntry{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IsLatest", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) - } - sv.IsLatest = ptr.Bool(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("VersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeleteMarkerReplication(v **types.DeleteMarkerReplication, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.DeleteMarkerReplication - if *v == nil { - sv = &types.DeleteMarkerReplication{} 
- } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.DeleteMarkerReplicationStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeleteMarkers(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.DeleteMarkerEntry - if *v == nil { - sv = make([]types.DeleteMarkerEntry, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.DeleteMarkerEntry - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error { - var sv []types.DeleteMarkerEntry - if *v == nil { - sv = make([]types.DeleteMarkerEntry, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.DeleteMarkerEntry - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentDestination(v **types.Destination, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Destination - if *v == nil { - sv = &types.Destination{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessControlTranslation", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAccessControlTranslation(&sv.AccessControlTranslation, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Account", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Account = ptr.String(xtv) - } - - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("EncryptionConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := 
awsRestxml_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Metrics", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetrics(&sv.Metrics, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ReplicationTime", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationTime(&sv.ReplicationTime, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.StorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.EncryptionConfiguration - if *v == nil { - sv = &types.EncryptionConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ReplicaKmsKeyID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ReplicaKmsKeyID = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentEncryptionTypeMismatch(v **types.EncryptionTypeMismatch, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.EncryptionTypeMismatch - if *v == nil { - sv = &types.EncryptionTypeMismatch{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Error - if *v == nil { - sv = &types.Error{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Code", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Code = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = 
ptr.String(xtv) - } - - case strings.EqualFold("Message", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Message = ptr.String(xtv) - } - - case strings.EqualFold("VersionId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentErrorDetails(v **types.ErrorDetails, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ErrorDetails - if *v == nil { - sv = &types.ErrorDetails{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ErrorCode", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ErrorCode = ptr.String(xtv) - } - - case strings.EqualFold("ErrorMessage", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ErrorMessage = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ErrorDocument - if *v == nil { - sv = &types.ErrorDocument{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentErrors(v *[]types.Error, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Error - if *v == nil { - sv = make([]types.Error, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Error - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func 
[deleted vendored code: generated REST-XML deserializer helpers (awsRestxml_deserializeDocument*) from the AWS SDK for Go v2 S3 client, removed along with the rest of the vendor/ tree. The span covers the helpers for Error and Event lists, ExposeHeaders, FilterRule and FilterRuleList, GetBucketMetadataTableConfigurationResult, GetObjectAttributesParts, Grant/Grantee/Grants, IndexDocument, Initiator, the IntelligentTiering*, Inventory*, LambdaFunctionConfiguration*, Lifecycle*, Metrics*, MultipartUpload*, and NoncurrentVersion* types, EventBridgeConfiguration, LoggingEnabled, MetadataTableConfigurationResult, and the InvalidObjectState, InvalidRequest, InvalidWriteOffset, NoSuchBucket, NoSuchKey, and NoSuchUpload error shapes. Every helper follows the same generated pattern: wrap a smithyxml.NodeDecoder around the current start element, loop over XML tokens, match known member tags with strings.EqualFold, decode scalar values or delegate to nested helpers, and call decoder.Decoder.Skip() on any unexpected element.]
error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NoSuchUpload - if *v == nil { - sv = &types.NoSuchUpload{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNotFound(v **types.NotFound, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NotFound - if *v == nil { - sv = &types.NotFound{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentNotificationConfigurationFilter(v **types.NotificationConfigurationFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.NotificationConfigurationFilter - if *v == nil { - sv = &types.NotificationConfigurationFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("S3Key", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentS3KeyFilter(&sv.Key, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Object - if *v == nil { - sv = &types.Object{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - 
{ - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("RestoreStatus", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Size", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Size = ptr.Int64(i64) - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.ObjectStorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectAlreadyInActiveTierError(v **types.ObjectAlreadyInActiveTierError, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectAlreadyInActiveTierError - if *v == nil { - sv = &types.ObjectAlreadyInActiveTierError{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectList(v *[]types.Object, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Object - if *v == nil { - sv = make([]types.Object, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Object - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectListUnwrapped(v *[]types.Object, decoder smithyxml.NodeDecoder) error { - var sv []types.Object - if *v == nil { - sv = make([]types.Object, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Object - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := 
awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentObjectLockConfiguration(v **types.ObjectLockConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectLockConfiguration - if *v == nil { - sv = &types.ObjectLockConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ObjectLockEnabled", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ObjectLockEnabled = types.ObjectLockEnabled(xtv) - } - - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentObjectLockRule(&sv.Rule, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectLockLegalHold(v **types.ObjectLockLegalHold, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectLockLegalHold - if *v == nil { - sv = &types.ObjectLockLegalHold{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ObjectLockLegalHoldStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectLockRetention(v **types.ObjectLockRetention, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectLockRetention - if *v == nil { - sv = &types.ObjectLockRetention{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Mode", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Mode = types.ObjectLockRetentionMode(xtv) - } - - case strings.EqualFold("RetainUntilDate", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.RetainUntilDate = ptr.Time(t) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func 
awsRestxml_deserializeDocumentObjectLockRule(v **types.ObjectLockRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectLockRule - if *v == nil { - sv = &types.ObjectLockRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DefaultRetention", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDefaultRetention(&sv.DefaultRetention, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectNotInActiveTierError(v **types.ObjectNotInActiveTierError, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectNotInActiveTierError - if *v == nil { - sv = &types.ObjectNotInActiveTierError{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectPart - if *v == nil { - sv = &types.ObjectPart{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("PartNumber", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PartNumber = 
ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Size", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Size = ptr.Int64(i64) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ObjectVersion - if *v == nil { - sv = &types.ObjectVersion{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumAlgorithm", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ChecksumType", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumType = types.ChecksumType(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("IsLatest", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) - } - sv.IsLatest = ptr.Bool(xtv) - } - - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - case strings.EqualFold("Owner", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("RestoreStatus", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Size", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Size = ptr.Int64(i64) - } - - case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.ObjectVersionStorageClass(xtv) - } - - case strings.EqualFold("VersionId", t.Name.Local): - 
val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.VersionId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectVersionList(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ObjectVersion - if *v == nil { - sv = make([]types.ObjectVersion, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ObjectVersion - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentObjectVersionListUnwrapped(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error { - var sv []types.ObjectVersion - if *v == nil { - sv = make([]types.ObjectVersion, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ObjectVersion - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentOwner(v **types.Owner, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Owner - if *v == nil { - sv = &types.Owner{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DisplayName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.DisplayName = ptr.String(xtv) - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentOwnershipControls(v **types.OwnershipControls, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.OwnershipControls - if *v == nil { - sv = &types.OwnershipControls{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := 
awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentOwnershipControlsRule(v **types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.OwnershipControlsRule - if *v == nil { - sv = &types.OwnershipControlsRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ObjectOwnership", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ObjectOwnership = types.ObjectOwnership(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentOwnershipControlsRules(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.OwnershipControlsRule - if *v == nil { - sv = make([]types.OwnershipControlsRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.OwnershipControlsRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error { - var sv []types.OwnershipControlsRule - if *v == nil { - sv = make([]types.OwnershipControlsRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.OwnershipControlsRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Part - if *v == nil { - sv = &types.Part{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ChecksumCRC32", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32 = ptr.String(xtv) - } - - case 
strings.EqualFold("ChecksumCRC32C", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC32C = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumCRC64NVME = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA1", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA1 = ptr.String(xtv) - } - - case strings.EqualFold("ChecksumSHA256", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ChecksumSHA256 = ptr.String(xtv) - } - - case strings.EqualFold("ETag", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ETag = ptr.String(xtv) - } - - case strings.EqualFold("LastModified", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.LastModified = ptr.Time(t) - } - - case strings.EqualFold("PartNumber", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.PartNumber = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("Size", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Size = ptr.Int64(i64) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentPartitionedPrefix(v **types.PartitionedPrefix, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.PartitionedPrefix - if *v == nil { - sv = &types.PartitionedPrefix{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("PartitionDateSource", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.PartitionDateSource = types.PartitionDateSource(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentParts(v *[]types.Part, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Part - if *v == nil { - sv = make([]types.Part, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", 
t.Name.Local): - var col types.Part - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentPartsUnwrapped(v *[]types.Part, decoder smithyxml.NodeDecoder) error { - var sv []types.Part - if *v == nil { - sv = make([]types.Part, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Part - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentPartsList(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ObjectPart - if *v == nil { - sv = make([]types.ObjectPart, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ObjectPart - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentPartsListUnwrapped(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error { - var sv []types.ObjectPart - if *v == nil { - sv = make([]types.ObjectPart, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ObjectPart - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentPolicyStatus(v **types.PolicyStatus, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.PolicyStatus - if *v == nil { - sv = &types.PolicyStatus{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IsPublic", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsPublic to be of type *bool, got %T instead", val) - } - sv.IsPublic = ptr.Bool(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.PublicAccessBlockConfiguration, decoder smithyxml.NodeDecoder) error { - if 
v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.PublicAccessBlockConfiguration - if *v == nil { - sv = &types.PublicAccessBlockConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("BlockPublicAcls", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) - } - sv.BlockPublicAcls = ptr.Bool(xtv) - } - - case strings.EqualFold("BlockPublicPolicy", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) - } - sv.BlockPublicPolicy = ptr.Bool(xtv) - } - - case strings.EqualFold("IgnorePublicAcls", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) - } - sv.IgnorePublicAcls = ptr.Bool(xtv) - } - - case strings.EqualFold("RestrictPublicBuckets", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) - } - sv.RestrictPublicBuckets = ptr.Bool(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentQueueConfiguration(v **types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.QueueConfiguration - if *v == nil { - sv = &types.QueueConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Event", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("Queue", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.QueueArn = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - 
- } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentQueueConfigurationList(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.QueueConfiguration - if *v == nil { - sv = make([]types.QueueConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.QueueConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.QueueConfiguration - if *v == nil { - sv = make([]types.QueueConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.QueueConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentRedirect(v **types.Redirect, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Redirect - if *v == nil { - sv = &types.Redirect{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("HostName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.HostName = ptr.String(xtv) - } - - case strings.EqualFold("HttpRedirectCode", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.HttpRedirectCode = ptr.String(xtv) - } - - case strings.EqualFold("Protocol", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Protocol = types.Protocol(xtv) - } - - case strings.EqualFold("ReplaceKeyPrefixWith", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ReplaceKeyPrefixWith = ptr.String(xtv) - } - - case strings.EqualFold("ReplaceKeyWith", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ReplaceKeyWith = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRedirectAllRequestsTo(v **types.RedirectAllRequestsTo, decoder smithyxml.NodeDecoder) error { - if v == nil { - return 
fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RedirectAllRequestsTo - if *v == nil { - sv = &types.RedirectAllRequestsTo{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("HostName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.HostName = ptr.String(xtv) - } - - case strings.EqualFold("Protocol", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Protocol = types.Protocol(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicaModifications(v **types.ReplicaModifications, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicaModifications - if *v == nil { - sv = &types.ReplicaModifications{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ReplicaModificationsStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationConfiguration - if *v == nil { - sv = &types.ReplicationConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Role", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Role = ptr.String(xtv) - } - - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationRule - if *v == nil { - sv = &types.ReplicationRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - 
decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DeleteMarkerReplication", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDeleteMarkerReplication(&sv.DeleteMarkerReplication, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Destination", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentDestination(&sv.Destination, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ExistingObjectReplication", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentExistingObjectReplication(&sv.ExistingObjectReplication, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationRuleFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("ID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.ID = ptr.String(xtv) - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Priority", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Priority = ptr.Int32(int32(i64)) - } - - case strings.EqualFold("SourceSelectionCriteria", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSourceSelectionCriteria(&sv.SourceSelectionCriteria, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ReplicationRuleStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v **types.ReplicationRuleAndOperator, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationRuleAndOperator - if *v == nil { - sv = &types.ReplicationRuleAndOperator{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = 
decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRuleFilter(v **types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationRuleFilter - if *v == nil { - sv = &types.ReplicationRuleFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("And", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&sv.And, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Prefix", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Prefix = ptr.String(xtv) - } - - case strings.EqualFold("Tag", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRules(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ReplicationRule - if *v == nil { - sv = make([]types.ReplicationRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ReplicationRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationRulesUnwrapped(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error { - var sv []types.ReplicationRule - if *v == nil { - sv = make([]types.ReplicationRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ReplicationRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentReplicationTime(v **types.ReplicationTime, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationTime - if *v == nil { - sv = &types.ReplicationTime{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", 
t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.ReplicationTimeStatus(xtv) - } - - case strings.EqualFold("Time", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.Time, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentReplicationTimeValue(v **types.ReplicationTimeValue, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ReplicationTimeValue - if *v == nil { - sv = &types.ReplicationTimeValue{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Minutes", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Minutes = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRestoreStatus(v **types.RestoreStatus, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RestoreStatus - if *v == nil { - sv = &types.RestoreStatus{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("IsRestoreInProgress", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsRestoreInProgress to be of type *bool, got %T instead", val) - } - sv.IsRestoreInProgress = ptr.Bool(xtv) - } - - case strings.EqualFold("RestoreExpiryDate", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.RestoreExpiryDate = ptr.Time(t) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRoutingRule(v **types.RoutingRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.RoutingRule - if *v == nil { - sv = &types.RoutingRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Condition", t.Name.Local): - nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCondition(&sv.Condition, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Redirect", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentRedirect(&sv.Redirect, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRoutingRules(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.RoutingRule - if *v == nil { - sv = make([]types.RoutingRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("RoutingRule", t.Name.Local): - var col types.RoutingRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentRoutingRulesUnwrapped(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error { - var sv []types.RoutingRule - if *v == nil { - sv = make([]types.RoutingRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.RoutingRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.S3KeyFilter - if *v == nil { - sv = &types.S3KeyFilter{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("FilterRule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentFilterRuleListUnwrapped(&sv.FilterRules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentS3TablesDestinationResult(v **types.S3TablesDestinationResult, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.S3TablesDestinationResult - if *v == nil { - sv = &types.S3TablesDestinationResult{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case 
strings.EqualFold("TableArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableArn = ptr.String(xtv) - } - - case strings.EqualFold("TableBucketArn", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableBucketArn = ptr.String(xtv) - } - - case strings.EqualFold("TableName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableName = ptr.String(xtv) - } - - case strings.EqualFold("TableNamespace", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TableNamespace = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ServerSideEncryptionByDefault - if *v == nil { - sv = &types.ServerSideEncryptionByDefault{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("KMSMasterKeyID", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KMSMasterKeyID = ptr.String(xtv) - } - - case strings.EqualFold("SSEAlgorithm", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SSEAlgorithm = types.ServerSideEncryption(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(v **types.ServerSideEncryptionConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ServerSideEncryptionConfiguration - if *v == nil { - sv = &types.ServerSideEncryptionConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Rule", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.ServerSideEncryptionRule - if *v == nil { 
- sv = &types.ServerSideEncryptionRule{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ApplyServerSideEncryptionByDefault", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentServerSideEncryptionByDefault(&sv.ApplyServerSideEncryptionByDefault, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("BucketKeyEnabled", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val) - } - sv.BucketKeyEnabled = ptr.Bool(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionRules(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.ServerSideEncryptionRule - if *v == nil { - sv = make([]types.ServerSideEncryptionRule, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.ServerSideEncryptionRule - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error { - var sv []types.ServerSideEncryptionRule - if *v == nil { - sv = make([]types.ServerSideEncryptionRule, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.ServerSideEncryptionRule - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentSessionCredentials(v **types.SessionCredentials, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SessionCredentials - if *v == nil { - sv = &types.SessionCredentials{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessKeyId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccessKeyId = ptr.String(xtv) - } - - case strings.EqualFold("Expiration", t.Name.Local): - val, err := 
decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.Expiration = ptr.Time(t) - } - - case strings.EqualFold("SecretAccessKey", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SecretAccessKey = ptr.String(xtv) - } - - case strings.EqualFold("SessionToken", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.SessionToken = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSimplePrefix(v **types.SimplePrefix, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SimplePrefix - if *v == nil { - sv = &types.SimplePrefix{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSourceSelectionCriteria(v **types.SourceSelectionCriteria, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SourceSelectionCriteria - if *v == nil { - sv = &types.SourceSelectionCriteria{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("ReplicaModifications", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentReplicaModifications(&sv.ReplicaModifications, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("SseKmsEncryptedObjects", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSseKmsEncryptedObjects(&sv.SseKmsEncryptedObjects, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSSEKMS(v **types.SSEKMS, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SSEKMS - if *v == nil { - sv = &types.SSEKMS{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("KeyId", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyId = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - 
err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSseKmsEncryptedObjects(v **types.SseKmsEncryptedObjects, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SseKmsEncryptedObjects - if *v == nil { - sv = &types.SseKmsEncryptedObjects{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = types.SseKmsEncryptedObjectsStatus(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentSSES3(v **types.SSES3, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.SSES3 - if *v == nil { - sv = &types.SSES3{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentStorageClassAnalysis(v **types.StorageClassAnalysis, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.StorageClassAnalysis - if *v == nil { - sv = &types.StorageClassAnalysis{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DataExport", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(&sv.DataExport, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(v **types.StorageClassAnalysisDataExport, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.StorageClassAnalysisDataExport - if *v == nil { - sv = &types.StorageClassAnalysisDataExport{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Destination", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentAnalyticsExportDestination(&sv.Destination, nodeDecoder); err != nil { - return err - } - - case 
strings.EqualFold("OutputSchemaVersion", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.OutputSchemaVersion = types.StorageClassAnalysisSchemaVersion(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTag(v **types.Tag, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Tag - if *v == nil { - sv = &types.Tag{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Key", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Key = ptr.String(xtv) - } - - case strings.EqualFold("Value", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Value = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTagSet(v *[]types.Tag, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Tag - if *v == nil { - sv = make([]types.Tag, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("Tag", t.Name.Local): - var col types.Tag - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTagSetUnwrapped(v *[]types.Tag, decoder smithyxml.NodeDecoder) error { - var sv []types.Tag - if *v == nil { - sv = make([]types.Tag, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Tag - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentTargetGrant(v **types.TargetGrant, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.TargetGrant - if *v == nil { - sv = &types.TargetGrant{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Grantee", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); 
err != nil { - return err - } - - case strings.EqualFold("Permission", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Permission = types.BucketLogsPermission(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTargetGrants(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.TargetGrant - if *v == nil { - sv = make([]types.TargetGrant, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("Grant", t.Name.Local): - var col types.TargetGrant - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTargetGrantsUnwrapped(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error { - var sv []types.TargetGrant - if *v == nil { - sv = make([]types.TargetGrant, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.TargetGrant - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentTargetObjectKeyFormat(v **types.TargetObjectKeyFormat, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.TargetObjectKeyFormat - if *v == nil { - sv = &types.TargetObjectKeyFormat{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("PartitionedPrefix", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentPartitionedPrefix(&sv.PartitionedPrefix, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("SimplePrefix", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentSimplePrefix(&sv.SimplePrefix, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Tiering - if *v == nil { - sv = &types.Tiering{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = 
smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("AccessTier", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.AccessTier = types.IntelligentTieringAccessTier(xtv) - } - - case strings.EqualFold("Days", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Days = ptr.Int32(int32(i64)) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTieringList(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Tiering - if *v == nil { - sv = make([]types.Tiering, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Tiering - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error { - var sv []types.Tiering - if *v == nil { - sv = make([]types.Tiering, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Tiering - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentTooManyParts(v **types.TooManyParts, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.TooManyParts - if *v == nil { - sv = &types.TooManyParts{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.TopicConfiguration - if *v == nil { - sv = &types.TopicConfiguration{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Event", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := 
awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Filter", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Id", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Id = ptr.String(xtv) - } - - case strings.EqualFold("Topic", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TopicArn = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTopicConfigurationList(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.TopicConfiguration - if *v == nil { - sv = make([]types.TopicConfiguration, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.TopicConfiguration - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { - var sv []types.TopicConfiguration - if *v == nil { - sv = make([]types.TopicConfiguration, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.TopicConfiguration - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Transition - if *v == nil { - sv = &types.Transition{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("Date", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - t, err := smithytime.ParseDateTime(xtv) - if err != nil { - return err - } - sv.Date = ptr.Time(t) - } - - case strings.EqualFold("Days", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - i64, err := strconv.ParseInt(xtv, 10, 64) - if err != nil { - return err - } - sv.Days = ptr.Int32(int32(i64)) - } - - 
case strings.EqualFold("StorageClass", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.StorageClass = types.TransitionStorageClass(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTransitionList(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.Transition - if *v == nil { - sv = make([]types.Transition, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("member", t.Name.Local): - var col types.Transition - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsRestxml_deserializeDocumentTransitionListUnwrapped(v *[]types.Transition, decoder smithyxml.NodeDecoder) error { - var sv []types.Transition - if *v == nil { - sv = make([]types.Transition, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.Transition - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go deleted file mode 100644 index d825a41a7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package s3 provides the API client, operations, and parameter types for Amazon -// Simple Storage Service. -package s3 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go deleted file mode 100644 index bb5f4cf47..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go +++ /dev/null @@ -1,126 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" - smithyauth "github.com/aws/smithy-go/auth" -) - -type endpointAuthResolver struct { - EndpointResolver EndpointResolverV2 -} - -var _ AuthSchemeResolver = (*endpointAuthResolver)(nil) - -func (r *endpointAuthResolver) ResolveAuthSchemes( - ctx context.Context, params *AuthResolverParameters, -) ( - []*smithyauth.Option, error, -) { - if params.endpointParams.Region == nil { - // #2502: We're correcting the endpoint binding behavior to treat empty - // Region as "unset" (nil), but auth resolution technically doesn't - // care and someone could be using V1 or non-default V2 endpoint - // resolution, both of which would bypass the required-region check. - // They shouldn't be broken because the region is technically required - // by this service's endpoint-based auth resolver, so we stub it here. 
- params.endpointParams.Region = aws.String("") - } - - opts, err := r.resolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - // canonicalize sigv4-s3express ID - for _, opt := range opts { - if opt.SchemeID == "sigv4-s3express" { - opt.SchemeID = "com.amazonaws.s3#sigv4express" - } - } - - // preserve pre-SRA behavior where everything technically had anonymous - return append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }), nil -} - -func (r *endpointAuthResolver) resolveAuthSchemes( - ctx context.Context, params *AuthResolverParameters, -) ( - []*smithyauth.Option, error, -) { - baseOpts, err := (&defaultAuthSchemeResolver{}).ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, fmt.Errorf("get base options: %w", err) - } - - endpt, err := r.EndpointResolver.ResolveEndpoint(ctx, *params.endpointParams) - if err != nil { - return nil, fmt.Errorf("resolve endpoint: %w", err) - } - - endptOpts, ok := smithyauth.GetAuthOptions(&endpt.Properties) - if !ok { - return baseOpts, nil - } - - // the list of options from the endpoint is authoritative, however, the - // modeled options have some properties that the endpoint ones don't, so we - // start from the latter and merge in - for _, endptOpt := range endptOpts { - if baseOpt := findScheme(baseOpts, endptOpt.SchemeID); baseOpt != nil { - rebaseProps(endptOpt, baseOpt) - } - } - - return endptOpts, nil -} - -// rebase the properties of dst, taking src as the base and overlaying those -// from dst -func rebaseProps(dst, src *smithyauth.Option) { - iprops, sprops := src.IdentityProperties, src.SignerProperties - - iprops.SetAll(&dst.IdentityProperties) - sprops.SetAll(&dst.SignerProperties) - - dst.IdentityProperties = iprops - dst.SignerProperties = sprops -} - -func findScheme(opts []*smithyauth.Option, schemeID string) *smithyauth.Option { - for _, opt := range opts { - if opt.SchemeID == schemeID { - return opt - } - } - return nil -} - -func finalizeServiceEndpointAuthResolver(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &endpointAuthResolver{ - EndpointResolver: options.EndpointResolverV2, - } -} - -func finalizeOperationEndpointAuthResolver(options *Options) { - resolver, ok := options.AuthSchemeResolver.(*endpointAuthResolver) - if !ok { - return - } - - if resolver.EndpointResolver == options.EndpointResolverV2 { - return - } - - options.AuthSchemeResolver = &endpointAuthResolver{ - EndpointResolver: options.EndpointResolverV2, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go deleted file mode 100644 index 1a4af2822..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go +++ /dev/null @@ -1,6423 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/endpoints/private/rulesfn" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. 
-func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "s3" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
-// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - - if options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { - if options.UseDualstack { - options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled - } else { - options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_S3") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "S3", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The S3 bucket used to send the request. This is an optional parameter that will - // be set automatically for operations that are scoped to an S3 bucket. - // - // Parameter - // is required. - Bucket *string - - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. 
- // - // AWS::UseDualStack - UseDualStack *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string - - // When true, force a path-style endpoint to be used where the bucket name is part - // of the path. - // - // Defaults to false if no value is - // provided. - // - // AWS::S3::ForcePathStyle - ForcePathStyle *bool - - // When true, use S3 Accelerate. NOTE: Not all regions support S3 - // accelerate. - // - // Defaults to false if no value is provided. - // - // AWS::S3::Accelerate - Accelerate *bool - - // Whether the global endpoint should be used, rather then the regional endpoint - // for us-east-1. - // - // Defaults to false if no value is - // provided. - // - // AWS::S3::UseGlobalEndpoint - UseGlobalEndpoint *bool - - // Internal parameter to use object lambda endpoint for an operation (eg: - // WriteGetObjectResponse) - // - // Parameter is required. - UseObjectLambdaEndpoint *bool - - // The S3 Key used to send the request. This is an optional parameter that will be - // set automatically for operations that are scoped to an S3 Key. - // - // Parameter is - // required. - Key *string - - // The S3 Prefix used to send the request. This is an optional parameter that will - // be set automatically for operations that are scoped to an S3 Prefix. - // - // Parameter - // is required. - Prefix *string - - // The Copy Source used for Copy Object request. This is an optional parameter that - // will be set automatically for operations that are scoped to Copy - // Source. - // - // Parameter is required. - CopySource *string - - // Internal parameter to disable Access Point Buckets - // - // Parameter is required. - DisableAccessPoints *bool - - // Whether multi-region access points (MRAP) should be disabled. - // - // Defaults to false - // if no value is provided. - // - // AWS::S3::DisableMultiRegionAccessPoints - DisableMultiRegionAccessPoints *bool - - // When an Access Point ARN is provided and this flag is enabled, the SDK MUST use - // the ARN's region when constructing the endpoint instead of the client's - // configured region. - // - // Parameter is required. - // - // AWS::S3::UseArnRegion - UseArnRegion *bool - - // Internal parameter to indicate whether S3Express operation should use control - // plane, (ex. CreateBucket) - // - // Parameter is required. - UseS3ExpressControlEndpoint *bool - - // Parameter to indicate whether S3Express session auth should be - // disabled - // - // Parameter is required. - DisableS3ExpressSessionAuth *bool -} - -// ValidateRequired validates required parameters are set. -func (p EndpointParameters) ValidateRequired() error { - if p.Accelerate == nil { - return fmt.Errorf("parameter Accelerate is required") - } - - if p.DisableMultiRegionAccessPoints == nil { - return fmt.Errorf("parameter DisableMultiRegionAccessPoints is required") - } - - if p.ForcePathStyle == nil { - return fmt.Errorf("parameter ForcePathStyle is required") - } - - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - if p.UseGlobalEndpoint == nil { - return fmt.Errorf("parameter UseGlobalEndpoint is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. 
-func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.Accelerate == nil { - p.Accelerate = ptr.Bool(false) - } - - if p.DisableMultiRegionAccessPoints == nil { - p.DisableMultiRegionAccessPoints = ptr.Bool(false) - } - - if p.ForcePathStyle == nil { - p.ForcePathStyle = ptr.Bool(false) - } - - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - - if p.UseGlobalEndpoint == nil { - p.UseGlobalEndpoint = ptr.Bool(false) - } - return p -} - -type stringSlice []string - -func (s stringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseFIPS := *params.UseFIPS - _UseDualStack := *params.UseDualStack - _ForcePathStyle := *params.ForcePathStyle - _Accelerate := *params.Accelerate - _UseGlobalEndpoint := *params.UseGlobalEndpoint - _DisableMultiRegionAccessPoints := *params.DisableMultiRegionAccessPoints - - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if _Accelerate == true { - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Accelerate cannot be used with FIPS") - } - } - if _UseDualStack == true { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - return endpoint, fmt.Errorf("endpoint rule error, %s", "Cannot set dual-stack in combination with a custom endpoint.") - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with FIPS") - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with S3 Accelerate") - } - } - if _UseFIPS == true { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _partitionResult.Name == "aws-cn" { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Partition does not support FIPS") - } - } - } - if exprVal := params.Bucket; exprVal != nil { - _Bucket := *exprVal - _ = _Bucket - if exprVal := rulesfn.SubString(_Bucket, 0, 6, true); exprVal != nil { - _bucketSuffix := *exprVal - _ = _bucketSuffix - if _bucketSuffix == "--x-s3" { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", 
"S3Express does not support Dual-stack.") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support S3 Accelerate.") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if _url.IsIp == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - if _url.IsIp == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, 
[]*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { - _UseS3ExpressControlEndpoint := *exprVal - _ = _UseS3ExpressControlEndpoint - if _UseS3ExpressControlEndpoint == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - if !(params.Endpoint != nil) { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control-fips.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() 
smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString 
:= func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string 
{ - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 
6, 26, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", 
"S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - 
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - 
out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - 
out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - } - if !(params.Bucket != nil) { - if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { - _UseS3ExpressControlEndpoint := *exprVal - _ = _UseS3ExpressControlEndpoint - if _UseS3ExpressControlEndpoint == true { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control-fips.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - 
return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := params.Bucket; exprVal != nil { - _Bucket := *exprVal - _ = _Bucket - if exprVal := rulesfn.SubString(_Bucket, 49, 50, true); exprVal != nil { - _hardwareType := *exprVal - _ = _hardwareType - if exprVal := rulesfn.SubString(_Bucket, 8, 12, true); exprVal != nil { - _regionPrefix := *exprVal - _ = _regionPrefix - if exprVal := rulesfn.SubString(_Bucket, 0, 7, true); exprVal != nil { - _bucketAliasSuffix := *exprVal - _ = _bucketAliasSuffix - if exprVal := rulesfn.SubString(_Bucket, 32, 49, true); exprVal != nil { - _outpostId := *exprVal - _ = _outpostId - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _regionPartition := *exprVal - _ = _regionPartition - if _bucketAliasSuffix == "--op-s3" { - if rulesfn.IsValidHostLabel(_outpostId, false) { - if _hardwareType == "e" { - if _regionPrefix == "beta" { - if !(params.Endpoint != nil) { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".ec2.") - out.WriteString(_url.Authority) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".ec2.s3-outposts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_regionPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _hardwareType == "o" { - if _regionPrefix == "beta" { - if !(params.Endpoint != nil) { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".op-") - out.WriteString(_outpostId) - out.WriteString(".") - out.WriteString(_url.Authority) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".op-") - out.WriteString(_outpostId) - out.WriteString(".s3-outposts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_regionPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Unrecognized hardware type: \"Expected hardware type o or e but got ") - out.WriteString(_hardwareType) - out.WriteString("\"") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`.") - } - } - } - } - } - } - } - if exprVal := params.Bucket; exprVal != nil { - _Bucket := *exprVal - _ = _Bucket - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if !(rulesfn.ParseURL(_Endpoint) != nil) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Custom endpoint `") - out.WriteString(_Endpoint) - out.WriteString("` was not a valid URI") - return out.String() - }()) - } - } - if _ForcePathStyle == false { - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if rulesfn.IsValidHostLabel(_Region, false) { - if _Accelerate == true { - if _partitionResult.Name == "aws-cn" { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Accelerate cannot be used in this region") - } - } - if _UseDualStack == true { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() 
smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - 
smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == true { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, 
"us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.dualstack.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.dualstack.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if 
!(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == true { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == true { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_Bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if 
_Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == false { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_Bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_Bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS 
== false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_Bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - 
} - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.IsIp == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, 
nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3-accelerate.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - 
}() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if _UseFIPS == false { - if _Accelerate == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _url.Scheme == "http" { - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, true) { - if _ForcePathStyle == false { - if _UseFIPS == false { - if _UseDualStack == false { - if _Accelerate == false { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if rulesfn.IsValidHostLabel(_Region, false) { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_Bucket) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - } - } - } - } - } - } - if _ForcePathStyle == false { - if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { - _bucketArn := *exprVal - _ = _bucketArn - if exprVal := _bucketArn.ResourceId.Get(0); exprVal != nil { - _arnType := *exprVal - _ = _arnType - if !(_arnType == "") { - if _bucketArn.Service == "s3-object-lambda" { - if _arnType == "accesspoint" { - if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { - _accessPointName := *exprVal - _ = _accessPointName - if !(_accessPointName == "") { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate") - } - if !(_bucketArn.Region == "") { - if exprVal := params.DisableAccessPoints; exprVal != nil { - _DisableAccessPoints := *exprVal - _ = _DisableAccessPoints - if _DisableAccessPoints == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Access points are not supported for this operation") - } - } - if !(_bucketArn.ResourceId.Get(2) != nil) { - if exprVal := params.UseArnRegion; exprVal != nil { - _UseArnRegion := *exprVal - _ = _UseArnRegion - if _UseArnRegion == false { - if !(_bucketArn.Region == _Region) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid configuration: region from ARN `") - out.WriteString(_bucketArn.Region) - out.WriteString("` does not match client region `") - out.WriteString(_Region) - out.WriteString("` and UseArnRegion is `false`") - return out.String() - }()) - } - } - } - if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { - 
_bucketPartition := *exprVal - _ = _bucketPartition - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _bucketPartition.Name == _partitionResult.Name { - if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { - if _bucketArn.AccountId == "" { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Missing account id") - } - if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { - if rulesfn.IsValidHostLabel(_accessPointName, false) { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-object-lambda-fips.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-object-lambda.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, 
[]*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_accessPointName) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_bucketArn.AccountId) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid region in ARN: `") - out.WriteString(_bucketArn.Region) - out.WriteString("` (invalid DNS name)") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Client was configured for partition `") - out.WriteString(_partitionResult.Name) - out.WriteString("` but ARN (`") - out.WriteString(_Bucket) - out.WriteString("`) has `") - out.WriteString(_bucketPartition.Name) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: bucket ARN is missing a region") - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: Object Lambda ARNs only support `accesspoint` arn types, but found: `") - out.WriteString(_arnType) - out.WriteString("`") - return out.String() - }()) - } - if _arnType == "accesspoint" { - if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { - _accessPointName := *exprVal - _ = _accessPointName - if !(_accessPointName == "") { - if !(_bucketArn.Region == "") { - if _arnType == "accesspoint" { - if !(_bucketArn.Region == "") { - if exprVal := params.DisableAccessPoints; exprVal != nil { - _DisableAccessPoints := *exprVal - _ = _DisableAccessPoints - if _DisableAccessPoints == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Access points are not supported for this operation") - } - } - if !(_bucketArn.ResourceId.Get(2) != nil) { - if exprVal := params.UseArnRegion; exprVal != nil { - _UseArnRegion := *exprVal - _ = _UseArnRegion - if _UseArnRegion == false { - if !(_bucketArn.Region == _Region) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid configuration: region from ARN `") - out.WriteString(_bucketArn.Region) - out.WriteString("` does not match client region `") - out.WriteString(_Region) - out.WriteString("` and UseArnRegion is `false`") - return out.String() - }()) - } - } - } - if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { - _bucketPartition := *exprVal - _ = _bucketPartition - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _bucketPartition.Name == _partitionResult.Name { - if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { - if _bucketArn.Service == "s3" { - if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { - if rulesfn.IsValidHostLabel(_accessPointName, false) { - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Access Points do not support S3 Accelerate") - } - if _UseFIPS == true { - if _UseDualStack == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-accesspoint-fips.dualstack.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - 
smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == true { - if _UseDualStack == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-accesspoint-fips.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == false { - if _UseDualStack == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-accesspoint.dualstack.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, 
_bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".s3-accesspoint.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_accessPointName) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_bucketArn.AccountId) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The ARN was not for the S3 service, found: ") - out.WriteString(_bucketArn.Service) - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid region in ARN: `") - out.WriteString(_bucketArn.Region) - out.WriteString("` (invalid DNS name)") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Client was configured for partition `") - out.WriteString(_partitionResult.Name) - out.WriteString("` but ARN (`") - out.WriteString(_Bucket) - out.WriteString("`) has `") - out.WriteString(_bucketPartition.Name) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - if rulesfn.IsValidHostLabel(_accessPointName, true) { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support dual-stack") - } - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support FIPS") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support S3 Accelerate") - } - if _DisableMultiRegionAccessPoints == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid configuration: Multi-Region Access Point ARNs are disabled.") - } - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _mrapPartition := *exprVal - _ = _mrapPartition - if _mrapPartition.Name == _bucketArn.Partition { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString(".accesspoint.s3-global.") - out.WriteString(_mrapPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Client was configured for partition `") - out.WriteString(_mrapPartition.Name) - out.WriteString("` but bucket referred to partition `") - out.WriteString(_bucketArn.Partition) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Access Point Name") - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided") - } - if _bucketArn.Service == "s3-outposts" { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support Dual-stack") - } - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support FIPS") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support S3 Accelerate") - } - if exprVal := _bucketArn.ResourceId.Get(4); exprVal != nil { - _var_287 := *exprVal - _ = _var_287 - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Arn: Outpost Access Point ARN contains sub resources") - } - if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { - _outpostId := *exprVal - _ = _outpostId - if rulesfn.IsValidHostLabel(_outpostId, false) { - if exprVal := params.UseArnRegion; exprVal != nil { - _UseArnRegion := *exprVal - _ = _UseArnRegion - if _UseArnRegion == false { - if !(_bucketArn.Region == _Region) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid configuration: region from ARN `") - out.WriteString(_bucketArn.Region) - out.WriteString("` does not match client region `") - out.WriteString(_Region) - out.WriteString("` and UseArnRegion is `false`") - return out.String() - }()) - } - } - } - if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { - _bucketPartition := *exprVal - _ = _bucketPartition - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _bucketPartition.Name == _partitionResult.Name { - if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { - if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { - if exprVal := _bucketArn.ResourceId.Get(2); exprVal != nil { - _outpostType := *exprVal - _ = _outpostType - if exprVal := _bucketArn.ResourceId.Get(3); exprVal != nil { - _accessPointName := *exprVal - _ = _accessPointName - if _outpostType == "accesspoint" { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".") - out.WriteString(_outpostId) - out.WriteString(".") - out.WriteString(_url.Authority) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - 
SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_accessPointName) - out.WriteString("-") - out.WriteString(_bucketArn.AccountId) - out.WriteString(".") - out.WriteString(_outpostId) - out.WriteString(".s3-outposts.") - out.WriteString(_bucketArn.Region) - out.WriteString(".") - out.WriteString(_bucketPartition.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4a", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) - return sp - }(), - }, - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") - smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") - - smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Expected an outpost type `accesspoint`, found ") - out.WriteString(_outpostType) - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: expected an access point name") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a 4-component resource") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_bucketArn.AccountId) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid region in ARN: `") - out.WriteString(_bucketArn.Region) - out.WriteString("` (invalid DNS name)") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Client was configured for partition `") - out.WriteString(_partitionResult.Name) - out.WriteString("` but ARN (`") - out.WriteString(_Bucket) - out.WriteString("`) has `") - out.WriteString(_bucketPartition.Name) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`. Found: `") - out.WriteString(_outpostId) - out.WriteString("`") - return out.String() - }()) - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The Outpost Id was not set") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: Unrecognized format: ") - out.WriteString(_Bucket) - out.WriteString(" (type: ") - out.WriteString(_arnType) - out.WriteString(")") - return out.String() - }()) - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: No ARN type specified") - } - } - if exprVal := rulesfn.SubString(_Bucket, 0, 4, false); exprVal != nil { - _arnPrefix := *exprVal - _ = _arnPrefix - if _arnPrefix == "arn:" { - if !(awsrulesfn.ParseARN(_Bucket) != nil) { - return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { - var out strings.Builder - out.WriteString("Invalid ARN: `") - out.WriteString(_Bucket) - out.WriteString("` was not a valid ARN") - return out.String() - }()) - } - } - } - if _ForcePathStyle == true { - if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { - _var_300 := *exprVal - _ = _var_300 - return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with ARN buckets") - } - } - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if _Accelerate == false { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - 
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - 
smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == true { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return 
sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _UseFIPS == false { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - 
smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.NormalizedPath) - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if 
_UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _UseFIPS == false { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - out.WriteString("/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with S3 Accelerate") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - if exprVal := params.UseObjectLambdaEndpoint; exprVal != nil { - _UseObjectLambdaEndpoint := *exprVal - _ = _UseObjectLambdaEndpoint - if _UseObjectLambdaEndpoint == true { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if rulesfn.IsValidHostLabel(_Region, true) { - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack") - } - if _Accelerate == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate") - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-object-lambda-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-object-lambda.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") - 
smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - if !(params.Bucket != nil) { - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _partitionResult := *exprVal - _ = _partitionResult - if rulesfn.IsValidHostLabel(_Region, true) { - if _UseFIPS == true { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, 
true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == true { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out 
strings.Builder - out.WriteString("https://s3.dualstack.us-east-1.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == true { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.dualstack.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return 
endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == false { - uriString := func() string { - var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out 
smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if _Region == "aws-global" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == true { - if _Region == "us-east-1" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - if _UseFIPS == false { - if _UseDualStack == false { - if !(params.Endpoint != nil) { - if !(_Region == "aws-global") { - if _UseGlobalEndpoint == 
false { - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_partitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3") - smithyhttp.SetSigV4ASigningName(&sp, "s3") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "A region must be set when sending requests to S3.") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - params.ForcePathStyle = aws.Bool(options.UsePathStyle) - params.Accelerate = aws.Bool(options.UseAccelerate) - params.DisableMultiRegionAccessPoints = aws.Bool(options.DisableMultiRegionAccessPoints) - params.UseArnRegion = aws.Bool(options.UseARNRegion) - - params.DisableS3ExpressSessionAuth = options.DisableS3ExpressSessionAuth - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() - - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", - func() (smithyendpoints.Endpoint, error) { - return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - }) - if err != nil { - return out, 
metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - ctx = setS3ResolvedURI(ctx, endpt.URI.String()) - - backend := s3cust.GetPropertiesBackend(&endpt.Properties) - ctx = internalcontext.SetS3Backend(ctx, backend) - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go deleted file mode 100644 index d6cdb5337..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go +++ /dev/null @@ -1,285 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream" - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" - smithysync "github.com/aws/smithy-go/sync" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "io/ioutil" - "sync" -) - -// SelectObjectContentEventStreamReader provides the interface for reading events -// from a stream. -// -// The writer's Close method must allow multiple concurrent calls. 
-type SelectObjectContentEventStreamReader interface { - Events() <-chan types.SelectObjectContentEventStream - Close() error - Err() error -} - -type selectObjectContentEventStreamReader struct { - stream chan types.SelectObjectContentEventStream - decoder *eventstream.Decoder - eventStream io.ReadCloser - err *smithysync.OnceErr - payloadBuf []byte - done chan struct{} - closeOnce sync.Once -} - -func newSelectObjectContentEventStreamReader(readCloser io.ReadCloser, decoder *eventstream.Decoder) *selectObjectContentEventStreamReader { - w := &selectObjectContentEventStreamReader{ - stream: make(chan types.SelectObjectContentEventStream), - decoder: decoder, - eventStream: readCloser, - err: smithysync.NewOnceErr(), - done: make(chan struct{}), - payloadBuf: make([]byte, 10*1024), - } - - go w.readEventStream() - - return w -} - -func (r *selectObjectContentEventStreamReader) Events() <-chan types.SelectObjectContentEventStream { - return r.stream -} - -func (r *selectObjectContentEventStreamReader) readEventStream() { - defer r.Close() - defer close(r.stream) - - for { - r.payloadBuf = r.payloadBuf[0:0] - decodedMessage, err := r.decoder.Decode(r.eventStream, r.payloadBuf) - if err != nil { - if err == io.EOF { - return - } - select { - case <-r.done: - return - default: - r.err.SetError(err) - return - } - } - - event, err := r.deserializeEventMessage(&decodedMessage) - if err != nil { - r.err.SetError(err) - return - } - - select { - case r.stream <- event: - case <-r.done: - return - } - - } -} - -func (r *selectObjectContentEventStreamReader) deserializeEventMessage(msg *eventstream.Message) (types.SelectObjectContentEventStream, error) { - messageType := msg.Headers.Get(eventstreamapi.MessageTypeHeader) - if messageType == nil { - return nil, fmt.Errorf("%s event header not present", eventstreamapi.MessageTypeHeader) - } - - switch messageType.String() { - case eventstreamapi.EventMessageType: - var v types.SelectObjectContentEventStream - if err := awsRestxml_deserializeEventStreamSelectObjectContentEventStream(&v, msg); err != nil { - return nil, err - } - return v, nil - - case eventstreamapi.ExceptionMessageType: - return nil, awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg) - - case eventstreamapi.ErrorMessageType: - errorCode := "UnknownError" - errorMessage := errorCode - if header := msg.Headers.Get(eventstreamapi.ErrorCodeHeader); header != nil { - errorCode = header.String() - } - if header := msg.Headers.Get(eventstreamapi.ErrorMessageHeader); header != nil { - errorMessage = header.String() - } - return nil, &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - - default: - mc := msg.Clone() - return nil, &UnknownEventMessageError{ - Type: messageType.String(), - Message: &mc, - } - - } -} - -func (r *selectObjectContentEventStreamReader) ErrorSet() <-chan struct{} { - return r.err.ErrorSet() -} - -func (r *selectObjectContentEventStreamReader) Close() error { - r.closeOnce.Do(r.safeClose) - return r.Err() -} - -func (r *selectObjectContentEventStreamReader) safeClose() { - close(r.done) - r.eventStream.Close() - -} - -func (r *selectObjectContentEventStreamReader) Err() error { - return r.err.Err() -} - -func (r *selectObjectContentEventStreamReader) Closed() <-chan struct{} { - return r.done -} - -type awsRestxml_deserializeOpEventStreamSelectObjectContent struct { - LogEventStreamWrites bool - LogEventStreamReads bool -} - -func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) ID() string { - return 
"OperationEventStreamDeserializer" -} - -func (m *awsRestxml_deserializeOpEventStreamSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - defer func() { - if err == nil { - return - } - m.closeResponseBody(out) - }() - - logger := middleware.GetLogger(ctx) - - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) - } - _ = request - - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - deserializeOutput, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) - } - _ = deserializeOutput - - output, ok := out.Result.(*SelectObjectContentOutput) - if out.Result != nil && !ok { - return out, metadata, fmt.Errorf("unexpected output result type: %T", out.Result) - } else if out.Result == nil { - output = &SelectObjectContentOutput{} - out.Result = output - } - - eventReader := newSelectObjectContentEventStreamReader( - deserializeOutput.Body, - eventstream.NewDecoder(func(options *eventstream.DecoderOptions) { - options.Logger = logger - options.LogMessages = m.LogEventStreamReads - - }), - ) - defer func() { - if err == nil { - return - } - _ = eventReader.Close() - }() - - output.eventStream = NewSelectObjectContentEventStream(func(stream *SelectObjectContentEventStream) { - stream.Reader = eventReader - }) - - go output.eventStream.waitStreamClose() - - return out, metadata, nil -} - -func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) closeResponseBody(out middleware.DeserializeOutput) { - if resp, ok := out.RawResponse.(*smithyhttp.Response); ok && resp != nil && resp.Body != nil { - _, _ = io.Copy(ioutil.Discard, resp.Body) - _ = resp.Body.Close() - } -} - -func addEventStreamSelectObjectContentMiddleware(stack *middleware.Stack, options Options) error { - if err := stack.Deserialize.Insert(&awsRestxml_deserializeOpEventStreamSelectObjectContent{ - LogEventStreamWrites: options.ClientLogMode.IsRequestEventMessage(), - LogEventStreamReads: options.ClientLogMode.IsResponseEventMessage(), - }, "OperationDeserializer", middleware.Before); err != nil { - return err - } - return nil - -} - -// UnknownEventMessageError provides an error when a message is received from the stream, -// but the reader is unable to determine what kind of message it is. -type UnknownEventMessageError struct { - Type string - Message *eventstream.Message -} - -// Error retruns the error message string. 
-func (e *UnknownEventMessageError) Error() string { - return "unknown event stream message type, " + e.Type -} - -func setSafeEventStreamClientLogMode(o *Options, operation string) { - switch operation { - case "SelectObjectContent": - toggleEventStreamClientLogMode(o, false, true) - return - - default: - return - - } -} -func toggleEventStreamClientLogMode(o *Options, request, response bool) { - mode := o.ClientLogMode - - if request && mode.IsRequestWithBody() { - mode.ClearRequestWithBody() - mode |= aws.LogRequest - } - - if response && mode.IsResponseWithBody() { - mode.ClearResponseWithBody() - mode |= aws.LogResponse - } - - o.ClientLogMode = mode - -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go deleted file mode 100644 index bbac9ca27..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go +++ /dev/null @@ -1,9 +0,0 @@ -package s3 - -import ( - "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" -) - -// ExpressCredentialsProvider retrieves credentials for operations against the -// S3Express storage class. -type ExpressCredentialsProvider = customizations.S3ExpressCredentialsProvider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go deleted file mode 100644 index 3b35a3e57..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go +++ /dev/null @@ -1,170 +0,0 @@ -package s3 - -import ( - "context" - "crypto/hmac" - "crypto/sha256" - "errors" - "fmt" - "sync" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" - "github.com/aws/smithy-go/container/private/cache" - "github.com/aws/smithy-go/container/private/cache/lru" -) - -const s3ExpressCacheCap = 100 - -const s3ExpressRefreshWindow = 1 * time.Minute - -type cacheKey struct { - CredentialsHash string // hmac(sigv4 akid, sigv4 secret) - Bucket string -} - -func (c cacheKey) Slug() string { - return fmt.Sprintf("%s%s", c.CredentialsHash, c.Bucket) -} - -type sessionCredsCache struct { - mu sync.Mutex - cache cache.Cache -} - -func (c *sessionCredsCache) Get(key cacheKey) (*aws.Credentials, bool) { - c.mu.Lock() - defer c.mu.Unlock() - - if v, ok := c.cache.Get(key); ok { - return v.(*aws.Credentials), true - } - return nil, false -} - -func (c *sessionCredsCache) Put(key cacheKey, creds *aws.Credentials) { - c.mu.Lock() - defer c.mu.Unlock() - - c.cache.Put(key, creds) -} - -// The default S3Express provider uses an LRU cache with a capacity of 100. -// -// Credentials will be refreshed asynchronously when a Retrieve() call is made -// for cached credentials within an expiry window (1 minute, currently -// non-configurable). 
-type defaultS3ExpressCredentialsProvider struct { - sf singleflight.Group - - client createSessionAPIClient - cache *sessionCredsCache - refreshWindow time.Duration - v4creds aws.CredentialsProvider // underlying credentials used for CreateSession -} - -type createSessionAPIClient interface { - CreateSession(context.Context, *CreateSessionInput, ...func(*Options)) (*CreateSessionOutput, error) -} - -func newDefaultS3ExpressCredentialsProvider() *defaultS3ExpressCredentialsProvider { - return &defaultS3ExpressCredentialsProvider{ - cache: &sessionCredsCache{ - cache: lru.New(s3ExpressCacheCap), - }, - refreshWindow: s3ExpressRefreshWindow, - } -} - -// returns a cloned provider using new base credentials, used when per-op -// config mutations change the credentials provider -func (p *defaultS3ExpressCredentialsProvider) CloneWithBaseCredentials(v4creds aws.CredentialsProvider) *defaultS3ExpressCredentialsProvider { - return &defaultS3ExpressCredentialsProvider{ - client: p.client, - cache: p.cache, - refreshWindow: p.refreshWindow, - v4creds: v4creds, - } -} - -func (p *defaultS3ExpressCredentialsProvider) Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) { - v4creds, err := p.v4creds.Retrieve(ctx) - if err != nil { - return aws.Credentials{}, fmt.Errorf("get sigv4 creds: %w", err) - } - - key := cacheKey{ - CredentialsHash: gethmac(v4creds.AccessKeyID, v4creds.SecretAccessKey), - Bucket: bucket, - } - creds, ok := p.cache.Get(key) - if !ok || creds.Expired() { - return p.awaitDoChanRetrieve(ctx, key) - } - - if creds.Expires.Sub(sdk.NowTime()) <= p.refreshWindow { - p.doChanRetrieve(ctx, key) - } - - return *creds, nil -} - -func (p *defaultS3ExpressCredentialsProvider) doChanRetrieve(ctx context.Context, key cacheKey) <-chan singleflight.Result { - return p.sf.DoChan(key.Slug(), func() (interface{}, error) { - return p.retrieve(ctx, key) - }) -} - -func (p *defaultS3ExpressCredentialsProvider) awaitDoChanRetrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) { - ch := p.doChanRetrieve(ctx, key) - - select { - case r := <-ch: - return r.Val.(aws.Credentials), r.Err - case <-ctx.Done(): - return aws.Credentials{}, errors.New("s3express retrieve credentials canceled") - } -} - -func (p *defaultS3ExpressCredentialsProvider) retrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) { - resp, err := p.client.CreateSession(ctx, &CreateSessionInput{ - Bucket: aws.String(key.Bucket), - }) - if err != nil { - return aws.Credentials{}, err - } - - creds, err := credentialsFromResponse(resp) - if err != nil { - return aws.Credentials{}, err - } - - p.cache.Put(key, creds) - return *creds, nil -} - -func credentialsFromResponse(o *CreateSessionOutput) (*aws.Credentials, error) { - if o.Credentials == nil { - return nil, errors.New("s3express session credentials unset") - } - - if o.Credentials.AccessKeyId == nil || o.Credentials.SecretAccessKey == nil || o.Credentials.SessionToken == nil || o.Credentials.Expiration == nil { - return nil, errors.New("s3express session credentials missing one or more required fields") - } - - return &aws.Credentials{ - AccessKeyID: *o.Credentials.AccessKeyId, - SecretAccessKey: *o.Credentials.SecretAccessKey, - SessionToken: *o.Credentials.SessionToken, - CanExpire: true, - Expires: *o.Credentials.Expiration, - }, nil -} - -func gethmac(p, key string) string { - hash := hmac.New(sha256.New, []byte(key)) - hash.Write([]byte(p)) - return string(hash.Sum(nil)) -} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go deleted file mode 100644 index 7c7a7b424..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go +++ /dev/null @@ -1,39 +0,0 @@ -package s3 - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" -) - -// If the caller hasn't provided an S3Express provider, we use our default -// which will grab a reference to the S3 client itself in finalization. -func resolveExpressCredentials(o *Options) { - if o.ExpressCredentials == nil { - o.ExpressCredentials = newDefaultS3ExpressCredentialsProvider() - } -} - -// Config finalizer: if we're using the default S3Express implementation, grab -// a reference to the client for its CreateSession API, and the underlying -// sigv4 credentials provider for cache keying. -func finalizeExpressCredentials(o *Options, c *Client) { - if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { - p.client = c - p.v4creds = o.Credentials - } -} - -// Operation config finalizer: update the sigv4 credentials on the default -// express provider in case it changed to ensure different cache keys -func finalizeOperationExpressCredentials(o *Options, c Client) { - if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { - o.ExpressCredentials = p.CloneWithBaseCredentials(o.Credentials) - } -} - -// NewFromConfig resolver: pull from opaque sources if it exists. -func resolveDisableExpressAuth(cfg aws.Config, o *Options) { - if v, ok := customizations.ResolveDisableExpressAuth(cfg.ConfigSources); ok { - o.DisableS3ExpressSessionAuth = &v - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go deleted file mode 100644 index a9b54535b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go +++ /dev/null @@ -1,43 +0,0 @@ -package s3 - -import ( - "context" - "strings" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" -) - -// isExpressUserAgent tracks whether the caller is using S3 Express -// -// we can only derive this at runtime, so the middleware needs to hold a handle -// to the underlying user-agent manipulator to set the feature flag as -// necessary -type isExpressUserAgent struct { - ua *awsmiddleware.RequestUserAgent -} - -func (*isExpressUserAgent) ID() string { - return "isExpressUserAgent" -} - -func (m *isExpressUserAgent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - const expressSuffix = "--x-s3" - - bucket, ok := bucketFromInput(in.Parameters) - if ok && strings.HasSuffix(bucket, expressSuffix) { - m.ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureS3ExpressBucket) - } - return next.HandleSerialize(ctx, in) -} - -func addIsExpressUserAgent(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - return stack.Serialize.Add(&isExpressUserAgent{ua}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json deleted file mode 100644 index d865ac68d..000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json +++ /dev/null @@ -1,138 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/v4a": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", - "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_AbortMultipartUpload.go", - "api_op_CompleteMultipartUpload.go", - "api_op_CopyObject.go", - "api_op_CreateBucket.go", - "api_op_CreateBucketMetadataTableConfiguration.go", - "api_op_CreateMultipartUpload.go", - "api_op_CreateSession.go", - "api_op_DeleteBucket.go", - "api_op_DeleteBucketAnalyticsConfiguration.go", - "api_op_DeleteBucketCors.go", - "api_op_DeleteBucketEncryption.go", - "api_op_DeleteBucketIntelligentTieringConfiguration.go", - "api_op_DeleteBucketInventoryConfiguration.go", - "api_op_DeleteBucketLifecycle.go", - "api_op_DeleteBucketMetadataTableConfiguration.go", - "api_op_DeleteBucketMetricsConfiguration.go", - "api_op_DeleteBucketOwnershipControls.go", - "api_op_DeleteBucketPolicy.go", - "api_op_DeleteBucketReplication.go", - "api_op_DeleteBucketTagging.go", - "api_op_DeleteBucketWebsite.go", - "api_op_DeleteObject.go", - "api_op_DeleteObjectTagging.go", - "api_op_DeleteObjects.go", - "api_op_DeletePublicAccessBlock.go", - "api_op_GetBucketAccelerateConfiguration.go", - "api_op_GetBucketAcl.go", - "api_op_GetBucketAnalyticsConfiguration.go", - "api_op_GetBucketCors.go", - "api_op_GetBucketEncryption.go", - "api_op_GetBucketIntelligentTieringConfiguration.go", - "api_op_GetBucketInventoryConfiguration.go", - "api_op_GetBucketLifecycleConfiguration.go", - "api_op_GetBucketLocation.go", - "api_op_GetBucketLogging.go", - "api_op_GetBucketMetadataTableConfiguration.go", - "api_op_GetBucketMetricsConfiguration.go", - "api_op_GetBucketNotificationConfiguration.go", - "api_op_GetBucketOwnershipControls.go", - "api_op_GetBucketPolicy.go", - "api_op_GetBucketPolicyStatus.go", - "api_op_GetBucketReplication.go", - "api_op_GetBucketRequestPayment.go", - "api_op_GetBucketTagging.go", - "api_op_GetBucketVersioning.go", - "api_op_GetBucketWebsite.go", - "api_op_GetObject.go", - "api_op_GetObjectAcl.go", - "api_op_GetObjectAttributes.go", - "api_op_GetObjectLegalHold.go", - "api_op_GetObjectLockConfiguration.go", - "api_op_GetObjectRetention.go", - "api_op_GetObjectTagging.go", - "api_op_GetObjectTorrent.go", - "api_op_GetPublicAccessBlock.go", - "api_op_HeadBucket.go", - "api_op_HeadObject.go", - "api_op_ListBucketAnalyticsConfigurations.go", - "api_op_ListBucketIntelligentTieringConfigurations.go", - "api_op_ListBucketInventoryConfigurations.go", - "api_op_ListBucketMetricsConfigurations.go", - "api_op_ListBuckets.go", - "api_op_ListDirectoryBuckets.go", - "api_op_ListMultipartUploads.go", - "api_op_ListObjectVersions.go", - "api_op_ListObjects.go", - "api_op_ListObjectsV2.go", - "api_op_ListParts.go", - 
"api_op_PutBucketAccelerateConfiguration.go", - "api_op_PutBucketAcl.go", - "api_op_PutBucketAnalyticsConfiguration.go", - "api_op_PutBucketCors.go", - "api_op_PutBucketEncryption.go", - "api_op_PutBucketIntelligentTieringConfiguration.go", - "api_op_PutBucketInventoryConfiguration.go", - "api_op_PutBucketLifecycleConfiguration.go", - "api_op_PutBucketLogging.go", - "api_op_PutBucketMetricsConfiguration.go", - "api_op_PutBucketNotificationConfiguration.go", - "api_op_PutBucketOwnershipControls.go", - "api_op_PutBucketPolicy.go", - "api_op_PutBucketReplication.go", - "api_op_PutBucketRequestPayment.go", - "api_op_PutBucketTagging.go", - "api_op_PutBucketVersioning.go", - "api_op_PutBucketWebsite.go", - "api_op_PutObject.go", - "api_op_PutObjectAcl.go", - "api_op_PutObjectLegalHold.go", - "api_op_PutObjectLockConfiguration.go", - "api_op_PutObjectRetention.go", - "api_op_PutObjectTagging.go", - "api_op_PutPublicAccessBlock.go", - "api_op_RestoreObject.go", - "api_op_SelectObjectContent.go", - "api_op_UploadPart.go", - "api_op_UploadPartCopy.go", - "api_op_WriteGetObjectResponse.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "eventstream.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "types/enums.go", - "types/errors.go", - "types/types.go", - "types/types_exported_test.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/s3", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go deleted file mode 100644 index a9e3c4fb4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package s3 - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.73.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go deleted file mode 100644 index 6aae79e7c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go +++ /dev/null @@ -1,214 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" -) - -// ListObjectVersionsAPIClient is a client that implements the ListObjectVersions -// operation -type ListObjectVersionsAPIClient interface { - ListObjectVersions(context.Context, *ListObjectVersionsInput, ...func(*Options)) (*ListObjectVersionsOutput, error) -} - -var _ ListObjectVersionsAPIClient = (*Client)(nil) - -// ListObjectVersionsPaginatorOptions is the paginator options for ListObjectVersions -type ListObjectVersionsPaginatorOptions struct { - // (Optional) The maximum number of Object Versions that you want Amazon S3 to - // return. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. 
- StopOnDuplicateToken bool -} - -// ListObjectVersionsPaginator is a paginator for ListObjectVersions -type ListObjectVersionsPaginator struct { - options ListObjectVersionsPaginatorOptions - client ListObjectVersionsAPIClient - params *ListObjectVersionsInput - firstPage bool - keyMarker *string - versionIDMarker *string - isTruncated bool -} - -// NewListObjectVersionsPaginator returns a new ListObjectVersionsPaginator -func NewListObjectVersionsPaginator(client ListObjectVersionsAPIClient, params *ListObjectVersionsInput, optFns ...func(*ListObjectVersionsPaginatorOptions)) *ListObjectVersionsPaginator { - if params == nil { - params = &ListObjectVersionsInput{} - } - - options := ListObjectVersionsPaginatorOptions{} - if params.MaxKeys != nil { - options.Limit = aws.ToInt32(params.MaxKeys) - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListObjectVersionsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - keyMarker: params.KeyMarker, - versionIDMarker: params.VersionIdMarker, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListObjectVersionsPaginator) HasMorePages() bool { - return p.firstPage || p.isTruncated -} - -// NextPage retrieves the next ListObjectVersions page. -func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.KeyMarker = p.keyMarker - params.VersionIdMarker = p.versionIDMarker - - var limit int32 - if p.options.Limit > 0 { - limit = p.options.Limit - } - if limit > 0 { - params.MaxKeys = aws.Int32(limit) - } - - result, err := p.client.ListObjectVersions(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.keyMarker - p.isTruncated = aws.ToBool(result.IsTruncated) - p.keyMarker = nil - p.versionIDMarker = nil - if aws.ToBool(result.IsTruncated) { - p.keyMarker = result.NextKeyMarker - p.versionIDMarker = result.NextVersionIdMarker - } - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.keyMarker != nil && - *prevToken == *p.keyMarker { - p.isTruncated = false - } - - return result, nil -} - -// ListMultipartUploadsAPIClient is a client that implements the ListMultipartUploads -// operation -type ListMultipartUploadsAPIClient interface { - ListMultipartUploads(context.Context, *ListMultipartUploadsInput, ...func(*Options)) (*ListMultipartUploadsOutput, error) -} - -var _ ListMultipartUploadsAPIClient = (*Client)(nil) - -// ListMultipartUploadsPaginatorOptions is the paginator options for ListMultipartUploads -type ListMultipartUploadsPaginatorOptions struct { - // (Optional) The maximum number of Multipart Uploads that you want Amazon S3 to - // return. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. 
- StopOnDuplicateToken bool -} - -// ListMultipartUploadsPaginator is a paginator for ListMultipartUploads -type ListMultipartUploadsPaginator struct { - options ListMultipartUploadsPaginatorOptions - client ListMultipartUploadsAPIClient - params *ListMultipartUploadsInput - firstPage bool - keyMarker *string - uploadIDMarker *string - isTruncated bool -} - -// NewListMultipartUploadsPaginator returns a new ListMultipartUploadsPaginator -func NewListMultipartUploadsPaginator(client ListMultipartUploadsAPIClient, params *ListMultipartUploadsInput, optFns ...func(*ListMultipartUploadsPaginatorOptions)) *ListMultipartUploadsPaginator { - if params == nil { - params = &ListMultipartUploadsInput{} - } - - options := ListMultipartUploadsPaginatorOptions{} - if params.MaxUploads != nil { - options.Limit = aws.ToInt32(params.MaxUploads) - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListMultipartUploadsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - keyMarker: params.KeyMarker, - uploadIDMarker: params.UploadIdMarker, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListMultipartUploadsPaginator) HasMorePages() bool { - return p.firstPage || p.isTruncated -} - -// NextPage retrieves the next ListMultipartUploads page. -func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.KeyMarker = p.keyMarker - params.UploadIdMarker = p.uploadIDMarker - - var limit int32 - if p.options.Limit > 0 { - limit = p.options.Limit - } - if limit > 0 { - params.MaxUploads = aws.Int32(limit) - } - - result, err := p.client.ListMultipartUploads(ctx, ¶ms, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.keyMarker - p.isTruncated = aws.ToBool(result.IsTruncated) - p.keyMarker = nil - p.uploadIDMarker = nil - if aws.ToBool(result.IsTruncated) { - p.keyMarker = result.NextKeyMarker - p.uploadIDMarker = result.NextUploadIdMarker - } - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.keyMarker != nil && - *prevToken == *p.keyMarker { - p.isTruncated = false - } - - return result, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go deleted file mode 100644 index 97b5771bb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go +++ /dev/null @@ -1,106 +0,0 @@ -package arn - -import ( - "fmt" - "strings" - - awsarn "github.com/aws/aws-sdk-go-v2/aws/arn" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" -) - -const ( - s3Namespace = "s3" - s3ObjectsLambdaNamespace = "s3-object-lambda" - s3OutpostsNamespace = "s3-outposts" -) - -// ParseEndpointARN parses a given generic aws ARN into a s3 arn resource. 
-func ParseEndpointARN(v awsarn.ARN) (arn.Resource, error) { - return arn.ParseResource(v, accessPointResourceParser) -} - -func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { - resParts := arn.SplitResource(a.Resource) - - switch resParts[0] { - case "accesspoint": - switch a.Service { - case s3Namespace: - return arn.ParseAccessPointResource(a, resParts[1:]) - case s3ObjectsLambdaNamespace: - return parseS3ObjectLambdaAccessPointResource(a, resParts) - default: - return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)} - } - case "outpost": - if a.Service != s3OutpostsNamespace { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not %s"} - } - return parseOutpostAccessPointResource(a, resParts[1:]) - default: - return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} - } -} - -func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { - // outpost accesspoint arn is only valid if service is s3-outposts - if a.Service != "s3-outposts" { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} - } - - if len(resParts) == 0 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - if len(resParts) < 3 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ - ARN: a, Reason: "access-point resource not set in Outpost ARN", - } - } - - resID := strings.TrimSpace(resParts[0]) - if len(resID) == 0 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} - } - - var outpostAccessPointARN = arn.OutpostAccessPointARN{} - switch resParts[1] { - case "accesspoint": - // Do not allow region-less outpost access-point arns. 
- if len(a.Region) == 0 { - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "region is not set"} - } - - accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) - if err != nil { - return arn.OutpostAccessPointARN{}, err - } - // set access-point arn - outpostAccessPointARN.AccessPointARN = accessPointARN - default: - return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} - } - - // set outpost id - outpostAccessPointARN.OutpostID = resID - return outpostAccessPointARN, nil -} - -func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { - if a.Service != s3ObjectsLambdaNamespace { - return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} - } - - if len(a.Region) == 0 { - return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} - } - - accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) - if err != nil { - return arn.S3ObjectLambdaAccessPointARN{}, err - } - - return arn.S3ObjectLambdaAccessPointARN{ - AccessPointARN: accessPointARN, - }, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go deleted file mode 100644 index 91b8fde0d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go +++ /dev/null @@ -1,21 +0,0 @@ -package customizations - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -type bucketKey struct{} - -// SetBucket stores a bucket name within the request context, which is required -// for a variety of custom S3 behaviors. -func SetBucket(ctx context.Context, bucket string) context.Context { - return middleware.WithStackValue(ctx, bucketKey{}, bucket) -} - -// GetBucket retrieves a stored bucket name within a context. -func GetBucket(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, bucketKey{}).(string) - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go deleted file mode 100644 index e1d1cbefa..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Package customizations provides customizations for the Amazon S3 API client. - -This package provides support for following S3 customizations - - ProcessARN Middleware: processes an ARN if provided as input and updates the endpoint as per the arn type - - UpdateEndpoint Middleware: resolves a custom endpoint as per s3 config options - - RemoveBucket Middleware: removes a serialized bucket name from request url path - - processResponseWith200Error Middleware: Deserializing response error with 200 status code - -# Virtual Host style url addressing - -Since serializers serialize by default as path style url, we use customization -to modify the endpoint url when `UsePathStyle` option on S3Client is unset or -false. This flag will be ignored if `UseAccelerate` option is set to true. - -If UseAccelerate is not enabled, and the bucket name is not a valid hostname -label, they SDK will fallback to forcing the request to be made as if -UsePathStyle was enabled. 
This behavior is also used if UseDualStackEndpoint is enabled. - -https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description - -# Transfer acceleration - -By default S3 Transfer acceleration support is disabled. By enabling `UseAccelerate` -option on S3Client, one can enable s3 transfer acceleration support. Transfer -acceleration only works with Virtual Host style addressing, and thus `UsePathStyle` -option if set is ignored. Transfer acceleration is not supported for S3 operations -DeleteBucket, ListBuckets, and CreateBucket. - -# Dualstack support - -By default dualstack support for s3 client is disabled. By enabling `UseDualstack` -option on s3 client, you can enable dualstack endpoint support. - -# Endpoint customizations - -Customizations to lookup ARN, process ARN needs to happen before request serialization. -UpdateEndpoint middleware which mutates resources based on Options such as -UseDualstack, UseAccelerate for modifying resolved endpoint are executed after -request serialization. Remove bucket middleware is executed after -an request is serialized, and removes the serialized bucket name from request path - - Middleware layering: - - Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step - - Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware - -Customization options: - - UseARNRegion (Disabled by Default) - - UsePathStyle (Disabled by Default) - - UseAccelerate (Disabled by Default) - - UseDualstack (Disabled by Default) - -# Handle Error response with 200 status code - -S3 operations: CopyObject, CompleteMultipartUpload, UploadPartCopy can have an -error Response with status code 2xx. The processResponseWith200Error middleware -customizations enables SDK to check for an error within response body prior to -deserialization. - -As the check for 2xx response containing an error needs to be performed earlier -than response deserialization. Since the behavior of Deserialization is in -reverse order to the other stack steps its easier to consider that "after" means -"before". - - Middleware layering: - - HTTP Response -> handle 200 error customization -> deserialize -*/ -package customizations diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go deleted file mode 100644 index 8cc0b3624..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go +++ /dev/null @@ -1,44 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// S3ExpressCredentialsProvider retrieves credentials for the S3Express storage -// class. -type S3ExpressCredentialsProvider interface { - Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) -} - -// ExpressIdentityResolver retrieves identity for the S3Express storage class. -type ExpressIdentityResolver struct { - Provider S3ExpressCredentialsProvider -} - -var _ (auth.IdentityResolver) = (*ExpressIdentityResolver)(nil) - -// GetIdentity retrieves AWS credentials using the underlying provider. 
-func (v *ExpressIdentityResolver) GetIdentity(ctx context.Context, props smithy.Properties) ( - auth.Identity, error, -) { - bucket, ok := GetIdentityPropertiesBucket(&props) - if !ok { - bucket = GetBucket(ctx) - } - if bucket == "" { - return nil, fmt.Errorf("bucket name is missing") - } - - creds, err := v.Provider.Retrieve(ctx, bucket) - if err != nil { - return nil, fmt.Errorf("get credentials: %v", err) - } - - return &internalauthsmithy.CredentialsAdapter{Credentials: creds}, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go deleted file mode 100644 index bb22d3474..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go +++ /dev/null @@ -1,18 +0,0 @@ -package customizations - -type s3DisableExpressAuthProvider interface { - GetS3DisableExpressAuth() (bool, bool) -} - -// ResolveDisableExpressAuth pulls S3DisableExpressAuth setting from config -// sources. -func ResolveDisableExpressAuth(configs []interface{}) (value bool, exists bool) { - for _, cfg := range configs { - if p, ok := cfg.(s3DisableExpressAuthProvider); ok { - if value, exists = p.GetS3DisableExpressAuth(); exists { - break - } - } - } - return -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go deleted file mode 100644 index cf3ff5966..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go +++ /dev/null @@ -1,42 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - - ictx "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/service/internal/checksum" - "github.com/aws/smithy-go/middleware" -) - -type expressDefaultChecksumMiddleware struct{} - -func (*expressDefaultChecksumMiddleware) ID() string { - return "expressDefaultChecksum" -} - -func (*expressDefaultChecksumMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if ictx.GetS3Backend(ctx) == ictx.S3BackendS3Express && ictx.GetChecksumInputAlgorithm(ctx) == "" { - ctx = ictx.SetChecksumInputAlgorithm(ctx, string(checksum.AlgorithmCRC32)) - } - return next.HandleFinalize(ctx, in) -} - -// AddExpressDefaultChecksumMiddleware appends a step to default to CRC32 for -// S3Express requests. This should only be applied to operations where a -// checksum is required (e.g. DeleteObject). 
-func AddExpressDefaultChecksumMiddleware(s *middleware.Stack) error { - err := s.Finalize.Insert( - &expressDefaultChecksumMiddleware{}, - "AWSChecksum:ComputeInputPayloadChecksum", - middleware.Before, - ) - if err != nil { - return fmt.Errorf("add expressDefaultChecksum: %v", err) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go deleted file mode 100644 index 171de4613..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go +++ /dev/null @@ -1,21 +0,0 @@ -package customizations - -import "github.com/aws/smithy-go" - -// GetPropertiesBackend returns a resolved endpoint backend from the property -// set. -func GetPropertiesBackend(p *smithy.Properties) string { - v, _ := p.Get("backend").(string) - return v -} - -// GetIdentityPropertiesBucket returns the S3 bucket from identity properties. -func GetIdentityPropertiesBucket(ip *smithy.Properties) (string, bool) { - v, ok := ip.Get(bucketKey{}).(string) - return v, ok -} - -// SetIdentityPropertiesBucket sets the S3 bucket to identity properties. -func SetIdentityPropertiesBucket(ip *smithy.Properties, bucket string) { - ip.Set(bucketKey{}, bucket) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go deleted file mode 100644 index 545e5b220..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go +++ /dev/null @@ -1,109 +0,0 @@ -package customizations - -import ( - "context" - "net/http" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/smithy-go/middleware" -) - -const ( - s3ExpressSignerVersion = "com.amazonaws.s3#sigv4express" - headerAmzSessionToken = "x-amz-s3session-token" -) - -// adapts a v4 signer for S3Express -type s3ExpressSignerAdapter struct { - v4 v4.HTTPSigner -} - -// SignHTTP performs S3Express signing on a request, which is identical to -// SigV4 signing save for an additional header containing the S3Express -// session token. -func (s *s3ExpressSignerAdapter) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error { - r.Header.Set(headerAmzSessionToken, credentials.SessionToken) - optFns = append(optFns, func(o *v4.SignerOptions) { - o.DisableSessionToken = true - }) - return s.v4.SignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) -} - -// adapts S3ExpressCredentialsProvider to the standard AWS -// CredentialsProvider interface -type s3ExpressCredentialsAdapter struct { - provider S3ExpressCredentialsProvider - bucket string -} - -func (c *s3ExpressCredentialsAdapter) Retrieve(ctx context.Context) (aws.Credentials, error) { - return c.provider.Retrieve(ctx, c.bucket) -} - -// S3ExpressSignHTTPRequestMiddleware signs S3 S3Express requests. -// -// This is NOT mutually exclusive with existing v4 or v4a signer handling on -// the stack itself, but only one handler will actually perform signing based -// on the provided signing version in the context. 
-type S3ExpressSignHTTPRequestMiddleware struct { - Credentials S3ExpressCredentialsProvider - Signer v4.HTTPSigner - LogSigning bool -} - -// ID identifies S3ExpressSignHTTPRequestMiddleware. -func (*S3ExpressSignHTTPRequestMiddleware) ID() string { - return "S3ExpressSigning" -} - -// HandleFinalize will sign the request if the S3Express signer has been -// selected. -func (m *S3ExpressSignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - if GetSignerVersion(ctx) != s3ExpressSignerVersion { - return next.HandleFinalize(ctx, in) - } - - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: m.credentialsAdapter(ctx), - Signer: m.signerAdapter(), - LogSigning: m.LogSigning, - }) - return mw.HandleFinalize(ctx, in, next) -} - -func (m *S3ExpressSignHTTPRequestMiddleware) credentialsAdapter(ctx context.Context) aws.CredentialsProvider { - return &s3ExpressCredentialsAdapter{ - provider: m.Credentials, - bucket: GetBucket(ctx), - } -} - -func (m *S3ExpressSignHTTPRequestMiddleware) signerAdapter() v4.HTTPSigner { - return &s3ExpressSignerAdapter{v4: m.Signer} -} - -type s3ExpressPresignerAdapter struct { - v4 v4.HTTPPresigner -} - -// SignHTTP performs S3Express signing on a request, which is identical to -// SigV4 signing save for an additional header containing the S3Express -// session token. -func (s *s3ExpressPresignerAdapter) PresignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) ( - string, http.Header, error, -) { - r.Header.Set(headerAmzSessionToken, credentials.SessionToken) - optFns = append(optFns, func(o *v4.SignerOptions) { - o.DisableSessionToken = true - }) - return s.v4.PresignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) -} - -var ( - _ aws.CredentialsProvider = &s3ExpressCredentialsAdapter{} - _ v4.HTTPSigner = &s3ExpressSignerAdapter{} -) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go deleted file mode 100644 index e3ec7f011..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go +++ /dev/null @@ -1,61 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ExpressSigner signs requests for the sigv4-s3express auth scheme. -// -// This signer respects the aws.auth#sigv4 properties for signing name and -// region. -type ExpressSigner struct { - Signer v4.HTTPSigner - Logger logging.Logger - LogSigning bool -} - -var _ (smithyhttp.Signer) = (*ExpressSigner)(nil) - -// SignRequest signs the request with the provided identity. 
-func (v *ExpressSigner) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { - ca, ok := identity.(*internalauthsmithy.CredentialsAdapter) - if !ok { - return fmt.Errorf("unexpected identity type: %T", identity) - } - - name, ok := smithyhttp.GetSigV4SigningName(&props) - if !ok { - return fmt.Errorf("sigv4 signing name is required for s3express variant") - } - - region, ok := smithyhttp.GetSigV4SigningRegion(&props) - if !ok { - return fmt.Errorf("sigv4 signing region is required for s3express variant") - } - - hash := v4.GetPayloadHash(ctx) - - r.Header.Set(headerAmzSessionToken, ca.Credentials.SessionToken) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { - o.DisableSessionToken = true - - o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) - - o.Logger = v.Logger - o.LogSigning = v.LogSigning - }) - if err != nil { - return fmt.Errorf("sign http: %v", err) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go deleted file mode 100644 index 2b11b1fa2..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go +++ /dev/null @@ -1,74 +0,0 @@ -package customizations - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "strings" - - "github.com/aws/smithy-go" - smithyxml "github.com/aws/smithy-go/encoding/xml" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// HandleResponseErrorWith200Status check for S3 200 error response. -// If an s3 200 error is found, status code for the response is modified temporarily to -// 5xx response status code. -func HandleResponseErrorWith200Status(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&processResponseFor200ErrorMiddleware{}, "OperationDeserializer", middleware.After) -} - -// middleware to process raw response and look for error response with 200 status code -type processResponseFor200ErrorMiddleware struct{} - -// ID returns the middleware ID. -func (*processResponseFor200ErrorMiddleware) ID() string { - return "S3:ProcessResponseFor200Error" -} - -func (m *processResponseFor200ErrorMiddleware) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - // check if response status code is 2xx. - if response.StatusCode < 200 || response.StatusCode >= 300 { - return - } - - var readBuff bytes.Buffer - body := io.TeeReader(response.Body, &readBuff) - - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("received empty response payload"), - } - } - - // rewind response body - response.Body = ioutil.NopCloser(io.MultiReader(&readBuff, response.Body)) - - // if start tag is "Error", the response is consider error response. 
- if strings.EqualFold(t.Name.Local, "Error") { - // according to https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/ - // 200 error responses are similar to 5xx errors. - response.StatusCode = 500 - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go deleted file mode 100644 index 87f7a2232..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go +++ /dev/null @@ -1,22 +0,0 @@ -package customizations - -import ( - "github.com/aws/smithy-go/transport/http" - "strings" -) - -func updateS3HostForS3AccessPoint(req *http.Request) { - updateHostPrefix(req, "s3", s3AccessPoint) -} - -func updateS3HostForS3ObjectLambda(req *http.Request) { - updateHostPrefix(req, "s3", s3ObjectLambda) -} - -func updateHostPrefix(req *http.Request, oldEndpointPrefix, newEndpointPrefix string) { - host := req.URL.Host - if strings.HasPrefix(host, oldEndpointPrefix) { - // For example if oldEndpointPrefix=s3 would replace to newEndpointPrefix - req.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):] - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go deleted file mode 100644 index f4bbb4b6d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go +++ /dev/null @@ -1,49 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "strconv" - "time" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// AddExpiresOnPresignedURL represents a build middleware used to assign -// expiration on a presigned URL. -type AddExpiresOnPresignedURL struct { - - // Expires is time.Duration within which presigned url should be expired. - // This should be the duration in seconds the presigned URL should be considered valid for. - // By default the S3 presigned url expires in 15 minutes ie. 900 seconds. 
- Expires time.Duration -} - -// ID representing the middleware -func (*AddExpiresOnPresignedURL) ID() string { - return "S3:AddExpiresOnPresignedURL" -} - -// HandleBuild handles the build step middleware behavior -func (m *AddExpiresOnPresignedURL) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - // if expiration is unset skip this middleware - if m.Expires == 0 { - // default to 15 * time.Minutes - m.Expires = 15 * time.Minute - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", req) - } - - // set S3 X-AMZ-Expires header - query := req.URL.Query() - query.Set("X-Amz-Expires", strconv.FormatInt(int64(m.Expires/time.Second), 10)) - req.URL.RawQuery = query.Encode() - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go deleted file mode 100644 index bbc971f2a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go +++ /dev/null @@ -1,568 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "net/url" - "strings" - - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/internal/v4a" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn" - s3arn "github.com/aws/aws-sdk-go-v2/service/s3/internal/arn" - "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" -) - -const ( - s3AccessPoint = "s3-accesspoint" - s3ObjectLambda = "s3-object-lambda" -) - -// processARNResource is used to process an ARN resource. -type processARNResource struct { - - // UseARNRegion indicates if region parsed from an ARN should be used. - UseARNRegion bool - - // UseAccelerate indicates if s3 transfer acceleration is enabled - UseAccelerate bool - - // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver - EndpointResolver EndpointResolver - - // EndpointResolverOptions used by endpoint resolver - EndpointResolverOptions EndpointResolverOptions - - // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled - DisableMultiRegionAccessPoints bool -} - -// ID returns the middleware ID. 
-func (*processARNResource) ID() string { return "S3:ProcessARNResource" } - -func (m *processARNResource) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - // check if arn was provided, if not skip this middleware - arnValue, ok := s3shared.GetARNResourceFromContext(ctx) - if !ok { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // parse arn into an endpoint arn wrt to service - resource, err := s3arn.ParseEndpointARN(arnValue) - if err != nil { - return out, metadata, err - } - - // build a resource request struct - resourceRequest := s3shared.ResourceRequest{ - Resource: resource, - UseARNRegion: m.UseARNRegion, - UseFIPS: m.EndpointResolverOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled, - RequestRegion: awsmiddleware.GetRegion(ctx), - SigningRegion: awsmiddleware.GetSigningRegion(ctx), - PartitionID: awsmiddleware.GetPartitionID(ctx), - } - - // switch to correct endpoint updater - switch tv := resource.(type) { - case arn.AccessPointARN: - // multi-region arns do not need to validate for cross partition request - if len(tv.Region) != 0 { - // validate resource request - if err := validateRegionForResourceRequest(resourceRequest); err != nil { - return out, metadata, err - } - } - - // Special handling for region-less ap-arns. - if len(tv.Region) == 0 { - // check if multi-region arn support is disabled - if m.DisableMultiRegionAccessPoints { - return out, metadata, fmt.Errorf("Invalid configuration, Multi-Region access point ARNs are disabled") - } - - // Do not allow dual-stack configuration with multi-region arns. - if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - } - - // check if accelerate - if m.UseAccelerate { - return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // fetch arn region to resolve request - resolveRegion := tv.Region - // check if request region is FIPS - if resourceRequest.UseFIPS && len(resolveRegion) == 0 { - // Do not allow Fips support within multi-region arns. 
- return out, metadata, s3shared.NewClientConfiguredForFIPSError( - tv, resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - var requestBuilder func(context.Context, accesspointOptions) (context.Context, error) - if len(resolveRegion) == 0 { - requestBuilder = buildMultiRegionAccessPointsRequest - } else { - requestBuilder = buildAccessPointRequest - } - - // build request as per accesspoint builder - ctx, err = requestBuilder(ctx, accesspointOptions{ - processARNResource: *m, - request: req, - resource: tv, - resolveRegion: resolveRegion, - partitionID: resourceRequest.PartitionID, - requestRegion: resourceRequest.RequestRegion, - }) - if err != nil { - return out, metadata, err - } - - case arn.S3ObjectLambdaAccessPointARN: - // validate region for resource request - if err := validateRegionForResourceRequest(resourceRequest); err != nil { - return out, metadata, err - } - - // check if accelerate - if m.UseAccelerate { - return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // check if dualstack - if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // fetch arn region to resolve request - resolveRegion := tv.Region - - // build access point request - ctx, err = buildS3ObjectLambdaAccessPointRequest(ctx, accesspointOptions{ - processARNResource: *m, - request: req, - resource: tv.AccessPointARN, - resolveRegion: resolveRegion, - partitionID: resourceRequest.PartitionID, - requestRegion: resourceRequest.RequestRegion, - }) - if err != nil { - return out, metadata, err - } - - // process outpost accesspoint ARN - case arn.OutpostAccessPointARN: - // validate region for resource request - if err := validateRegionForResourceRequest(resourceRequest); err != nil { - return out, metadata, err - } - - // check if accelerate - if m.UseAccelerate { - return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // check if dual stack - if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // check if request region is FIPS - if resourceRequest.UseFIPS { - return out, metadata, s3shared.NewFIPSConfigurationError(tv, resourceRequest.PartitionID, - resourceRequest.RequestRegion, nil) - } - - // build outpost access point request - ctx, err = buildOutpostAccessPointRequest(ctx, outpostAccessPointOptions{ - processARNResource: *m, - resource: tv, - request: req, - partitionID: resourceRequest.PartitionID, - requestRegion: resourceRequest.RequestRegion, - }) - if err != nil { - return out, metadata, err - } - - default: - return out, metadata, s3shared.NewInvalidARNError(resource, nil) - } - - return next.HandleSerialize(ctx, in) -} - -// validate if s3 resource and request region config is compatible. 
-func validateRegionForResourceRequest(resourceRequest s3shared.ResourceRequest) error { - // check if resourceRequest leads to a cross partition error - v, err := resourceRequest.IsCrossPartition() - if err != nil { - return err - } - if v { - // if cross partition - return s3shared.NewClientPartitionMismatchError(resourceRequest.Resource, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - // check if resourceRequest leads to a cross region error - if !resourceRequest.AllowCrossRegion() && resourceRequest.IsCrossRegion() { - // if cross region, but not use ARN region is not enabled - return s3shared.NewClientRegionMismatchError(resourceRequest.Resource, - resourceRequest.PartitionID, resourceRequest.RequestRegion, nil) - } - - return nil -} - -// === Accesspoint ========== - -type accesspointOptions struct { - processARNResource - request *http.Request - resource arn.AccessPointARN - resolveRegion string - partitionID string - requestRegion string -} - -func buildAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { - tv := options.resource - req := options.request - resolveRegion := options.resolveRegion - - resolveService := tv.Service - - ero := options.EndpointResolverOptions - ero.Logger = middleware.GetLogger(ctx) - ero.ResolvedRegion = "" // clear endpoint option's resolved region so that we resolve using the passed in region - - // resolve endpoint - endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) - if err != nil { - return ctx, s3shared.NewFailedToResolveEndpointError( - tv, - options.partitionID, - options.requestRegion, - err, - ) - } - - // assign resolved endpoint url to request url - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - // Must sign with s3-object-lambda - ctx = awsmiddleware.SetSigningName(ctx, resolveService) - } - - if len(endpoint.SigningRegion) != 0 { - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - } else { - ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) - } - - // update serviceID to "s3-accesspoint" - ctx = awsmiddleware.SetServiceID(ctx, s3AccessPoint) - - // disable host prefix behavior - ctx = http.DisableEndpointHostPrefix(ctx, true) - - // remove the serialized arn in place of /{Bucket} - ctx = setBucketToRemoveOnContext(ctx, tv.String()) - - // skip arn processing, if arn region resolves to a immutable endpoint - if endpoint.HostnameImmutable { - return ctx, nil - } - - updateS3HostForS3AccessPoint(req) - - ctx, err = buildAccessPointHostPrefix(ctx, req, tv) - if err != nil { - return ctx, err - } - - return ctx, nil -} - -func buildS3ObjectLambdaAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { - tv := options.resource - req := options.request - resolveRegion := options.resolveRegion - - resolveService := tv.Service - - ero := options.EndpointResolverOptions - ero.Logger = middleware.GetLogger(ctx) - ero.ResolvedRegion = "" // clear endpoint options resolved region so we resolve the passed in region - - // resolve endpoint - endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) - if err != nil { - return ctx, s3shared.NewFailedToResolveEndpointError( - tv, - options.partitionID, - options.requestRegion, - err, - ) - } 
- - // assign resolved endpoint url to request url - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - // Must sign with s3-object-lambda - ctx = awsmiddleware.SetSigningName(ctx, resolveService) - } - - if len(endpoint.SigningRegion) != 0 { - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - } else { - ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) - } - - // update serviceID to "s3-object-lambda" - ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda) - - // disable host prefix behavior - ctx = http.DisableEndpointHostPrefix(ctx, true) - - // remove the serialized arn in place of /{Bucket} - ctx = setBucketToRemoveOnContext(ctx, tv.String()) - - // skip arn processing, if arn region resolves to a immutable endpoint - if endpoint.HostnameImmutable { - return ctx, nil - } - - if endpoint.Source == aws.EndpointSourceServiceMetadata { - updateS3HostForS3ObjectLambda(req) - } - - ctx, err = buildAccessPointHostPrefix(ctx, req, tv) - if err != nil { - return ctx, err - } - - return ctx, nil -} - -func buildMultiRegionAccessPointsRequest(ctx context.Context, options accesspointOptions) (context.Context, error) { - const s3GlobalLabel = "s3-global." - const accesspointLabel = "accesspoint." - - tv := options.resource - req := options.request - resolveService := tv.Service - resolveRegion := options.requestRegion - arnPartition := tv.Partition - - // resolve endpoint - ero := options.EndpointResolverOptions - ero.Logger = middleware.GetLogger(ctx) - - endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) - if err != nil { - return ctx, s3shared.NewFailedToResolveEndpointError( - tv, - options.partitionID, - options.requestRegion, - err, - ) - } - - // set signing region and version for MRAP - endpoint.SigningRegion = "*" - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = SetSignerVersion(ctx, v4a.Version) - - if len(endpoint.SigningName) != 0 { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - ctx = awsmiddleware.SetSigningName(ctx, resolveService) - } - - // skip arn processing, if arn region resolves to a immutable endpoint - if endpoint.HostnameImmutable { - return ctx, nil - } - - // modify endpoint host to use s3-global host prefix - scheme := strings.SplitN(endpoint.URL, "://", 2) - dnsSuffix, err := endpoints.GetDNSSuffix(arnPartition, ero) - if err != nil { - return ctx, fmt.Errorf("Error determining dns suffix from arn partition, %w", err) - } - // set url as per partition - endpoint.URL = scheme[0] + "://" + s3GlobalLabel + dnsSuffix - - // assign resolved endpoint url to request url - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - // build access point host prefix - accessPointHostPrefix := tv.AccessPointName + "." 
+ accesspointLabel - - // add host prefix to url - req.URL.Host = accessPointHostPrefix + req.URL.Host - if len(req.Host) > 0 { - req.Host = accessPointHostPrefix + req.Host - } - - // validate the endpoint host - if err := http.ValidateEndpointHost(req.URL.Host); err != nil { - return ctx, fmt.Errorf("endpoint validation error: %w, when using arn %v", err, tv) - } - - // disable host prefix behavior - ctx = http.DisableEndpointHostPrefix(ctx, true) - - // remove the serialized arn in place of /{Bucket} - ctx = setBucketToRemoveOnContext(ctx, tv.String()) - - return ctx, nil -} - -func buildAccessPointHostPrefix(ctx context.Context, req *http.Request, tv arn.AccessPointARN) (context.Context, error) { - // add host prefix for access point - accessPointHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." - req.URL.Host = accessPointHostPrefix + req.URL.Host - if len(req.Host) > 0 { - req.Host = accessPointHostPrefix + req.Host - } - - // validate the endpoint host - if err := http.ValidateEndpointHost(req.URL.Host); err != nil { - return ctx, s3shared.NewInvalidARNError(tv, err) - } - - return ctx, nil -} - -// ====== Outpost Accesspoint ======== - -type outpostAccessPointOptions struct { - processARNResource - request *http.Request - resource arn.OutpostAccessPointARN - partitionID string - requestRegion string -} - -func buildOutpostAccessPointRequest(ctx context.Context, options outpostAccessPointOptions) (context.Context, error) { - tv := options.resource - req := options.request - - resolveRegion := tv.Region - resolveService := tv.Service - endpointsID := resolveService - if strings.EqualFold(resolveService, "s3-outposts") { - // assign endpoints ID as "S3" - endpointsID = "s3" - } - - ero := options.EndpointResolverOptions - ero.Logger = middleware.GetLogger(ctx) - ero.ResolvedRegion = "" - - // resolve regional endpoint for resolved region. - endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero) - if err != nil { - return ctx, s3shared.NewFailedToResolveEndpointError( - tv, - options.partitionID, - options.requestRegion, - err, - ) - } - - // assign resolved endpoint url to request url - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - // assign resolved service from arn as signing name - if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - ctx = awsmiddleware.SetSigningName(ctx, resolveService) - } - - if len(endpoint.SigningRegion) != 0 { - // redirect signer to use resolved endpoint signing name and region - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - } else { - ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion) - } - - // update serviceID to resolved service id - ctx = awsmiddleware.SetServiceID(ctx, resolveService) - - // disable host prefix behavior - ctx = http.DisableEndpointHostPrefix(ctx, true) - - // remove the serialized arn in place of /{Bucket} - ctx = setBucketToRemoveOnContext(ctx, tv.String()) - - // skip further customizations, if arn region resolves to a immutable endpoint - if endpoint.HostnameImmutable { - return ctx, nil - } - - updateHostPrefix(req, endpointsID, resolveService) - - // add host prefix for s3-outposts - outpostAPHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." + tv.OutpostID + "." 
- req.URL.Host = outpostAPHostPrefix + req.URL.Host - if len(req.Host) > 0 { - req.Host = outpostAPHostPrefix + req.Host - } - - // validate the endpoint host - if err := http.ValidateEndpointHost(req.URL.Host); err != nil { - return ctx, s3shared.NewInvalidARNError(tv, err) - } - - return ctx, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go deleted file mode 100644 index cf3f4dc8b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go +++ /dev/null @@ -1,63 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" -) - -// removeBucketFromPathMiddleware needs to be executed after serialize step is performed -type removeBucketFromPathMiddleware struct { -} - -func (m *removeBucketFromPathMiddleware) ID() string { - return "S3:RemoveBucketFromPathMiddleware" -} - -func (m *removeBucketFromPathMiddleware) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - // check if a bucket removal from HTTP path is required - bucket, ok := getRemoveBucketFromPath(ctx) - if !ok { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - removeBucketFromPath(req.URL, bucket) - return next.HandleSerialize(ctx, in) -} - -type removeBucketKey struct { - bucket string -} - -// setBucketToRemoveOnContext sets the bucket name to be removed. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func setBucketToRemoveOnContext(ctx context.Context, bucket string) context.Context { - return middleware.WithStackValue(ctx, removeBucketKey{}, bucket) -} - -// getRemoveBucketFromPath returns the bucket name to remove from the path. -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. 
-func getRemoveBucketFromPath(ctx context.Context) (string, bool) { - v, ok := middleware.GetStackValue(ctx, removeBucketKey{}).(string) - return v, ok -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go deleted file mode 100644 index 6e1d44724..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go +++ /dev/null @@ -1,88 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" - "net/url" -) - -type s3ObjectLambdaEndpoint struct { - // whether the operation should use the s3-object-lambda endpoint - UseEndpoint bool - - // use transfer acceleration - UseAccelerate bool - - EndpointResolver EndpointResolver - EndpointResolverOptions EndpointResolverOptions -} - -func (t *s3ObjectLambdaEndpoint) ID() string { - return "S3:ObjectLambdaEndpoint" -} - -func (t *s3ObjectLambdaEndpoint) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - if !t.UseEndpoint { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request) - } - - if t.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { - return out, metadata, fmt.Errorf("client configured for dualstack but not supported for operation") - } - - if t.UseAccelerate { - return out, metadata, fmt.Errorf("client configured for accelerate but not supported for operation") - } - - region := awsmiddleware.GetRegion(ctx) - - ero := t.EndpointResolverOptions - - endpoint, err := t.EndpointResolver.ResolveEndpoint(region, ero) - if err != nil { - return out, metadata, err - } - - // Set the ServiceID and SigningName - ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda) - - if len(endpoint.SigningName) > 0 && endpoint.Source == aws.EndpointSourceCustom { - ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName) - } else { - ctx = awsmiddleware.SetSigningName(ctx, s3ObjectLambda) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, err - } - - if len(endpoint.SigningRegion) > 0 { - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - } else { - ctx = awsmiddleware.SetSigningRegion(ctx, region) - } - - if endpoint.Source == aws.EndpointSourceServiceMetadata || !endpoint.HostnameImmutable { - updateS3HostForS3ObjectLambda(req) - } - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go deleted file mode 100644 index 756823cb7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go +++ /dev/null @@ -1,227 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/internal/v4a" - 
"github.com/aws/smithy-go/middleware" -) - -type signerVersionKey struct{} - -// GetSignerVersion retrieves the signer version to use for signing -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func GetSignerVersion(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, signerVersionKey{}).(string) - return v -} - -// SetSignerVersion sets the signer version to be used for signing the request -// -// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues -// to clear all stack values. -func SetSignerVersion(ctx context.Context, version string) context.Context { - return middleware.WithStackValue(ctx, signerVersionKey{}, version) -} - -// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. -type SignHTTPRequestMiddlewareOptions struct { - - // credential provider - CredentialsProvider aws.CredentialsProvider - - // log signing - LogSigning bool - - // v4 signer - V4Signer v4.HTTPSigner - - //v4a signer - V4aSigner v4a.HTTPSigner -} - -// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests -func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { - return &SignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - v4Signer: options.V4Signer, - v4aSigner: options.V4aSigner, - logSigning: options.LogSigning, - } -} - -// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation to select HTTP Signing method -type SignHTTPRequestMiddleware struct { - - // credential provider - credentialsProvider aws.CredentialsProvider - - // log signing - logSigning bool - - // v4 signer - v4Signer v4.HTTPSigner - - //v4a signer - v4aSigner v4a.HTTPSigner -} - -// ID is the SignHTTPRequestMiddleware identifier -func (s *SignHTTPRequestMiddleware) ID() string { - return "Signing" -} - -// HandleFinalize will take the provided input and handle signing for either -// SigV4 or SigV4A as called for. -func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - sv := GetSignerVersion(ctx) - - if strings.EqualFold(sv, v4.Version) { - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: s.credentialsProvider, - Signer: s.v4Signer, - LogSigning: s.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - } else if strings.EqualFold(sv, v4a.Version) { - v4aCredentialProvider, ok := s.credentialsProvider.(v4a.CredentialsProvider) - if !ok { - return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer") - } - - mw := v4a.NewSignHTTPRequestMiddleware(v4a.SignHTTPRequestMiddlewareOptions{ - Credentials: v4aCredentialProvider, - Signer: s.v4aSigner, - LogSigning: s.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - } - - return next.HandleFinalize(ctx, in) -} - -// RegisterSigningMiddleware registers the wrapper signing middleware to the stack. If a signing middleware is already -// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the -// finalize step. 
-func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) { - const signedID = "Signing" - _, present := stack.Finalize.Get(signedID) - if present { - _, err = stack.Finalize.Swap(signedID, signingMiddleware) - } else { - err = stack.Finalize.Add(signingMiddleware, middleware.After) - } - return err -} - -// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. -type PresignHTTPRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - ExpressCredentials S3ExpressCredentialsProvider - V4Presigner v4.HTTPPresigner - V4aPresigner v4a.HTTPPresigner - LogSigning bool -} - -// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a -// presigned URL for an HTTP request. -// -// Will short circuit the middleware stack and not forward onto the next -// Finalize handler. -type PresignHTTPRequestMiddleware struct { - - // cred provider and signer for sigv4 - credentialsProvider aws.CredentialsProvider - - // s3Express credentials - expressCredentials S3ExpressCredentialsProvider - - // sigV4 signer - v4Signer v4.HTTPPresigner - - // sigV4a signer - v4aSigner v4a.HTTPPresigner - - // log signing - logSigning bool -} - -// NewPresignHTTPRequestMiddleware constructs a PresignHTTPRequestMiddleware using the given Signer for signing requests -func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { - return &PresignHTTPRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - expressCredentials: options.ExpressCredentials, - v4Signer: options.V4Presigner, - v4aSigner: options.V4aPresigner, - logSigning: options.LogSigning, - } -} - -// ID provides the middleware ID. -func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 or SigV4a presign authentication scheme. 
-// -// Since the signed request is not a valid HTTP request -func (p *PresignHTTPRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - // fetch signer type from context - signerVersion := GetSignerVersion(ctx) - - switch signerVersion { - case "aws.auth#sigv4a": - mw := v4a.NewPresignHTTPRequestMiddleware(v4a.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: &v4a.SymmetricCredentialAdaptor{ - SymmetricProvider: p.credentialsProvider, - }, - Presigner: p.v4aSigner, - LogSigning: p.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - case "aws.auth#sigv4": - mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: p.credentialsProvider, - Presigner: p.v4Signer, - LogSigning: p.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - case s3ExpressSignerVersion: - mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: &s3ExpressCredentialsAdapter{ - provider: p.expressCredentials, - bucket: GetBucket(ctx), - }, - Presigner: &s3ExpressPresignerAdapter{v4: p.v4Signer}, - LogSigning: p.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) - default: - return out, metadata, fmt.Errorf("unsupported signer type \"%s\"", signerVersion) - } -} - -// RegisterPreSigningMiddleware registers the wrapper pre-signing middleware to the stack. If a pre-signing middleware is already -// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the -// finalize step. -func RegisterPreSigningMiddleware(stack *middleware.Stack, signingMiddleware *PresignHTTPRequestMiddleware) (err error) { - const signedID = "PresignHTTPRequest" - _, present := stack.Finalize.Get(signedID) - if present { - _, err = stack.Finalize.Swap(signedID, signingMiddleware) - } else { - err = stack.Finalize.Add(signingMiddleware, middleware.After) - } - return err -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go deleted file mode 100644 index eedfc7eef..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go +++ /dev/null @@ -1,310 +0,0 @@ -package customizations - -import ( - "context" - "fmt" - "log" - "net/url" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/internal/s3shared" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// UpdateEndpointParameterAccessor represents accessor functions used by the middleware -type UpdateEndpointParameterAccessor struct { - // functional pointer to fetch bucket name from provided input. 
- // The function is intended to take an input value, and - // return a string pointer to value of string, and bool if - // input has no bucket member. - GetBucketFromInput func(interface{}) (*string, bool) -} - -// UpdateEndpointOptions provides the options for the UpdateEndpoint middleware setup. -type UpdateEndpointOptions struct { - // Accessor are parameter accessors used by the middleware - Accessor UpdateEndpointParameterAccessor - - // use path style - UsePathStyle bool - - // use transfer acceleration - UseAccelerate bool - - // indicates if an operation supports s3 transfer acceleration. - SupportsAccelerate bool - - // use ARN region - UseARNRegion bool - - // Indicates that the operation should target the s3-object-lambda endpoint. - // Used to direct operations that do not route based on an input ARN. - TargetS3ObjectLambda bool - - // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver - EndpointResolver EndpointResolver - - // EndpointResolverOptions used by endpoint resolver - EndpointResolverOptions EndpointResolverOptions - - // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled - DisableMultiRegionAccessPoints bool -} - -// UpdateEndpoint adds the middleware to the middleware stack based on the UpdateEndpointOptions. -func UpdateEndpoint(stack *middleware.Stack, options UpdateEndpointOptions) (err error) { - const serializerID = "OperationSerializer" - - // initial arn look up middleware - err = stack.Initialize.Insert(&s3shared.ARNLookup{ - GetARNValue: options.Accessor.GetBucketFromInput, - }, "legacyEndpointContextSetter", middleware.After) - if err != nil { - return err - } - - // process arn - err = stack.Serialize.Insert(&processARNResource{ - UseARNRegion: options.UseARNRegion, - UseAccelerate: options.UseAccelerate, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointResolverOptions, - DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, - }, serializerID, middleware.Before) - if err != nil { - return err - } - - // process whether the operation requires the s3-object-lambda endpoint - // Occurs before operation serializer so that hostPrefix mutations - // can be handled correctly. - err = stack.Serialize.Insert(&s3ObjectLambdaEndpoint{ - UseEndpoint: options.TargetS3ObjectLambda, - UseAccelerate: options.UseAccelerate, - EndpointResolver: options.EndpointResolver, - EndpointResolverOptions: options.EndpointResolverOptions, - }, serializerID, middleware.Before) - if err != nil { - return err - } - - // remove bucket arn middleware - err = stack.Serialize.Insert(&removeBucketFromPathMiddleware{}, serializerID, middleware.After) - if err != nil { - return err - } - - // update endpoint to use options for path style and accelerate - err = stack.Serialize.Insert(&updateEndpoint{ - usePathStyle: options.UsePathStyle, - getBucketFromInput: options.Accessor.GetBucketFromInput, - useAccelerate: options.UseAccelerate, - supportsAccelerate: options.SupportsAccelerate, - }, serializerID, middleware.After) - if err != nil { - return err - } - - return err -} - -type updateEndpoint struct { - // path style options - usePathStyle bool - getBucketFromInput func(interface{}) (*string, bool) - - // accelerate options - useAccelerate bool - supportsAccelerate bool -} - -// ID returns the middleware ID. 
-func (*updateEndpoint) ID() string { - return "S3:UpdateEndpoint" -} - -func (u *updateEndpoint) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - // if arn was processed, skip this middleware - if _, ok := s3shared.GetARNResourceFromContext(ctx); ok { - return next.HandleSerialize(ctx, in) - } - - // skip this customization if host name is set as immutable - if smithyhttp.GetHostnameImmutable(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // check if accelerate is supported - if u.useAccelerate && !u.supportsAccelerate { - // accelerate is not supported, thus will be ignored - log.Println("Transfer acceleration is not supported for the operation, ignoring UseAccelerate.") - u.useAccelerate = false - } - - // transfer acceleration is not supported with path style urls - if u.useAccelerate && u.usePathStyle { - log.Println("UseAccelerate is not compatible with UsePathStyle, ignoring UsePathStyle.") - u.usePathStyle = false - } - - if u.getBucketFromInput != nil { - // Below customization only apply if bucket name is provided - bucket, ok := u.getBucketFromInput(in.Parameters) - if ok && bucket != nil { - region := awsmiddleware.GetRegion(ctx) - if err := u.updateEndpointFromConfig(req, *bucket, region); err != nil { - return out, metadata, err - } - } - } - - return next.HandleSerialize(ctx, in) -} - -func (u updateEndpoint) updateEndpointFromConfig(req *smithyhttp.Request, bucket string, region string) error { - // do nothing if path style is enforced - if u.usePathStyle { - return nil - } - - if !hostCompatibleBucketName(req.URL, bucket) { - // bucket name must be valid to put into the host for accelerate operations. - // For non-accelerate operations the bucket name can stay in the path if - // not valid hostname. - var err error - if u.useAccelerate { - err = fmt.Errorf("bucket name %s is not compatible with S3", bucket) - } - - // No-Op if not using accelerate. - return err - } - - // accelerate is only supported if use path style is disabled - if u.useAccelerate { - parts := strings.Split(req.URL.Host, ".") - if len(parts) < 3 { - return fmt.Errorf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", req.URL.Host) - } - - if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { - parts[0] = "s3-accelerate" - } - - for i := 1; i+1 < len(parts); i++ { - if strings.EqualFold(parts[i], region) { - parts = append(parts[:i], parts[i+1:]...) - break - } - } - - // construct the url host - req.URL.Host = strings.Join(parts, ".") - } - - // move bucket to follow virtual host style - moveBucketNameToHost(req.URL, bucket) - return nil -} - -// updates endpoint to use virtual host styling -func moveBucketNameToHost(u *url.URL, bucket string) { - u.Host = bucket + "." 
+ u.Host - removeBucketFromPath(u, bucket) -} - -// remove bucket from url -func removeBucketFromPath(u *url.URL, bucket string) { - if strings.HasPrefix(u.Path, "/"+bucket) { - // modify url path - u.Path = strings.Replace(u.Path, "/"+bucket, "", 1) - - // modify url raw path - u.RawPath = strings.Replace(u.RawPath, "/"+httpbinding.EscapePath(bucket, true), "", 1) - } - - if u.Path == "" { - u.Path = "/" - } - - if u.RawPath == "" { - u.RawPath = "/" - } -} - -// hostCompatibleBucketName returns true if the request should -// put the bucket in the host. This is false if the bucket is not -// DNS compatible or the EndpointResolver resolves an aws.Endpoint with -// HostnameImmutable member set to true. -// -// https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Endpoint.HostnameImmutable -func hostCompatibleBucketName(u *url.URL, bucket string) bool { - // Bucket might be DNS compatible but dots in the hostname will fail - // certificate validation, so do not use host-style. - if u.Scheme == "https" && strings.Contains(bucket, ".") { - return false - } - - // if the bucket is DNS compatible - return dnsCompatibleBucketName(bucket) -} - -// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. -// Buckets created outside of the classic region MUST be DNS compatible. -func dnsCompatibleBucketName(bucket string) bool { - if strings.Contains(bucket, "..") { - return false - } - - // checks for `^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$` domain mapping - if !((bucket[0] > 96 && bucket[0] < 123) || (bucket[0] > 47 && bucket[0] < 58)) { - return false - } - - for _, c := range bucket[1:] { - if !((c > 96 && c < 123) || (c > 47 && c < 58) || c == 46 || c == 45) { - return false - } - } - - // checks for `^(\d+\.){3}\d+$` IPaddressing - v := strings.SplitN(bucket, ".", -1) - if len(v) == 4 { - for _, c := range bucket { - if !((c > 47 && c < 58) || c == 46) { - // we confirm that this is not a IP address - return true - } - } - // this is a IP address - return false - } - - return true -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go deleted file mode 100644 index 8b5f10fd8..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go +++ /dev/null @@ -1,1083 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" - "strings" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. 
- UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver S3 endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "af-south-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.af-south-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-east-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-east-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: 
endpoints.Endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-south-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-south-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-southeast-5", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-5", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-5.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ap-southeast-7", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-7", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ap-southeast-7.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "aws-global", - }: endpoints.Endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ca-central-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.ca-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - Variant: endpoints.FIPSVariant | 
endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ca-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ca-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.ca-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.ca-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-central-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-north-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-north-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-south-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-south-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-west-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-3", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.eu-west-3.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "fips-ca-central-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-ca-west-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.ca-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-east-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-east-2", - }: 
endpoints.Endpoint{ - Hostname: "s3-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-west-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-west-2", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "il-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.il-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.me-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-south-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.me-south-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "mx-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "mx-central-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.mx-central-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "s3-external-1", - }: endpoints.Endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-west-1", - 
}: endpoints.Endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.{region}.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.amazonaws.com.cn", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "cn-north-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", - }, - }, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "fips-us-iso-east-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: 
endpoints.CredentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-iso-west-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: endpoints.CredentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-iso-east-1", - }: endpoints.Endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-iso-east-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-iso-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", - }, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.sc2s.sgov.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "fips-us-isob-east-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: endpoints.CredentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-isob-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-isob-east-1", - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", - }, - endpoints.EndpointKey{ - Region: "us-isob-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - 
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3", "s3v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3", "s3v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "s3-fips.dualstack.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3", "s3v4"}, - }, - { - Variant: 0, - }: { - Hostname: "s3.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "fips-us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "fips-us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - Variant: endpoints.DualStackVariant, - }: { - Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, -} - -// GetDNSSuffix returns the dnsSuffix URL component for the given partition id -func GetDNSSuffix(id string, options Options) (string, error) { - variant := transformToSharedOptions(options).GetEndpointVariant() - switch { - case strings.EqualFold(id, "aws"): - switch variant { - case endpoints.DualStackVariant: - return "amazonaws.com", nil - - case endpoints.FIPSVariant: - return "amazonaws.com", nil - - case endpoints.FIPSVariant | endpoints.DualStackVariant: - return "amazonaws.com", nil - - case 0: - return "amazonaws.com", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-cn"): - switch variant { - case endpoints.DualStackVariant: - return "amazonaws.com.cn", nil - - case endpoints.FIPSVariant: - return "amazonaws.com.cn", nil - - case endpoints.FIPSVariant | endpoints.DualStackVariant: - return "api.amazonwebservices.com.cn", nil - - case 0: - return "amazonaws.com.cn", nil - - default: - return "", fmt.Errorf("unsupported endpoint 
variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-iso"): - switch variant { - case endpoints.FIPSVariant: - return "c2s.ic.gov", nil - - case 0: - return "c2s.ic.gov", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-iso-b"): - switch variant { - case endpoints.FIPSVariant: - return "sc2s.sgov.gov", nil - - case 0: - return "sc2s.sgov.gov", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-iso-e"): - switch variant { - case endpoints.FIPSVariant: - return "cloud.adc-e.uk", nil - - case 0: - return "cloud.adc-e.uk", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-iso-f"): - switch variant { - case endpoints.FIPSVariant: - return "csp.hci.ic.gov", nil - - case 0: - return "csp.hci.ic.gov", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - case strings.EqualFold(id, "aws-us-gov"): - switch variant { - case endpoints.DualStackVariant: - return "amazonaws.com", nil - - case endpoints.FIPSVariant: - return "amazonaws.com", nil - - case endpoints.FIPSVariant | endpoints.DualStackVariant: - return "amazonaws.com", nil - - case 0: - return "amazonaws.com", nil - - default: - return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) - - } - - default: - return "", fmt.Errorf("unknown partition") - - } -} - -// GetDNSSuffixFromRegion returns the DNS suffix for the provided region and -// options. -func GetDNSSuffixFromRegion(region string, options Options) (string, error) { - switch { - case partitionRegexp.Aws.MatchString(region): - return GetDNSSuffix("aws", options) - - case partitionRegexp.AwsCn.MatchString(region): - return GetDNSSuffix("aws-cn", options) - - case partitionRegexp.AwsIso.MatchString(region): - return GetDNSSuffix("aws-iso", options) - - case partitionRegexp.AwsIsoB.MatchString(region): - return GetDNSSuffix("aws-iso-b", options) - - case partitionRegexp.AwsIsoE.MatchString(region): - return GetDNSSuffix("aws-iso-e", options) - - case partitionRegexp.AwsIsoF.MatchString(region): - return GetDNSSuffix("aws-iso-f", options) - - case partitionRegexp.AwsUsGov.MatchString(region): - return GetDNSSuffix("aws-us-gov", options) - - default: - return GetDNSSuffix("aws", options) - - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go deleted file mode 100644 index 6b98e8802..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go +++ /dev/null @@ -1,335 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - "github.com/aws/aws-sdk-go-v2/internal/v4a" - s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The threshold ContentLength in bytes for HTTP PUT request to receive {Expect: - // 100-continue} header. Setting to -1 will disable adding the Expect header to - // requests; setting to 0 will set the threshold to default 2MB - ContinueHeaderThresholdBytes int64 - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // Allows you to disable S3 Multi-Region access points feature. - DisableMultiRegionAccessPoints bool - - // Disables this client's usage of Session Auth for S3Express buckets and reverts - // to using conventional SigV4 for those. - DisableS3ExpressSessionAuth *bool - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. - // - // To migrate an EndpointResolver implementation that uses a custom endpoint, set - // the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // The credentials provider for S3Express requests. - ExpressCredentials ExpressCredentialsProvider - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The client meter provider. - MeterProvider metrics.MeterProvider - - // The region to send requests to. 
(Required) - Region string - - // Indicates how user opt-in/out request checksum calculation - RequestChecksumCalculation aws.RequestChecksumCalculation - - // Indicates how user opt-in/out response checksum validation - ResponseChecksumValidation aws.ResponseChecksumValidation - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. - // - // If specified in an operation call's functional options with a value that is - // different than the constructed client's Options, the Client's Retryer will be - // wrapped to use the operation's specific RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. - // - // When creating a new API Clients this member will only be used if the Retryer - // Options member is nil. This value will be ignored if Retryer is not nil. - // - // Currently does not support per operation call overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The client tracer provider. - TracerProvider tracing.TracerProvider - - // Allows you to enable arn region support for the service. - UseARNRegion bool - - // Allows you to enable S3 Accelerate feature. All operations compatible with S3 - // Accelerate will use the accelerate endpoint for requests. Requests not - // compatible will fall back to normal S3 requests. The bucket must be enabled for - // accelerate to be used with S3 client with accelerate enabled. If the bucket is - // not enabled for accelerate an error will be returned. The bucket name must be - // DNS compatible to work with accelerate. - UseAccelerate bool - - // Allows you to enable dual-stack endpoint support for the service. - // - // Deprecated: Set dual-stack by setting UseDualStackEndpoint on - // EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint - // field is set it overrides this field value. - UseDualstack bool - - // Allows you to enable the client to use path-style addressing, i.e., - // https://s3.amazonaws.com/BUCKET/KEY . By default, the S3 client will use virtual - // hosted bucket addressing when possible( https://BUCKET.s3.amazonaws.com/KEY ). - UsePathStyle bool - - // Signature Version 4a (SigV4a) Signer - httpSignerV4a httpSignerV4a - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. - // - // Currently does not support per operation call overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. 
Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "com.amazonaws.s3#sigv4express" { - return getExpressIdentityResolver(o) - } - if schemeID == "aws.auth#sigv4a" { - return getSigV4AIdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. 
-func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func getSigV4AIdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &v4a.CredentialsProviderAdapter{ - Provider: &v4a.SymmetricCredentialAdaptor{ - SymmetricProvider: o.Credentials, - }, - } - } - return nil -} - -// WithSigV4ASigningRegions applies an override to the authentication workflow to -// use the given signing region set for SigV4A-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region set from both auth scheme resolution and endpoint -// resolution. -func WithSigV4ASigningRegions(regions []string) func(*Options) { - fn := func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, - ) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, regions) - return next.HandleFinalize(ctx, in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Finalize.Insert( - middleware.FinalizeMiddlewareFunc("withSigV4ASigningRegions", fn), - "Signing", - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} - -func getExpressIdentityResolver(o Options) smithyauth.IdentityResolver { - if o.ExpressCredentials != nil { - return &s3cust.ExpressIdentityResolver{Provider: o.ExpressCredentials} - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go deleted file mode 100644 index 491ed2e5d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go +++ /dev/null @@ -1,419 +0,0 @@ -package s3 - -import ( - "context" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -const ( - algorithmHeader = "X-Amz-Algorithm" - credentialHeader = "X-Amz-Credential" - dateHeader = "X-Amz-Date" - tokenHeader = "X-Amz-Security-Token" - signatureHeader = "X-Amz-Signature" - - 
algorithm = "AWS4-HMAC-SHA256" - aws4Request = "aws4_request" - bucketHeader = "bucket" - defaultExpiresIn = 15 * time.Minute - shortDateLayout = "20060102" -) - -// PresignPostObject is a special kind of [presigned request] used to send a request using -// form data, likely from an HTML form on a browser. -// Unlike other presigned operations, the return values of this function are not meant to be used directly -// to make an HTTP request but rather to be used as inputs to a form. See [the docs] for more information -// on how to use these values -// -// [presigned request] https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html -// [the docs] https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html -func (c *PresignClient) PresignPostObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignPostOptions)) (*PresignedPostRequest, error) { - if params == nil { - params = &PutObjectInput{} - } - clientOptions := c.options.copy() - options := PresignPostOptions{ - Expires: clientOptions.Expires, - PostPresigner: &postSignAdapter{}, - } - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(clientOptions.ClientOptions, withNopHTTPClientAPIOption) - cvt := presignPostConverter(options) - result, _, err := c.client.invokeOperation(ctx, "$type:L", params, clientOptFns, - c.client.addOperationPutObjectMiddlewares, - cvt.ConvertToPresignMiddleware, - func(stack *middleware.Stack, options Options) error { - return awshttp.RemoveContentTypeHeader(stack) - }, - ) - if err != nil { - return nil, err - } - - out := result.(*PresignedPostRequest) - return out, nil -} - -// PresignedPostRequest represents a presigned request to be sent using HTTP verb POST and FormData -type PresignedPostRequest struct { - // Represents the Base URL to make a request to - URL string - // Values is a key-value map of values to be sent as FormData - // these values are not encoded - Values map[string]string -} - -// postSignAdapter adapter to implement the presignPost interface -type postSignAdapter struct{} - -// PresignPost creates a special kind of [presigned request] -// to be used with HTTP verb POST. -// It differs from PUT request mostly on -// 1. It accepts a new set of parameters, `Conditions[]`, that are used to create a policy doc to limit where an object can be posted to -// 2. The return value needs to have more processing since it's meant to be sent via a form and not stand on its own -// 3. There's no body to be signed, since that will be attached when the actual request is made -// 4. 
The signature is made based on the policy document, not the whole request -// More information can be found at https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html -// -// [presigned request] https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html -func (s *postSignAdapter) PresignPost( - credentials aws.Credentials, - bucket string, key string, - region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, optFns ...func(*v4.SignerOptions), -) (fields map[string]string, err error) { - credentialScope := buildCredentialScope(signingTime, region, service) - credentialStr := credentials.AccessKeyID + "/" + credentialScope - - policyDoc, err := createPolicyDocument(expirationTime, signingTime, bucket, key, credentialStr, &credentials.SessionToken, conditions) - if err != nil { - return nil, err - } - - signature := buildSignature(policyDoc, credentials.SecretAccessKey, service, region, signingTime) - - fields = getPostSignRequiredFields(signingTime, credentialStr, credentials) - fields[signatureHeader] = signature - fields["key"] = key - fields["policy"] = policyDoc - - return fields, nil -} - -func getPostSignRequiredFields(t time.Time, credentialStr string, awsCredentials aws.Credentials) map[string]string { - fields := map[string]string{ - algorithmHeader: algorithm, - dateHeader: t.UTC().Format("20060102T150405Z"), - credentialHeader: credentialStr, - } - - sessionToken := awsCredentials.SessionToken - if len(sessionToken) > 0 { - fields[tokenHeader] = sessionToken - } - - return fields -} - -// PresignPost defines the interface to presign a POST request -type PresignPost interface { - PresignPost( - credentials aws.Credentials, - bucket string, key string, - region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, - optFns ...func(*v4.SignerOptions), - ) (fields map[string]string, err error) -} - -// PresignPostOptions represent the options to be passed to a PresignPost sign request -type PresignPostOptions struct { - - // ClientOptions are list of functional options to mutate client options used by - // the presign client. - ClientOptions []func(*Options) - - // PostPresigner to use. One will be created if none is provided - PostPresigner PresignPost - - // Expires sets the expiration duration for the generated presign url. This should - // be the duration in seconds the presigned URL should be considered valid for. If - // not set or set to zero, presign url would default to expire after 900 seconds. - Expires time.Duration - - // Conditions a list of extra conditions to pass to the policy document - // Available conditions can be found [here] - // - // [here]https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions - Conditions []interface{} -} - -type presignPostConverter PresignPostOptions - -// presignPostRequestMiddlewareOptions is the options for the presignPostRequestMiddleware middleware. -type presignPostRequestMiddlewareOptions struct { - CredentialsProvider aws.CredentialsProvider - Presigner PresignPost - LogSigning bool - ExpiresIn time.Duration - Conditions []interface{} -} - -type presignPostRequestMiddleware struct { - credentialsProvider aws.CredentialsProvider - presigner PresignPost - logSigning bool - expiresIn time.Duration - conditions []interface{} -} - -// newPresignPostRequestMiddleware returns a new presignPostRequestMiddleware -// initialized with the presigner. 
-func newPresignPostRequestMiddleware(options presignPostRequestMiddlewareOptions) *presignPostRequestMiddleware { - return &presignPostRequestMiddleware{ - credentialsProvider: options.CredentialsProvider, - presigner: options.Presigner, - logSigning: options.LogSigning, - expiresIn: options.ExpiresIn, - conditions: options.Conditions, - } -} - -// ID provides the middleware ID. -func (*presignPostRequestMiddleware) ID() string { return "PresignPostRequestMiddleware" } - -// HandleFinalize will take the provided input and create a presigned url for -// the http request using the SigV4 presign authentication scheme. -// -// Since the signed request is not a valid HTTP request -func (s *presignPostRequestMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - - input := getOperationInput(ctx) - asS3Put, ok := input.(*PutObjectInput) - if !ok { - return out, metadata, fmt.Errorf("expected PutObjectInput") - } - bucketName, ok := asS3Put.bucket() - if !ok { - return out, metadata, fmt.Errorf("requested input bucketName not found on request") - } - uploadKey := asS3Put.Key - if uploadKey == nil { - return out, metadata, fmt.Errorf("PutObject input does not have a key input") - } - - uri := getS3ResolvedURI(ctx) - - signingName := awsmiddleware.GetSigningName(ctx) - signingRegion := awsmiddleware.GetSigningRegion(ctx) - - credentials, err := s.credentialsProvider.Retrieve(ctx) - if err != nil { - return out, metadata, &v4.SigningError{ - Err: fmt.Errorf("failed to retrieve credentials: %w", err), - } - } - skew := internalcontext.GetAttemptSkewContext(ctx) - signingTime := sdk.NowTime().Add(skew) - expirationTime := signingTime.Add(s.expiresIn).UTC() - - fields, err := s.presigner.PresignPost( - credentials, - bucketName, - *uploadKey, - signingRegion, - signingName, - signingTime, - s.conditions, - expirationTime, - func(o *v4.SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }) - if err != nil { - return out, metadata, &v4.SigningError{ - Err: fmt.Errorf("failed to sign http request, %w", err), - } - } - - out.Result = &PresignedPostRequest{ - URL: uri, - Values: fields, - } - - return out, metadata, nil -} - -// Adapted from existing PresignConverter middleware -func (c presignPostConverter) ConvertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - stack.Build.Remove("UserAgent") - stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) - stack.Finalize.Remove((*retry.Attempt)(nil).ID()) - stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) - stack.Deserialize.Clear() - - if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { - return err - } - - // if no expiration is set, set one - expiresIn := c.Expires - if expiresIn == 0 { - expiresIn = defaultExpiresIn - } - - pmw := newPresignPostRequestMiddleware(presignPostRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - Presigner: c.PostPresigner, - LogSigning: options.ClientLogMode.IsSigning(), - ExpiresIn: expiresIn, - Conditions: c.Conditions, - }) - if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { - return err - } - if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { - return err - } - err = presignedurlcust.AddAsIsPresigningMiddleware(stack) - if err != nil { - return err - } - return nil -} - -func 
createPolicyDocument(expirationTime time.Time, signingTime time.Time, bucket string, key string, credentialString string, securityToken *string, extraConditions []interface{}) (string, error) { - initialConditions := []interface{}{ - map[string]string{ - algorithmHeader: algorithm, - }, - map[string]string{ - bucketHeader: bucket, - }, - map[string]string{ - credentialHeader: credentialString, - }, - map[string]string{ - dateHeader: signingTime.UTC().Format("20060102T150405Z"), - }, - } - - var conditions []interface{} - for _, v := range initialConditions { - conditions = append(conditions, v) - } - - if securityToken != nil && *securityToken != "" { - conditions = append(conditions, map[string]string{ - tokenHeader: *securityToken, - }) - } - - // append user-defined conditions at the end - conditions = append(conditions, extraConditions...) - - // The policy allows you to set a "key" value to specify what's the name of the - // key to add. Customers can add one by specifying one in their conditions, - // so we're checking if one has already been set. - // If none is found, restrict this to just the key name passed on the request - // This can be disabled by adding a condition that explicitly allows - // everything - if !isAlreadyCheckingForKey(conditions) { - conditions = append(conditions, map[string]string{"key": key}) - } - - policyDoc := map[string]interface{}{ - "conditions": conditions, - "expiration": expirationTime.Format(time.RFC3339), - } - - jsonBytes, err := json.Marshal(policyDoc) - if err != nil { - return "", err - } - - return base64.StdEncoding.EncodeToString(jsonBytes), nil -} - -func isAlreadyCheckingForKey(conditions []interface{}) bool { - // Need to check for two conditions: - // 1. A condition of the form ["starts-with", "$key", "mykey"] - // 2. 
A condition of the form {"key": "mykey"} - for _, c := range conditions { - slice, ok := c.([]interface{}) - if ok && len(slice) > 1 { - if slice[0] == "starts-with" && slice[1] == "$key" { - return true - } - } - m, ok := c.(map[string]interface{}) - if ok && len(m) > 0 { - for k := range m { - if k == "key" { - return true - } - } - } - // Repeat this but for map[string]string due to type constrains - ms, ok := c.(map[string]string) - if ok && len(ms) > 0 { - for k := range ms { - if k == "key" { - return true - } - } - } - } - return false -} - -// these methods have been copied from v4 implementation since they are not exported for public use -func hmacsha256(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func buildSignature(strToSign, secret, service, region string, t time.Time) string { - key := deriveKey(secret, service, region, t) - return hex.EncodeToString(hmacsha256(key, []byte(strToSign))) -} - -func deriveKey(secret, service, region string, t time.Time) []byte { - hmacDate := hmacsha256([]byte("AWS4"+secret), []byte(t.UTC().Format(shortDateLayout))) - hmacRegion := hmacsha256(hmacDate, []byte(region)) - hmacService := hmacsha256(hmacRegion, []byte(service)) - return hmacsha256(hmacService, []byte(aws4Request)) -} - -func buildCredentialScope(signingTime time.Time, region, service string) string { - return strings.Join([]string{ - signingTime.UTC().Format(shortDateLayout), - region, - service, - aws4Request, - }, "/") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go deleted file mode 100644 index 4e34d1a22..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go +++ /dev/null @@ -1,77 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - "path" - - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// serializeImmutableHostnameBucketMiddleware handles injecting the bucket name into -// "immutable" hostnames resolved via v1 EndpointResolvers. This CANNOT be done in -// serialization, since v2 endpoint resolution requires removing the {Bucket} path -// segment from all S3 requests. -// -// This will only be done for non-ARN buckets, as the features that use those require -// virtualhost manipulation to function and we previously (pre-ep2) expected the caller -// to handle that in their resolver. 
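The comment above describes when the bucket has to be injected into the URL path rather than the hostname; a small stdlib-only sketch of the two addressing styles the middleware below chooses between (endpoint strings and helper names are illustrative, not SDK code):

package s3addressing

import (
	"net/url"
	"path"
)

// pathStyleURL appends the bucket as the first path segment, which is what the middleware
// below does for hostname-immutable endpoints or buckets that are not virtual-host compatible,
// e.g. https://s3.us-east-1.amazonaws.com/my-bucket/backups/dump.gz
func pathStyleURL(endpoint, bucket, key string) (string, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return "", err
	}
	u.Path = path.Join(u.Path, bucket, key)
	return u.String(), nil
}

// virtualHostURL puts the bucket into the hostname instead, the default for DNS-compatible
// bucket names, e.g. https://my-bucket.s3.us-east-1.amazonaws.com/backups/dump.gz
func virtualHostURL(endpoint, bucket, key string) (string, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return "", err
	}
	u.Host = bucket + "." + u.Host
	u.Path = path.Join(u.Path, key)
	return u.String(), nil
}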
-type serializeImmutableHostnameBucketMiddleware struct { - UsePathStyle bool -} - -func (*serializeImmutableHostnameBucketMiddleware) ID() string { - return "serializeImmutableHostnameBucket" -} - -func (m *serializeImmutableHostnameBucketMiddleware) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - bucket, ok := bucketFromInput(in.Parameters) - if !ok { - return next.HandleSerialize(ctx, in) - } - - // a bucket being un-vhostable will also force us to use path style - usePathStyle := m.UsePathStyle || !awsrulesfn.IsVirtualHostableS3Bucket(bucket, request.URL.Scheme != "https") - - if !smithyhttp.GetHostnameImmutable(ctx) && - !(awsmiddleware.GetRequiresLegacyEndpoints(ctx) && usePathStyle) { - return next.HandleSerialize(ctx, in) - } - - parsedBucket := awsrulesfn.ParseARN(bucket) - - // disallow ARN buckets except for MRAP arns - if parsedBucket != nil && len(parsedBucket.Region) > 0 { - return next.HandleSerialize(ctx, in) - } - - request.URL.Path = path.Join(request.URL.Path, bucket) - request.URL.RawPath = path.Join(request.URL.RawPath, httpbinding.EscapePath(bucket, true)) - - return next.HandleSerialize(ctx, in) -} - -func addSerializeImmutableHostnameBucketMiddleware(stack *middleware.Stack, options Options) error { - return stack.Serialize.Insert( - &serializeImmutableHostnameBucketMiddleware{ - UsePathStyle: options.UsePathStyle, - }, - "OperationSerializer", - middleware.Before, - ) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go deleted file mode 100644 index f584537a9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go +++ /dev/null @@ -1,13910 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package s3 - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - smithyxml "github.com/aws/smithy-go/encoding/xml" - "github.com/aws/smithy-go/middleware" - smithytime "github.com/aws/smithy-go/time" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "strconv" - "strings" -) - -type awsRestxml_serializeOpAbortMultipartUpload struct { -} - -func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AbortMultipartUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=AbortMultipartUpload") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatchInitiatedTime != nil { - locationName := "X-Amz-If-Match-Initiated-Time" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchInitiatedTime)) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.UploadId != nil { - 
encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpCompleteMultipartUpload struct { -} - -func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CompleteMultipartUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.MultipartUpload != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CompleteMultipartUpload", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentCompletedMultipartUpload(input.MultipartUpload, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ChecksumCRC32 != nil { - locationName := "X-Amz-Checksum-Crc32" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32) - } - - if v.ChecksumCRC32C != nil { - locationName := "X-Amz-Checksum-Crc32c" - 
encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) - } - - if v.ChecksumCRC64NVME != nil { - locationName := "X-Amz-Checksum-Crc64nvme" - encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) - } - - if v.ChecksumSHA1 != nil { - locationName := "X-Amz-Checksum-Sha1" - encoder.SetHeader(locationName).String(*v.ChecksumSHA1) - } - - if v.ChecksumSHA256 != nil { - locationName := "X-Amz-Checksum-Sha256" - encoder.SetHeader(locationName).String(*v.ChecksumSHA256) - } - - if len(v.ChecksumType) > 0 { - locationName := "X-Amz-Checksum-Type" - encoder.SetHeader(locationName).String(string(v.ChecksumType)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfNoneMatch != nil { - locationName := "If-None-Match" - encoder.SetHeader(locationName).String(*v.IfNoneMatch) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.MpuObjectSize != nil { - locationName := "X-Amz-Mp-Object-Size" - encoder.SetHeader(locationName).String(*v.MpuObjectSize) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.UploadId != nil { - encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpCopyObject struct { -} - -func (*awsRestxml_serializeOpCopyObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CopyObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CopyObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) 
- restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCopyObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if v.CacheControl != nil { - locationName := "Cache-Control" - encoder.SetHeader(locationName).String(*v.CacheControl) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentDisposition != nil { - locationName := "Content-Disposition" - encoder.SetHeader(locationName).String(*v.ContentDisposition) - } - - if v.ContentEncoding != nil { - locationName := "Content-Encoding" - encoder.SetHeader(locationName).String(*v.ContentEncoding) - } - - if v.ContentLanguage != nil { - locationName := "Content-Language" - encoder.SetHeader(locationName).String(*v.ContentLanguage) - } - - if v.ContentType != nil { - locationName := "Content-Type" - encoder.SetHeader(locationName).String(*v.ContentType) - } - - if v.CopySource != nil { - locationName := "X-Amz-Copy-Source" - encoder.SetHeader(locationName).String(*v.CopySource) - } - - if v.CopySourceIfMatch != nil { - locationName := "X-Amz-Copy-Source-If-Match" - encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) - } - - if v.CopySourceIfModifiedSince != nil { - locationName := "X-Amz-Copy-Source-If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) - } - - if v.CopySourceIfNoneMatch != nil { - locationName := "X-Amz-Copy-Source-If-None-Match" - encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) - } - - if v.CopySourceIfUnmodifiedSince != nil { - locationName := "X-Amz-Copy-Source-If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) - } - - if v.CopySourceSSECustomerAlgorithm != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) - } - - if v.CopySourceSSECustomerKey != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) - } - - if v.CopySourceSSECustomerKeyMD5 != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if 
v.ExpectedSourceBucketOwner != nil { - locationName := "X-Amz-Source-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) - } - - if v.Expires != nil { - locationName := "Expires" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.Metadata != nil { - hv := encoder.Headers("X-Amz-Meta-") - for mapKey, mapVal := range v.Metadata { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } - } - - if len(v.MetadataDirective) > 0 { - locationName := "X-Amz-Metadata-Directive" - encoder.SetHeader(locationName).String(string(v.MetadataDirective)) - } - - if len(v.ObjectLockLegalHoldStatus) > 0 { - locationName := "X-Amz-Object-Lock-Legal-Hold" - encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) - } - - if len(v.ObjectLockMode) > 0 { - locationName := "X-Amz-Object-Lock-Mode" - encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) - } - - if v.ObjectLockRetainUntilDate != nil { - locationName := "X-Amz-Object-Lock-Retain-Until-Date" - encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.SSEKMSEncryptionContext != nil { - locationName := "X-Amz-Server-Side-Encryption-Context" - encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - if len(v.StorageClass) > 0 { - locationName := "X-Amz-Storage-Class" - encoder.SetHeader(locationName).String(string(v.StorageClass)) - } - - if v.Tagging != nil { - locationName := "X-Amz-Tagging" - encoder.SetHeader(locationName).String(*v.Tagging) - } - - if len(v.TaggingDirective) > 0 { - locationName := "X-Amz-Tagging-Directive" - encoder.SetHeader(locationName).String(string(v.TaggingDirective)) - } - - if v.WebsiteRedirectLocation != nil { - 
locationName := "X-Amz-Website-Redirect-Location" - encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) - } - - return nil -} - -type awsRestxml_serializeOpCreateBucket struct { -} - -func (*awsRestxml_serializeOpCreateBucket) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateBucketInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateBucketInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.CreateBucketConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CreateBucketConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentCreateBucketConfiguration(input.CreateBucketConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - 
encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWrite != nil { - locationName := "X-Amz-Grant-Write" - encoder.SetHeader(locationName).String(*v.GrantWrite) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.ObjectLockEnabledForBucket != nil { - locationName := "X-Amz-Bucket-Object-Lock-Enabled" - encoder.SetHeader(locationName).Boolean(*v.ObjectLockEnabledForBucket) - } - - if len(v.ObjectOwnership) > 0 { - locationName := "X-Amz-Object-Ownership" - encoder.SetHeader(locationName).String(string(v.ObjectOwnership)) - } - - return nil -} - -type awsRestxml_serializeOpCreateBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataTable") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.MetadataTableConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MetadataTableConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentMetadataTableConfiguration(input.MetadataTableConfiguration, 
xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpCreateMultipartUpload struct { -} - -func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateMultipartUploadInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if 
len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if v.CacheControl != nil { - locationName := "Cache-Control" - encoder.SetHeader(locationName).String(*v.CacheControl) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if len(v.ChecksumType) > 0 { - locationName := "X-Amz-Checksum-Type" - encoder.SetHeader(locationName).String(string(v.ChecksumType)) - } - - if v.ContentDisposition != nil { - locationName := "Content-Disposition" - encoder.SetHeader(locationName).String(*v.ContentDisposition) - } - - if v.ContentEncoding != nil { - locationName := "Content-Encoding" - encoder.SetHeader(locationName).String(*v.ContentEncoding) - } - - if v.ContentLanguage != nil { - locationName := "Content-Language" - encoder.SetHeader(locationName).String(*v.ContentLanguage) - } - - if v.ContentType != nil { - locationName := "Content-Type" - encoder.SetHeader(locationName).String(*v.ContentType) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Expires != nil { - locationName := "Expires" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.Metadata != nil { - hv := encoder.Headers("X-Amz-Meta-") - for mapKey, mapVal := range v.Metadata { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } - } - - if len(v.ObjectLockLegalHoldStatus) > 0 { - locationName := "X-Amz-Object-Lock-Legal-Hold" - encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) - } - - if len(v.ObjectLockMode) > 0 { - locationName := "X-Amz-Object-Lock-Mode" - encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) - } - - if v.ObjectLockRetainUntilDate != nil { - locationName := "X-Amz-Object-Lock-Retain-Until-Date" - encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - 
locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.SSEKMSEncryptionContext != nil { - locationName := "X-Amz-Server-Side-Encryption-Context" - encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - if len(v.StorageClass) > 0 { - locationName := "X-Amz-Storage-Class" - encoder.SetHeader(locationName).String(string(v.StorageClass)) - } - - if v.Tagging != nil { - locationName := "X-Amz-Tagging" - encoder.SetHeader(locationName).String(*v.Tagging) - } - - if v.WebsiteRedirectLocation != nil { - locationName := "X-Amz-Website-Redirect-Location" - encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) - } - - return nil -} - -type awsRestxml_serializeOpCreateSession struct { -} - -func (*awsRestxml_serializeOpCreateSession) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateSessionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?session") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsCreateSessionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := 
"X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if len(v.SessionMode) > 0 { - locationName := "X-Amz-Create-Session-Mode" - encoder.SetHeader(locationName).String(string(v.SessionMode)) - } - - if v.SSEKMSEncryptionContext != nil { - locationName := "X-Amz-Server-Side-Encryption-Context" - encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucket struct { -} - -func (*awsRestxml_serializeOpDeleteBucket) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, 
"client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?analytics") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketCors struct { -} - -func (*awsRestxml_serializeOpDeleteBucketCors) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketCorsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?cors") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = 
httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketEncryption struct { -} - -func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketEncryptionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?encryption") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration struct { -} 
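Every generated serializer in this file follows the same skeleton: split the operation's path and query, join them onto the resolved endpoint, set the HTTP method, then bind input members to headers, URI labels, and query parameters. A hedged, stdlib-only sketch of the wire shape this produces for DeleteBucketCors (serialization only; endpoint resolution and signing happen in other middleware, and the names here are illustrative):

package restxmlexample

import "net/http"

// newDeleteBucketCorsRequest shows the request the generated DeleteBucketCors serializer
// effectively emits: DELETE on the bucket endpoint with the "cors" subresource query and
// the optional expected-bucket-owner header.
func newDeleteBucketCorsRequest(bucketEndpoint, expectedBucketOwner string) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodDelete, bucketEndpoint+"/?cors", nil)
	if err != nil {
		return nil, err
	}
	if expectedBucketOwner != "" {
		req.Header.Set("X-Amz-Expected-Bucket-Owner", expectedBucketOwner)
	}
	return req, nil
}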
- -func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketInventoryConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?inventory") - request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketLifecycle struct { -} - -func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketLifecycleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?lifecycle") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - 
endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataTable") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketMetricsConfiguration struct { -} - -func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - 
endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metrics") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketOwnershipControls struct { -} - -func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = 
smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketPolicy struct { -} - -func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?policy") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type 
awsRestxml_serializeOpDeleteBucketReplication struct { -} - -func (*awsRestxml_serializeOpDeleteBucketReplication) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketReplicationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?replication") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketTagging struct { -} - -func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, 
opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteBucketWebsite struct { -} - -func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteBucketWebsiteInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?website") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, 
encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpDeleteObject struct { -} - -func (*awsRestxml_serializeOpDeleteObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=DeleteObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.BypassGovernanceRetention != nil { - locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfMatchLastModifiedTime != nil { - locationName := "X-Amz-If-Match-Last-Modified-Time" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchLastModifiedTime)) - } - - if v.IfMatchSize != nil { - locationName := "X-Amz-If-Match-Size" - encoder.SetHeader(locationName).Long(*v.IfMatchSize) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - 
if v.MFA != nil { - locationName := "X-Amz-Mfa" - encoder.SetHeader(locationName).String(*v.MFA) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpDeleteObjects struct { -} - -func (*awsRestxml_serializeOpDeleteObjects) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteObjectsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?delete") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.Delete != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Delete", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentDelete(input.Delete, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.BypassGovernanceRetention != nil { - locationName := 
"X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.MFA != nil { - locationName := "X-Amz-Mfa" - encoder.SetHeader(locationName).String(*v.MFA) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - return nil -} - -type awsRestxml_serializeOpDeleteObjectTagging struct { -} - -func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeleteObjectTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpDeletePublicAccessBlock struct { -} - 
-func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DeletePublicAccessBlockInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "DELETE" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketAccelerateConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?accelerate") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, 
opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketAcl struct { -} - -func (*awsRestxml_serializeOpGetBucketAcl) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketAclInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?acl") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketAclInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() 
- return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketAnalyticsConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=GetBucketAnalyticsConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketCors struct { -} - -func (*awsRestxml_serializeOpGetBucketCors) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, 
"client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketCorsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?cors") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketEncryption struct { -} - -func (*awsRestxml_serializeOpGetBucketEncryption) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketEncryptionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?encryption") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, 
&smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketInventoryConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() 
string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=GetBucketInventoryConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketLifecycleConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?lifecycle") 
- request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketLocation struct { -} - -func (*awsRestxml_serializeOpGetBucketLocation) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketLocationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?location") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func 
awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketLogging struct { -} - -func (*awsRestxml_serializeOpGetBucketLogging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketLoggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?logging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketMetadataTableConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketMetadataTableConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metadataTable") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketMetricsConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metrics&x-id=GetBucketMetricsConfiguration") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, 
&smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketNotificationConfiguration struct { -} - -func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?notification") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketOwnershipControls 
struct { -} - -func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketPolicy struct { -} - -func (*awsRestxml_serializeOpGetBucketPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?policy") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketPolicyStatus struct { -} - -func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketPolicyStatusInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?policyStatus") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) 
error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketReplication struct { -} - -func (*awsRestxml_serializeOpGetBucketReplication) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketReplicationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?replication") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketRequestPayment struct { -} - -func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketRequestPaymentInput) - _ = 
input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?requestPayment") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketTagging struct { -} - -func (*awsRestxml_serializeOpGetBucketTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil 
{ - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketVersioning struct { -} - -func (*awsRestxml_serializeOpGetBucketVersioning) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketVersioningInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?versioning") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetBucketWebsite struct { -} - -func (*awsRestxml_serializeOpGetBucketWebsite) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer 
span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetBucketWebsiteInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?website") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetObject struct { -} - -func (*awsRestxml_serializeOpGetObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=GetObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := 
awsRestxml_serializeOpHttpBindingsGetObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumMode) > 0 { - locationName := "X-Amz-Checksum-Mode" - encoder.SetHeader(locationName).String(string(v.ChecksumMode)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfModifiedSince != nil { - locationName := "If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) - } - - if v.IfNoneMatch != nil { - locationName := "If-None-Match" - encoder.SetHeader(locationName).String(*v.IfNoneMatch) - } - - if v.IfUnmodifiedSince != nil { - locationName := "If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince)) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.PartNumber != nil { - encoder.SetQuery("partNumber").Integer(*v.PartNumber) - } - - if v.Range != nil { - locationName := "Range" - encoder.SetHeader(locationName).String(*v.Range) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.ResponseCacheControl != nil { - encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) - } - - if v.ResponseContentDisposition != nil { - encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) - } - - if v.ResponseContentEncoding != nil { - encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) - } - - if v.ResponseContentLanguage != nil { - encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) - } - - if v.ResponseContentType != nil { - encoder.SetQuery("response-content-type").String(*v.ResponseContentType) - } - - if v.ResponseExpires != nil { - encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectAcl struct { -} - -func (*awsRestxml_serializeOpGetObjectAcl) ID() string { - return "OperationSerializer" -} - -func 
(m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectAclInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectAclInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectAttributes struct { -} - -func (*awsRestxml_serializeOpGetObjectAttributes) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectAttributesInput) - _ = input - if !ok { - return 
out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?attributes") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.MaxParts != nil { - locationName := "X-Amz-Max-Parts" - encoder.SetHeader(locationName).Integer(*v.MaxParts) - } - - if v.ObjectAttributes != nil { - locationName := "X-Amz-Object-Attributes" - if len(v.ObjectAttributes) == 0 { - encoder.AddHeader(locationName).String("") - } - for i := range v.ObjectAttributes { - if len(v.ObjectAttributes[i]) > 0 { - escaped := string(v.ObjectAttributes[i]) - if strings.Index(string(v.ObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.ObjectAttributes[i]), `"`) != -1 { - escaped = strconv.Quote(string(v.ObjectAttributes[i])) - } - - encoder.AddHeader(locationName).String(string(escaped)) - } - } - } - - if v.PartNumberMarker != nil { - locationName := "X-Amz-Part-Number-Marker" - encoder.SetHeader(locationName).String(*v.PartNumberMarker) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectLegalHold struct { -} - -func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string { - return 
"OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectLegalHoldInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectLockConfiguration struct { -} - -func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } 
- - input, ok := in.Parameters.(*GetObjectLockConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?object-lock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectRetention struct { -} - -func (*awsRestxml_serializeOpGetObjectRetention) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectRetentionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(input, restEncoder); err != nil { - return out, metadata, 
&smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectTagging struct { -} - -func (*awsRestxml_serializeOpGetObjectTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - 
- if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpGetObjectTorrent struct { -} - -func (*awsRestxml_serializeOpGetObjectTorrent) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetObjectTorrentInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?torrent") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - return nil -} - -type awsRestxml_serializeOpGetPublicAccessBlock struct { -} - -func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx 
context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetPublicAccessBlockInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpHeadBucket struct { -} - -func (*awsRestxml_serializeOpHeadBucket) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*HeadBucketInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "HEAD" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - 
[Deleted third-party code elided: generated REST-XML operation serializers from the vendored AWS SDK for Go v2 S3 package (awsRestxml_serializeOp* middleware types and their HTTP-binding helpers for HeadBucket, HeadObject, the ListBucketAnalytics/IntelligentTiering/Inventory/MetricsConfigurations operations, ListBuckets, ListDirectoryBuckets, ListMultipartUploads, ListObjects, ListObjectsV2, ListObjectVersions, ListParts, and the PutBucket* configuration operations). The removed content is mechanically generated SDK source that remains available upstream in aws-sdk-go-v2.]
- _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?metrics") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.MetricsConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MetricsConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentMetricsConfiguration(input.MetricsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Id != nil { - encoder.SetQuery("id").String(*v.Id) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketNotificationConfiguration struct { -} - -func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := 
in.Parameters.(*PutBucketNotificationConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?notification") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.NotificationConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "NotificationConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentNotificationConfiguration(input.NotificationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.SkipDestinationValidation != nil { - locationName := "X-Amz-Skip-Destination-Validation" - encoder.SetHeader(locationName).Boolean(*v.SkipDestinationValidation) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketOwnershipControls struct { -} - -func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) 
- if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.OwnershipControls != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OwnershipControls", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentOwnershipControls(input.OwnershipControls, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketPolicy struct { -} - -func (*awsRestxml_serializeOpPutBucketPolicy) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if 
!ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketPolicyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?policy") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("text/plain") - } - - if input.Policy != nil { - payload := strings.NewReader(*input.Policy) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ConfirmRemoveSelfBucketAccess != nil { - locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access" - encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketReplication struct { -} - -func (*awsRestxml_serializeOpPutBucketReplication) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketReplicationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?replication") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.ReplicationConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ReplicationConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentReplicationConfiguration(input.ReplicationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Token != nil { - locationName := "X-Amz-Bucket-Object-Lock-Token" - encoder.SetHeader(locationName).String(*v.Token) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketRequestPayment struct { -} - -func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - 
request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketRequestPaymentInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?requestPayment") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.RequestPaymentConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RequestPaymentConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentRequestPaymentConfiguration(input.RequestPaymentConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketTagging struct { -} - -func (*awsRestxml_serializeOpPutBucketTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err 
error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.Tagging != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tagging", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutBucketVersioning struct { -} - -func (*awsRestxml_serializeOpPutBucketVersioning) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketVersioningInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?versioning") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.VersioningConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "VersioningConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentVersioningConfiguration(input.VersioningConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.MFA != nil { - locationName := "X-Amz-Mfa" - encoder.SetHeader(locationName).String(*v.MFA) - } - - return nil -} - -type 
awsRestxml_serializeOpPutBucketWebsite struct { -} - -func (*awsRestxml_serializeOpPutBucketWebsite) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutBucketWebsiteInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?website") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.WebsiteConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "WebsiteConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentWebsiteConfiguration(input.WebsiteConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := 
"X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpPutObject struct { -} - -func (*awsRestxml_serializeOpPutObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=PutObject") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/octet-stream") - } - - if input.Body != nil { - payload := input.Body - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if v.BucketKeyEnabled != nil { - locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if v.CacheControl != nil { - locationName := "Cache-Control" - encoder.SetHeader(locationName).String(*v.CacheControl) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ChecksumCRC32 != nil { - locationName := "X-Amz-Checksum-Crc32" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32) - } - - if v.ChecksumCRC32C != nil { - locationName := "X-Amz-Checksum-Crc32c" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) - } 
- - if v.ChecksumCRC64NVME != nil { - locationName := "X-Amz-Checksum-Crc64nvme" - encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) - } - - if v.ChecksumSHA1 != nil { - locationName := "X-Amz-Checksum-Sha1" - encoder.SetHeader(locationName).String(*v.ChecksumSHA1) - } - - if v.ChecksumSHA256 != nil { - locationName := "X-Amz-Checksum-Sha256" - encoder.SetHeader(locationName).String(*v.ChecksumSHA256) - } - - if v.ContentDisposition != nil { - locationName := "Content-Disposition" - encoder.SetHeader(locationName).String(*v.ContentDisposition) - } - - if v.ContentEncoding != nil { - locationName := "Content-Encoding" - encoder.SetHeader(locationName).String(*v.ContentEncoding) - } - - if v.ContentLanguage != nil { - locationName := "Content-Language" - encoder.SetHeader(locationName).String(*v.ContentLanguage) - } - - if v.ContentLength != nil { - locationName := "Content-Length" - encoder.SetHeader(locationName).Long(*v.ContentLength) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ContentType != nil { - locationName := "Content-Type" - encoder.SetHeader(locationName).String(*v.ContentType) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Expires != nil { - locationName := "Expires" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.IfMatch != nil { - locationName := "If-Match" - encoder.SetHeader(locationName).String(*v.IfMatch) - } - - if v.IfNoneMatch != nil { - locationName := "If-None-Match" - encoder.SetHeader(locationName).String(*v.IfNoneMatch) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.Metadata != nil { - hv := encoder.Headers("X-Amz-Meta-") - for mapKey, mapVal := range v.Metadata { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } - } - - if len(v.ObjectLockLegalHoldStatus) > 0 { - locationName := "X-Amz-Object-Lock-Legal-Hold" - encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) - } - - if len(v.ObjectLockMode) > 0 { - locationName := "X-Amz-Object-Lock-Mode" - encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) - } - - if v.ObjectLockRetainUntilDate != nil { - locationName := "X-Amz-Object-Lock-Retain-Until-Date" - encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if 
v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.SSEKMSEncryptionContext != nil { - locationName := "X-Amz-Server-Side-Encryption-Context" - encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - if len(v.StorageClass) > 0 { - locationName := "X-Amz-Storage-Class" - encoder.SetHeader(locationName).String(string(v.StorageClass)) - } - - if v.Tagging != nil { - locationName := "X-Amz-Tagging" - encoder.SetHeader(locationName).String(*v.Tagging) - } - - if v.WebsiteRedirectLocation != nil { - locationName := "X-Amz-Website-Redirect-Location" - encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) - } - - if v.WriteOffsetBytes != nil { - locationName := "X-Amz-Write-Offset-Bytes" - encoder.SetHeader(locationName).Long(*v.WriteOffsetBytes) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectAcl struct { -} - -func (*awsRestxml_serializeOpPutObjectAcl) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectAclInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectAclInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.AccessControlPolicy != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := 
smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessControlPolicy", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ACL) > 0 { - locationName := "X-Amz-Acl" - encoder.SetHeader(locationName).String(string(v.ACL)) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.GrantFullControl != nil { - locationName := "X-Amz-Grant-Full-Control" - encoder.SetHeader(locationName).String(*v.GrantFullControl) - } - - if v.GrantRead != nil { - locationName := "X-Amz-Grant-Read" - encoder.SetHeader(locationName).String(*v.GrantRead) - } - - if v.GrantReadACP != nil { - locationName := "X-Amz-Grant-Read-Acp" - encoder.SetHeader(locationName).String(*v.GrantReadACP) - } - - if v.GrantWrite != nil { - locationName := "X-Amz-Grant-Write" - encoder.SetHeader(locationName).String(*v.GrantWrite) - } - - if v.GrantWriteACP != nil { - locationName := "X-Amz-Grant-Write-Acp" - encoder.SetHeader(locationName).String(*v.GrantWriteACP) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectLegalHold struct { -} - -func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := 
in.Parameters.(*PutObjectLegalHoldInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.LegalHold != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "LegalHold", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentObjectLockLegalHold(input.LegalHold, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectLockConfiguration struct { -} - -func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string { - return "OperationSerializer" -} - 
-func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectLockConfigurationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?object-lock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.ObjectLockConfiguration != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ObjectLockConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentObjectLockConfiguration(input.ObjectLockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.Token != nil { - locationName := "X-Amz-Bucket-Object-Lock-Token" - encoder.SetHeader(locationName).String(*v.Token) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectRetention struct { -} - -func (*awsRestxml_serializeOpPutObjectRetention) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectRetentionInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.Retention != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Retention", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentObjectLockRetention(input.Retention, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil 
%T", v) - } - - if v.BypassGovernanceRetention != nil { - locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpPutObjectTagging struct { -} - -func (*awsRestxml_serializeOpPutObjectTagging) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutObjectTaggingInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.Tagging != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tagging", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentTagging(input.Tagging, 
xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpPutPublicAccessBlock struct { -} - -func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*PutPublicAccessBlockInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.PublicAccessBlockConfiguration != 
nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "PublicAccessBlockConfiguration", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentPublicAccessBlockConfiguration(input.PublicAccessBlockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - return nil -} - -type awsRestxml_serializeOpRestoreObject struct { -} - -func (*awsRestxml_serializeOpRestoreObject) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*RestoreObjectInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := 
awsRestxml_serializeOpHttpBindingsRestoreObjectInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if input.RestoreRequest != nil { - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/xml") - } - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - payloadRootAttr := []smithyxml.Attr{} - payloadRoot := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RestoreRequest", - }, - Attr: payloadRootAttr, - } - payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeDocumentRestoreRequest(input.RestoreRequest, xmlEncoder.RootElement(payloadRoot)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - payload := bytes.NewReader(xmlEncoder.Bytes()) - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.VersionId != nil { - encoder.SetQuery("versionId").String(*v.VersionId) - } - - return nil -} - -type awsRestxml_serializeOpSelectObjectContent struct { -} - -func (*awsRestxml_serializeOpSelectObjectContent) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*SelectObjectContentInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder 
*httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/xml") - - xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SelectObjectContentRequest", - }, - Attr: rootAttr, - } - root.Attr = append(root.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) - if err := awsRestxml_serializeOpDocumentSelectObjectContentInput(input, xmlEncoder.RootElement(root)); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - if request, err = request.SetStream(bytes.NewReader(xmlEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - return nil -} - -func awsRestxml_serializeOpDocumentSelectObjectContentInput(v *SelectObjectContentInput, value smithyxml.Value) error { - defer value.Close() - if v.Expression != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Expression", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Expression) - } - if len(v.ExpressionType) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ExpressionType", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.ExpressionType)) - } - if v.InputSerialization != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "InputSerialization", - }, - Attr: rootAttr, - } - 
el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { - return err - } - } - if v.OutputSerialization != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OutputSerialization", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { - return err - } - } - if v.RequestProgress != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RequestProgress", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRequestProgress(v.RequestProgress, el); err != nil { - return err - } - } - if v.ScanRange != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ScanRange", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentScanRange(v.ScanRange, el); err != nil { - return err - } - } - return nil -} - -type awsRestxml_serializeOpUploadPart struct { -} - -func (*awsRestxml_serializeOpUploadPart) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UploadPartInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPart") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsUploadPartInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/octet-stream") - } - - if input.Body != nil { - payload := input.Body - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func 
awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if len(v.ChecksumAlgorithm) > 0 { - locationName := "X-Amz-Sdk-Checksum-Algorithm" - encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) - } - - if v.ChecksumCRC32 != nil { - locationName := "X-Amz-Checksum-Crc32" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32) - } - - if v.ChecksumCRC32C != nil { - locationName := "X-Amz-Checksum-Crc32c" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) - } - - if v.ChecksumCRC64NVME != nil { - locationName := "X-Amz-Checksum-Crc64nvme" - encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) - } - - if v.ChecksumSHA1 != nil { - locationName := "X-Amz-Checksum-Sha1" - encoder.SetHeader(locationName).String(*v.ChecksumSHA1) - } - - if v.ChecksumSHA256 != nil { - locationName := "X-Amz-Checksum-Sha256" - encoder.SetHeader(locationName).String(*v.ChecksumSHA256) - } - - if v.ContentLength != nil { - locationName := "Content-Length" - encoder.SetHeader(locationName).Long(*v.ContentLength) - } - - if v.ContentMD5 != nil { - locationName := "Content-Md5" - encoder.SetHeader(locationName).String(*v.ContentMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.PartNumber != nil { - encoder.SetQuery("partNumber").Integer(*v.PartNumber) - } - - if len(v.RequestPayer) > 0 { - locationName := "X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.UploadId != nil { - encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpUploadPartCopy struct { -} - -func (*awsRestxml_serializeOpUploadPartCopy) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*UploadPartCopyInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPartCopy") - request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "PUT" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.CopySource != nil { - locationName := "X-Amz-Copy-Source" - encoder.SetHeader(locationName).String(*v.CopySource) - } - - if v.CopySourceIfMatch != nil { - locationName := "X-Amz-Copy-Source-If-Match" - encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) - } - - if v.CopySourceIfModifiedSince != nil { - locationName := "X-Amz-Copy-Source-If-Modified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) - } - - if v.CopySourceIfNoneMatch != nil { - locationName := "X-Amz-Copy-Source-If-None-Match" - encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) - } - - if v.CopySourceIfUnmodifiedSince != nil { - locationName := "X-Amz-Copy-Source-If-Unmodified-Since" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) - } - - if v.CopySourceRange != nil { - locationName := "X-Amz-Copy-Source-Range" - encoder.SetHeader(locationName).String(*v.CopySourceRange) - } - - if v.CopySourceSSECustomerAlgorithm != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) - } - - if v.CopySourceSSECustomerKey != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) - } - - if v.CopySourceSSECustomerKeyMD5 != nil { - locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) - } - - if v.ExpectedBucketOwner != nil { - locationName := "X-Amz-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) - } - - if v.ExpectedSourceBucketOwner != nil { - locationName := "X-Amz-Source-Expected-Bucket-Owner" - encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) - } - - if v.Key == nil || len(*v.Key) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} - } - if v.Key != nil { - if err := encoder.SetURI("Key").String(*v.Key); err != nil { - return err - } - } - - if v.PartNumber != nil { - encoder.SetQuery("partNumber").Integer(*v.PartNumber) - } - - if len(v.RequestPayer) > 0 { - locationName := 
"X-Amz-Request-Payer" - encoder.SetHeader(locationName).String(string(v.RequestPayer)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKey != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key" - encoder.SetHeader(locationName).String(*v.SSECustomerKey) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.UploadId != nil { - encoder.SetQuery("uploadId").String(*v.UploadId) - } - - return nil -} - -type awsRestxml_serializeOpWriteGetObjectResponse struct { -} - -func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string { - return "OperationSerializer" -} - -func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*WriteGetObjectResponseInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if !restEncoder.HasHeader("Content-Type") { - ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) - restEncoder.SetHeader("Content-Type").String("application/octet-stream") - } - - if input.Body != nil { - payload := input.Body - if request, err = request.SetStream(payload); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AcceptRanges != nil { - locationName := "X-Amz-Fwd-Header-Accept-Ranges" - encoder.SetHeader(locationName).String(*v.AcceptRanges) - } - - if v.BucketKeyEnabled != 
nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) - } - - if v.CacheControl != nil { - locationName := "X-Amz-Fwd-Header-Cache-Control" - encoder.SetHeader(locationName).String(*v.CacheControl) - } - - if v.ChecksumCRC32 != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32) - } - - if v.ChecksumCRC32C != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c" - encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) - } - - if v.ChecksumCRC64NVME != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc64nvme" - encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) - } - - if v.ChecksumSHA1 != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha1" - encoder.SetHeader(locationName).String(*v.ChecksumSHA1) - } - - if v.ChecksumSHA256 != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256" - encoder.SetHeader(locationName).String(*v.ChecksumSHA256) - } - - if v.ContentDisposition != nil { - locationName := "X-Amz-Fwd-Header-Content-Disposition" - encoder.SetHeader(locationName).String(*v.ContentDisposition) - } - - if v.ContentEncoding != nil { - locationName := "X-Amz-Fwd-Header-Content-Encoding" - encoder.SetHeader(locationName).String(*v.ContentEncoding) - } - - if v.ContentLanguage != nil { - locationName := "X-Amz-Fwd-Header-Content-Language" - encoder.SetHeader(locationName).String(*v.ContentLanguage) - } - - if v.ContentLength != nil { - locationName := "Content-Length" - encoder.SetHeader(locationName).Long(*v.ContentLength) - } - - if v.ContentRange != nil { - locationName := "X-Amz-Fwd-Header-Content-Range" - encoder.SetHeader(locationName).String(*v.ContentRange) - } - - if v.ContentType != nil { - locationName := "X-Amz-Fwd-Header-Content-Type" - encoder.SetHeader(locationName).String(*v.ContentType) - } - - if v.DeleteMarker != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Delete-Marker" - encoder.SetHeader(locationName).Boolean(*v.DeleteMarker) - } - - if v.ErrorCode != nil { - locationName := "X-Amz-Fwd-Error-Code" - encoder.SetHeader(locationName).String(*v.ErrorCode) - } - - if v.ErrorMessage != nil { - locationName := "X-Amz-Fwd-Error-Message" - encoder.SetHeader(locationName).String(*v.ErrorMessage) - } - - if v.ETag != nil { - locationName := "X-Amz-Fwd-Header-Etag" - encoder.SetHeader(locationName).String(*v.ETag) - } - - if v.Expiration != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Expiration" - encoder.SetHeader(locationName).String(*v.Expiration) - } - - if v.Expires != nil { - locationName := "X-Amz-Fwd-Header-Expires" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) - } - - if v.LastModified != nil { - locationName := "X-Amz-Fwd-Header-Last-Modified" - encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.LastModified)) - } - - if v.Metadata != nil { - hv := encoder.Headers("X-Amz-Meta-") - for mapKey, mapVal := range v.Metadata { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } - } - - if v.MissingMeta != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Missing-Meta" - encoder.SetHeader(locationName).Integer(*v.MissingMeta) - } - - if len(v.ObjectLockLegalHoldStatus) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Legal-Hold" - encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus)) - } - - if len(v.ObjectLockMode) > 0 { - locationName := 
"X-Amz-Fwd-Header-X-Amz-Object-Lock-Mode" - encoder.SetHeader(locationName).String(string(v.ObjectLockMode)) - } - - if v.ObjectLockRetainUntilDate != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Retain-Until-Date" - encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) - } - - if v.PartsCount != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Mp-Parts-Count" - encoder.SetHeader(locationName).Integer(*v.PartsCount) - } - - if len(v.ReplicationStatus) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Replication-Status" - encoder.SetHeader(locationName).String(string(v.ReplicationStatus)) - } - - if len(v.RequestCharged) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Request-Charged" - encoder.SetHeader(locationName).String(string(v.RequestCharged)) - } - - if v.RequestRoute != nil { - locationName := "X-Amz-Request-Route" - encoder.SetHeader(locationName).String(*v.RequestRoute) - } - - if v.RequestToken != nil { - locationName := "X-Amz-Request-Token" - encoder.SetHeader(locationName).String(*v.RequestToken) - } - - if v.Restore != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Restore" - encoder.SetHeader(locationName).String(*v.Restore) - } - - if len(v.ServerSideEncryption) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption" - encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) - } - - if v.SSECustomerAlgorithm != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm" - encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) - } - - if v.SSECustomerKeyMD5 != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5" - encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) - } - - if v.SSEKMSKeyId != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" - encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) - } - - if v.StatusCode != nil { - locationName := "X-Amz-Fwd-Status" - encoder.SetHeader(locationName).Integer(*v.StatusCode) - } - - if len(v.StorageClass) > 0 { - locationName := "X-Amz-Fwd-Header-X-Amz-Storage-Class" - encoder.SetHeader(locationName).String(string(v.StorageClass)) - } - - if v.TagCount != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Tagging-Count" - encoder.SetHeader(locationName).Integer(*v.TagCount) - } - - if v.VersionId != nil { - locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id" - encoder.SetHeader(locationName).String(*v.VersionId) - } - - return nil -} - -func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error { - defer value.Close() - if v.DaysAfterInitiation != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DaysAfterInitiation", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.DaysAfterInitiation) - } - return nil -} - -func awsRestxml_serializeDocumentAccelerateConfiguration(v *types.AccelerateConfiguration, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentAccessControlPolicy(v *types.AccessControlPolicy, value smithyxml.Value) error { - defer value.Close() - if v.Grants != nil { - rootAttr := 
[]smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessControlList", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentGrants(v.Grants, el); err != nil { - return err - } - } - if v.Owner != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Owner", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentOwner(v.Owner, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentAccessControlTranslation(v *types.AccessControlTranslation, value smithyxml.Value) error { - defer value.Close() - if len(v.Owner) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Owner", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Owner)) - } - return nil -} - -func awsRestxml_serializeDocumentAllowedHeaders(v []string, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - am.String(v[i]) - } - return nil -} - -func awsRestxml_serializeDocumentAllowedMethods(v []string, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - am.String(v[i]) - } - return nil -} - -func awsRestxml_serializeDocumentAllowedOrigins(v []string, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - am.String(v[i]) - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsAndOperator(v *types.AnalyticsAndOperator, value smithyxml.Value) error { - defer value.Close() - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if v.Tags != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsConfiguration(v *types.AnalyticsConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentAnalyticsFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - if v.StorageClassAnalysis != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "StorageClassAnalysis", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentStorageClassAnalysis(v.StorageClassAnalysis, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsExportDestination(v 
*types.AnalyticsExportDestination, value smithyxml.Value) error { - defer value.Close() - if v.S3BucketDestination != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "S3BucketDestination", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v.S3BucketDestination, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsFilter(v types.AnalyticsFilter, value smithyxml.Value) error { - defer value.Close() - switch uv := v.(type) { - case *types.AnalyticsFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "And", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentAnalyticsAndOperator(&uv.Value, av); err != nil { - return err - } - - case *types.AnalyticsFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.AnalyticsFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: customMemberNameAttr, - } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { - return err - } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - - } - return nil -} - -func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination, value smithyxml.Value) error { - defer value.Close() - if v.Bucket != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Bucket", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Bucket) - } - if v.BucketAccountId != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BucketAccountId", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.BucketAccountId) - } - if len(v.Format) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Format", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Format)) - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - return nil -} - -func awsRestxml_serializeDocumentBucketInfo(v *types.BucketInfo, value smithyxml.Value) error { - defer value.Close() - if len(v.DataRedundancy) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DataRedundancy", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.DataRedundancy)) - } - if len(v.Type) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Type", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Type)) - } - return nil -} - -func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value 
smithyxml.Value) error { - defer value.Close() - if v.Rules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Rule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentLifecycleRules(v.Rules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentBucketLoggingStatus(v *types.BucketLoggingStatus, value smithyxml.Value) error { - defer value.Close() - if v.LoggingEnabled != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "LoggingEnabled", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentLoggingEnabled(v.LoggingEnabled, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCompletedMultipartUpload(v *types.CompletedMultipartUpload, value smithyxml.Value) error { - defer value.Close() - if v.Parts != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Part", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentCompletedPartList(v.Parts, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smithyxml.Value) error { - defer value.Close() - if v.ChecksumCRC32 != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumCRC32", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumCRC32) - } - if v.ChecksumCRC32C != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumCRC32C", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumCRC32C) - } - if v.ChecksumCRC64NVME != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumCRC64NVME", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumCRC64NVME) - } - if v.ChecksumSHA1 != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumSHA1", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumSHA1) - } - if v.ChecksumSHA256 != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ChecksumSHA256", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ChecksumSHA256) - } - if v.ETag != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ETag", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ETag) - } - if v.PartNumber != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "PartNumber", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.PartNumber) - } - return nil -} - -func awsRestxml_serializeDocumentCompletedPartList(v []types.CompletedPart, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentCompletedPart(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCondition(v 
*types.Condition, value smithyxml.Value) error { - defer value.Close() - if v.HttpErrorCodeReturnedEquals != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "HttpErrorCodeReturnedEquals", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.HttpErrorCodeReturnedEquals) - } - if v.KeyPrefixEquals != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KeyPrefixEquals", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KeyPrefixEquals) - } - return nil -} - -func awsRestxml_serializeDocumentCORSConfiguration(v *types.CORSConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.CORSRules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CORSRule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentCORSRules(v.CORSRules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Value) error { - defer value.Close() - if v.AllowedHeaders != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AllowedHeader", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentAllowedHeaders(v.AllowedHeaders, el); err != nil { - return err - } - } - if v.AllowedMethods != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AllowedMethod", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentAllowedMethods(v.AllowedMethods, el); err != nil { - return err - } - } - if v.AllowedOrigins != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AllowedOrigin", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentAllowedOrigins(v.AllowedOrigins, el); err != nil { - return err - } - } - if v.ExposeHeaders != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ExposeHeader", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentExposeHeaders(v.ExposeHeaders, el); err != nil { - return err - } - } - if v.ID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.ID) - } - if v.MaxAgeSeconds != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MaxAgeSeconds", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.MaxAgeSeconds) - } - return nil -} - -func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentCORSRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Bucket != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - 
[…deleted vendored code omitted: machine-generated AWS SDK Go v2 S3 REST-XML serializers (the awsRestxml_serializeDocument* helpers for CSV input/output, Delete, Destination, Encryption, Grants, Intelligent-Tiering, Inventory, Lifecycle, Logging, Metrics, Notification, Object Lock, Ownership Controls, Public Access Block, Queue/Topic configurations, Redirect, Replication, Restore, and RoutingRule shapes), removed along with the rest of the vendor/ directory…]
value.Close() - if v.Condition != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Condition", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentCondition(v.Condition, el); err != nil { - return err - } - } - if v.Redirect != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Redirect", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRedirect(v.Redirect, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentRoutingRules(v []types.RoutingRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RoutingRule", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentRoutingRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentS3KeyFilter(v *types.S3KeyFilter, value smithyxml.Value) error { - defer value.Close() - if v.FilterRules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "FilterRule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentFilterRuleList(v.FilterRules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml.Value) error { - defer value.Close() - if v.AccessControlList != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessControlList", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentGrants(v.AccessControlList, el); err != nil { - return err - } - } - if v.BucketName != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BucketName", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.BucketName) - } - if len(v.CannedACL) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "CannedACL", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.CannedACL)) - } - if v.Encryption != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Encryption", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentEncryption(v.Encryption, el); err != nil { - return err - } - } - if v.Prefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Prefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Prefix) - } - if len(v.StorageClass) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "StorageClass", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.StorageClass)) - } - if v.Tagging != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tagging", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if 
err := awsRestxml_serializeDocumentTagging(v.Tagging, el); err != nil { - return err - } - } - if v.UserMetadata != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "UserMetadata", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentUserMetadata(v.UserMetadata, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentS3TablesDestination(v *types.S3TablesDestination, value smithyxml.Value) error { - defer value.Close() - if v.TableBucketArn != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TableBucketArn", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.TableBucketArn) - } - if v.TableName != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TableName", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.TableName) - } - return nil -} - -func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { - defer value.Close() - if v.End != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "End", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.End) - } - if v.Start != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Start", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Long(*v.Start) - } - return nil -} - -func awsRestxml_serializeDocumentSelectParameters(v *types.SelectParameters, value smithyxml.Value) error { - defer value.Close() - if v.Expression != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Expression", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Expression) - } - if len(v.ExpressionType) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ExpressionType", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.ExpressionType)) - } - if v.InputSerialization != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "InputSerialization", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil { - return err - } - } - if v.OutputSerialization != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OutputSerialization", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault, value smithyxml.Value) error { - defer value.Close() - if v.KMSMasterKeyID != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KMSMasterKeyID", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KMSMasterKeyID) - } - if len(v.SSEAlgorithm) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SSEAlgorithm", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - 
el.String(string(v.SSEAlgorithm)) - } - return nil -} - -func awsRestxml_serializeDocumentServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Rules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Rule", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentServerSideEncryptionRules(v.Rules, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEncryptionRule, value smithyxml.Value) error { - defer value.Close() - if v.ApplyServerSideEncryptionByDefault != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ApplyServerSideEncryptionByDefault", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault, el); err != nil { - return err - } - } - if v.BucketKeyEnabled != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "BucketKeyEnabled", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Boolean(*v.BucketKeyEnabled) - } - return nil -} - -func awsRestxml_serializeDocumentServerSideEncryptionRules(v []types.ServerSideEncryptionRule, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentServerSideEncryptionRule(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentSimplePrefix(v *types.SimplePrefix, value smithyxml.Value) error { - defer value.Close() - return nil -} - -func awsRestxml_serializeDocumentSourceSelectionCriteria(v *types.SourceSelectionCriteria, value smithyxml.Value) error { - defer value.Close() - if v.ReplicaModifications != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ReplicaModifications", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentReplicaModifications(v.ReplicaModifications, el); err != nil { - return err - } - } - if v.SseKmsEncryptedObjects != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SseKmsEncryptedObjects", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentSseKmsEncryptedObjects(v.SseKmsEncryptedObjects, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentSSEKMS(v *types.SSEKMS, value smithyxml.Value) error { - defer value.Close() - if v.KeyId != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "KeyId", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.KeyId) - } - return nil -} - -func awsRestxml_serializeDocumentSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects, value smithyxml.Value) error { - defer value.Close() - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentSSES3(v 
*types.SSES3, value smithyxml.Value) error { - defer value.Close() - return nil -} - -func awsRestxml_serializeDocumentStorageClassAnalysis(v *types.StorageClassAnalysis, value smithyxml.Value) error { - defer value.Close() - if v.DataExport != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "DataExport", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v.DataExport, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport, value smithyxml.Value) error { - defer value.Close() - if v.Destination != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Destination", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentAnalyticsExportDestination(v.Destination, el); err != nil { - return err - } - } - if len(v.OutputSchemaVersion) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "OutputSchemaVersion", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.OutputSchemaVersion)) - } - return nil -} - -func awsRestxml_serializeDocumentTag(v *types.Tag, value smithyxml.Value) error { - defer value.Close() - if v.Key != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Key", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Key) - } - if v.Value != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Value", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Value) - } - return nil -} - -func awsRestxml_serializeDocumentTagging(v *types.Tagging, value smithyxml.Value) error { - defer value.Close() - if v.TagSet != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "TagSet", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentTagSet(v.TagSet, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTagSet(v []types.Tag, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Tag", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTag(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTargetGrant(v *types.TargetGrant, value smithyxml.Value) error { - defer value.Close() - if v.Grantee != nil { - rootAttr := []smithyxml.Attr{} - rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance")) - if len(v.Grantee.Type) > 0 { - var av string - av = string(v.Grantee.Type) - rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av)) - } - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Grantee", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil { - return err - } - } - if 
len(v.Permission) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Permission", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Permission)) - } - return nil -} - -func awsRestxml_serializeDocumentTargetGrants(v []types.TargetGrant, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Grant", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTargetGrant(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTargetObjectKeyFormat(v *types.TargetObjectKeyFormat, value smithyxml.Value) error { - defer value.Close() - if v.PartitionedPrefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "PartitionedPrefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentPartitionedPrefix(v.PartitionedPrefix, el); err != nil { - return err - } - } - if v.SimplePrefix != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "SimplePrefix", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentSimplePrefix(v.SimplePrefix, el); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value) error { - defer value.Close() - if len(v.AccessTier) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "AccessTier", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.AccessTier)) - } - if v.Days != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Days", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Days) - } - return nil -} - -func awsRestxml_serializeDocumentTieringList(v []types.Tiering, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTiering(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTopicConfiguration(v *types.TopicConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.Events != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Event", - }, - Attr: rootAttr, - } - el := value.FlattenedElement(root) - if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil { - return err - } - } - if v.Filter != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Filter", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil { - return err - } - } - if v.Id != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Id", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.Id) - } - if v.TopicArn != 
nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Topic", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(*v.TopicArn) - } - return nil -} - -func awsRestxml_serializeDocumentTopicConfigurationList(v []types.TopicConfiguration, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTopicConfiguration(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml.Value) error { - defer value.Close() - if v.Date != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Date", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(smithytime.FormatDateTime(*v.Date)) - } - if v.Days != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Days", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.Integer(*v.Days) - } - if len(v.StorageClass) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "StorageClass", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.StorageClass)) - } - return nil -} - -func awsRestxml_serializeDocumentTransitionList(v []types.Transition, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - array = value.Array() - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentTransition(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentUserMetadata(v []types.MetadataEntry, value smithyxml.Value) error { - var array *smithyxml.Array - if !value.IsFlattened() { - defer value.Close() - } - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MetadataEntry", - }, - Attr: customMemberNameAttr, - } - array = value.ArrayWithCustomName(customMemberName) - for i := range v { - am := array.Member() - if err := awsRestxml_serializeDocumentMetadataEntry(&v[i], am); err != nil { - return err - } - } - return nil -} - -func awsRestxml_serializeDocumentVersioningConfiguration(v *types.VersioningConfiguration, value smithyxml.Value) error { - defer value.Close() - if len(v.MFADelete) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "MfaDelete", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.MFADelete)) - } - if len(v.Status) > 0 { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "Status", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - el.String(string(v.Status)) - } - return nil -} - -func awsRestxml_serializeDocumentWebsiteConfiguration(v *types.WebsiteConfiguration, value smithyxml.Value) error { - defer value.Close() - if v.ErrorDocument != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "ErrorDocument", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentErrorDocument(v.ErrorDocument, el); err != nil { - return err - } - } - if v.IndexDocument != nil { - rootAttr := 
[]smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "IndexDocument", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentIndexDocument(v.IndexDocument, el); err != nil { - return err - } - } - if v.RedirectAllRequestsTo != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RedirectAllRequestsTo", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRedirectAllRequestsTo(v.RedirectAllRequestsTo, el); err != nil { - return err - } - } - if v.RoutingRules != nil { - rootAttr := []smithyxml.Attr{} - root := smithyxml.StartElement{ - Name: smithyxml.Name{ - Local: "RoutingRules", - }, - Attr: rootAttr, - } - el := value.MemberElement(root) - if err := awsRestxml_serializeDocumentRoutingRules(v.RoutingRules, el); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go deleted file mode 100644 index 4af937c6d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go +++ /dev/null @@ -1,1464 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -type AnalyticsS3ExportFileFormat string - -// Enum values for AnalyticsS3ExportFileFormat -const ( - AnalyticsS3ExportFileFormatCsv AnalyticsS3ExportFileFormat = "CSV" -) - -// Values returns all known values for AnalyticsS3ExportFileFormat. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat { - return []AnalyticsS3ExportFileFormat{ - "CSV", - } -} - -type ArchiveStatus string - -// Enum values for ArchiveStatus -const ( - ArchiveStatusArchiveAccess ArchiveStatus = "ARCHIVE_ACCESS" - ArchiveStatusDeepArchiveAccess ArchiveStatus = "DEEP_ARCHIVE_ACCESS" -) - -// Values returns all known values for ArchiveStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ArchiveStatus) Values() []ArchiveStatus { - return []ArchiveStatus{ - "ARCHIVE_ACCESS", - "DEEP_ARCHIVE_ACCESS", - } -} - -type BucketAccelerateStatus string - -// Enum values for BucketAccelerateStatus -const ( - BucketAccelerateStatusEnabled BucketAccelerateStatus = "Enabled" - BucketAccelerateStatusSuspended BucketAccelerateStatus = "Suspended" -) - -// Values returns all known values for BucketAccelerateStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketAccelerateStatus) Values() []BucketAccelerateStatus { - return []BucketAccelerateStatus{ - "Enabled", - "Suspended", - } -} - -type BucketCannedACL string - -// Enum values for BucketCannedACL -const ( - BucketCannedACLPrivate BucketCannedACL = "private" - BucketCannedACLPublicRead BucketCannedACL = "public-read" - BucketCannedACLPublicReadWrite BucketCannedACL = "public-read-write" - BucketCannedACLAuthenticatedRead BucketCannedACL = "authenticated-read" -) - -// Values returns all known values for BucketCannedACL. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketCannedACL) Values() []BucketCannedACL { - return []BucketCannedACL{ - "private", - "public-read", - "public-read-write", - "authenticated-read", - } -} - -type BucketLocationConstraint string - -// Enum values for BucketLocationConstraint -const ( - BucketLocationConstraintAfSouth1 BucketLocationConstraint = "af-south-1" - BucketLocationConstraintApEast1 BucketLocationConstraint = "ap-east-1" - BucketLocationConstraintApNortheast1 BucketLocationConstraint = "ap-northeast-1" - BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2" - BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3" - BucketLocationConstraintApSouth1 BucketLocationConstraint = "ap-south-1" - BucketLocationConstraintApSouth2 BucketLocationConstraint = "ap-south-2" - BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" - BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" - BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3" - BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1" - BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1" - BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1" - BucketLocationConstraintEu BucketLocationConstraint = "EU" - BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" - BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" - BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" - BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2" - BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" - BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" - BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" - BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1" - BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1" - BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2" - BucketLocationConstraintUsGovEast1 BucketLocationConstraint = "us-gov-east-1" - BucketLocationConstraintUsGovWest1 BucketLocationConstraint = "us-gov-west-1" - BucketLocationConstraintUsWest1 BucketLocationConstraint = "us-west-1" - BucketLocationConstraintUsWest2 BucketLocationConstraint = "us-west-2" -) - -// Values returns all known values for BucketLocationConstraint. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (BucketLocationConstraint) Values() []BucketLocationConstraint { - return []BucketLocationConstraint{ - "af-south-1", - "ap-east-1", - "ap-northeast-1", - "ap-northeast-2", - "ap-northeast-3", - "ap-south-1", - "ap-south-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-southeast-3", - "ca-central-1", - "cn-north-1", - "cn-northwest-1", - "EU", - "eu-central-1", - "eu-north-1", - "eu-south-1", - "eu-south-2", - "eu-west-1", - "eu-west-2", - "eu-west-3", - "me-south-1", - "sa-east-1", - "us-east-2", - "us-gov-east-1", - "us-gov-west-1", - "us-west-1", - "us-west-2", - } -} - -type BucketLogsPermission string - -// Enum values for BucketLogsPermission -const ( - BucketLogsPermissionFullControl BucketLogsPermission = "FULL_CONTROL" - BucketLogsPermissionRead BucketLogsPermission = "READ" - BucketLogsPermissionWrite BucketLogsPermission = "WRITE" -) - -// Values returns all known values for BucketLogsPermission. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketLogsPermission) Values() []BucketLogsPermission { - return []BucketLogsPermission{ - "FULL_CONTROL", - "READ", - "WRITE", - } -} - -type BucketType string - -// Enum values for BucketType -const ( - BucketTypeDirectory BucketType = "Directory" -) - -// Values returns all known values for BucketType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketType) Values() []BucketType { - return []BucketType{ - "Directory", - } -} - -type BucketVersioningStatus string - -// Enum values for BucketVersioningStatus -const ( - BucketVersioningStatusEnabled BucketVersioningStatus = "Enabled" - BucketVersioningStatusSuspended BucketVersioningStatus = "Suspended" -) - -// Values returns all known values for BucketVersioningStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (BucketVersioningStatus) Values() []BucketVersioningStatus { - return []BucketVersioningStatus{ - "Enabled", - "Suspended", - } -} - -type ChecksumAlgorithm string - -// Enum values for ChecksumAlgorithm -const ( - ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" - ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" - ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" - ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" - ChecksumAlgorithmCrc64nvme ChecksumAlgorithm = "CRC64NVME" -) - -// Values returns all known values for ChecksumAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ChecksumAlgorithm) Values() []ChecksumAlgorithm { - return []ChecksumAlgorithm{ - "CRC32", - "CRC32C", - "SHA1", - "SHA256", - "CRC64NVME", - } -} - -type ChecksumMode string - -// Enum values for ChecksumMode -const ( - ChecksumModeEnabled ChecksumMode = "ENABLED" -) - -// Values returns all known values for ChecksumMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (ChecksumMode) Values() []ChecksumMode { - return []ChecksumMode{ - "ENABLED", - } -} - -type ChecksumType string - -// Enum values for ChecksumType -const ( - ChecksumTypeComposite ChecksumType = "COMPOSITE" - ChecksumTypeFullObject ChecksumType = "FULL_OBJECT" -) - -// Values returns all known values for ChecksumType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ChecksumType) Values() []ChecksumType { - return []ChecksumType{ - "COMPOSITE", - "FULL_OBJECT", - } -} - -type CompressionType string - -// Enum values for CompressionType -const ( - CompressionTypeNone CompressionType = "NONE" - CompressionTypeGzip CompressionType = "GZIP" - CompressionTypeBzip2 CompressionType = "BZIP2" -) - -// Values returns all known values for CompressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (CompressionType) Values() []CompressionType { - return []CompressionType{ - "NONE", - "GZIP", - "BZIP2", - } -} - -type DataRedundancy string - -// Enum values for DataRedundancy -const ( - DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone" - DataRedundancySingleLocalZone DataRedundancy = "SingleLocalZone" -) - -// Values returns all known values for DataRedundancy. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (DataRedundancy) Values() []DataRedundancy { - return []DataRedundancy{ - "SingleAvailabilityZone", - "SingleLocalZone", - } -} - -type DeleteMarkerReplicationStatus string - -// Enum values for DeleteMarkerReplicationStatus -const ( - DeleteMarkerReplicationStatusEnabled DeleteMarkerReplicationStatus = "Enabled" - DeleteMarkerReplicationStatusDisabled DeleteMarkerReplicationStatus = "Disabled" -) - -// Values returns all known values for DeleteMarkerReplicationStatus. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { - return []DeleteMarkerReplicationStatus{ - "Enabled", - "Disabled", - } -} - -type EncodingType string - -// Enum values for EncodingType -const ( - EncodingTypeUrl EncodingType = "url" -) - -// Values returns all known values for EncodingType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (EncodingType) Values() []EncodingType { - return []EncodingType{ - "url", - } -} - -type Event string - -// Enum values for Event -const ( - EventS3ReducedRedundancyLostObject Event = "s3:ReducedRedundancyLostObject" - EventS3ObjectCreated Event = "s3:ObjectCreated:*" - EventS3ObjectCreatedPut Event = "s3:ObjectCreated:Put" - EventS3ObjectCreatedPost Event = "s3:ObjectCreated:Post" - EventS3ObjectCreatedCopy Event = "s3:ObjectCreated:Copy" - EventS3ObjectCreatedCompleteMultipartUpload Event = "s3:ObjectCreated:CompleteMultipartUpload" - EventS3ObjectRemoved Event = "s3:ObjectRemoved:*" - EventS3ObjectRemovedDelete Event = "s3:ObjectRemoved:Delete" - EventS3ObjectRemovedDeleteMarkerCreated Event = "s3:ObjectRemoved:DeleteMarkerCreated" - EventS3ObjectRestore Event = "s3:ObjectRestore:*" - EventS3ObjectRestorePost Event = "s3:ObjectRestore:Post" - EventS3ObjectRestoreCompleted Event = "s3:ObjectRestore:Completed" - EventS3Replication Event = "s3:Replication:*" - EventS3ReplicationOperationFailedReplication Event = "s3:Replication:OperationFailedReplication" - EventS3ReplicationOperationNotTracked Event = "s3:Replication:OperationNotTracked" - EventS3ReplicationOperationMissedThreshold Event = "s3:Replication:OperationMissedThreshold" - EventS3ReplicationOperationReplicatedAfterThreshold Event = "s3:Replication:OperationReplicatedAfterThreshold" - EventS3ObjectRestoreDelete Event = "s3:ObjectRestore:Delete" - EventS3LifecycleTransition Event = "s3:LifecycleTransition" - EventS3IntelligentTiering Event = "s3:IntelligentTiering" - EventS3ObjectAclPut Event = "s3:ObjectAcl:Put" - EventS3LifecycleExpiration Event = "s3:LifecycleExpiration:*" - EventS3LifecycleExpirationDelete Event = "s3:LifecycleExpiration:Delete" - EventS3LifecycleExpirationDeleteMarkerCreated Event = "s3:LifecycleExpiration:DeleteMarkerCreated" - EventS3ObjectTagging Event = "s3:ObjectTagging:*" - EventS3ObjectTaggingPut Event = "s3:ObjectTagging:Put" - EventS3ObjectTaggingDelete Event = "s3:ObjectTagging:Delete" -) - -// Values returns all known values for Event. Note that this can be expanded in -// the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Event) Values() []Event { - return []Event{ - "s3:ReducedRedundancyLostObject", - "s3:ObjectCreated:*", - "s3:ObjectCreated:Put", - "s3:ObjectCreated:Post", - "s3:ObjectCreated:Copy", - "s3:ObjectCreated:CompleteMultipartUpload", - "s3:ObjectRemoved:*", - "s3:ObjectRemoved:Delete", - "s3:ObjectRemoved:DeleteMarkerCreated", - "s3:ObjectRestore:*", - "s3:ObjectRestore:Post", - "s3:ObjectRestore:Completed", - "s3:Replication:*", - "s3:Replication:OperationFailedReplication", - "s3:Replication:OperationNotTracked", - "s3:Replication:OperationMissedThreshold", - "s3:Replication:OperationReplicatedAfterThreshold", - "s3:ObjectRestore:Delete", - "s3:LifecycleTransition", - "s3:IntelligentTiering", - "s3:ObjectAcl:Put", - "s3:LifecycleExpiration:*", - "s3:LifecycleExpiration:Delete", - "s3:LifecycleExpiration:DeleteMarkerCreated", - "s3:ObjectTagging:*", - "s3:ObjectTagging:Put", - "s3:ObjectTagging:Delete", - } -} - -type ExistingObjectReplicationStatus string - -// Enum values for ExistingObjectReplicationStatus -const ( - ExistingObjectReplicationStatusEnabled ExistingObjectReplicationStatus = "Enabled" - ExistingObjectReplicationStatusDisabled ExistingObjectReplicationStatus = "Disabled" -) - -// Values returns all known values for ExistingObjectReplicationStatus. 
Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { - return []ExistingObjectReplicationStatus{ - "Enabled", - "Disabled", - } -} - -type ExpirationStatus string - -// Enum values for ExpirationStatus -const ( - ExpirationStatusEnabled ExpirationStatus = "Enabled" - ExpirationStatusDisabled ExpirationStatus = "Disabled" -) - -// Values returns all known values for ExpirationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ExpirationStatus) Values() []ExpirationStatus { - return []ExpirationStatus{ - "Enabled", - "Disabled", - } -} - -type ExpressionType string - -// Enum values for ExpressionType -const ( - ExpressionTypeSql ExpressionType = "SQL" -) - -// Values returns all known values for ExpressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ExpressionType) Values() []ExpressionType { - return []ExpressionType{ - "SQL", - } -} - -type FileHeaderInfo string - -// Enum values for FileHeaderInfo -const ( - FileHeaderInfoUse FileHeaderInfo = "USE" - FileHeaderInfoIgnore FileHeaderInfo = "IGNORE" - FileHeaderInfoNone FileHeaderInfo = "NONE" -) - -// Values returns all known values for FileHeaderInfo. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (FileHeaderInfo) Values() []FileHeaderInfo { - return []FileHeaderInfo{ - "USE", - "IGNORE", - "NONE", - } -} - -type FilterRuleName string - -// Enum values for FilterRuleName -const ( - FilterRuleNamePrefix FilterRuleName = "prefix" - FilterRuleNameSuffix FilterRuleName = "suffix" -) - -// Values returns all known values for FilterRuleName. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (FilterRuleName) Values() []FilterRuleName { - return []FilterRuleName{ - "prefix", - "suffix", - } -} - -type IntelligentTieringAccessTier string - -// Enum values for IntelligentTieringAccessTier -const ( - IntelligentTieringAccessTierArchiveAccess IntelligentTieringAccessTier = "ARCHIVE_ACCESS" - IntelligentTieringAccessTierDeepArchiveAccess IntelligentTieringAccessTier = "DEEP_ARCHIVE_ACCESS" -) - -// Values returns all known values for IntelligentTieringAccessTier. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { - return []IntelligentTieringAccessTier{ - "ARCHIVE_ACCESS", - "DEEP_ARCHIVE_ACCESS", - } -} - -type IntelligentTieringStatus string - -// Enum values for IntelligentTieringStatus -const ( - IntelligentTieringStatusEnabled IntelligentTieringStatus = "Enabled" - IntelligentTieringStatusDisabled IntelligentTieringStatus = "Disabled" -) - -// Values returns all known values for IntelligentTieringStatus. 
Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { - return []IntelligentTieringStatus{ - "Enabled", - "Disabled", - } -} - -type InventoryFormat string - -// Enum values for InventoryFormat -const ( - InventoryFormatCsv InventoryFormat = "CSV" - InventoryFormatOrc InventoryFormat = "ORC" - InventoryFormatParquet InventoryFormat = "Parquet" -) - -// Values returns all known values for InventoryFormat. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (InventoryFormat) Values() []InventoryFormat { - return []InventoryFormat{ - "CSV", - "ORC", - "Parquet", - } -} - -type InventoryFrequency string - -// Enum values for InventoryFrequency -const ( - InventoryFrequencyDaily InventoryFrequency = "Daily" - InventoryFrequencyWeekly InventoryFrequency = "Weekly" -) - -// Values returns all known values for InventoryFrequency. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (InventoryFrequency) Values() []InventoryFrequency { - return []InventoryFrequency{ - "Daily", - "Weekly", - } -} - -type InventoryIncludedObjectVersions string - -// Enum values for InventoryIncludedObjectVersions -const ( - InventoryIncludedObjectVersionsAll InventoryIncludedObjectVersions = "All" - InventoryIncludedObjectVersionsCurrent InventoryIncludedObjectVersions = "Current" -) - -// Values returns all known values for InventoryIncludedObjectVersions. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { - return []InventoryIncludedObjectVersions{ - "All", - "Current", - } -} - -type InventoryOptionalField string - -// Enum values for InventoryOptionalField -const ( - InventoryOptionalFieldSize InventoryOptionalField = "Size" - InventoryOptionalFieldLastModifiedDate InventoryOptionalField = "LastModifiedDate" - InventoryOptionalFieldStorageClass InventoryOptionalField = "StorageClass" - InventoryOptionalFieldETag InventoryOptionalField = "ETag" - InventoryOptionalFieldIsMultipartUploaded InventoryOptionalField = "IsMultipartUploaded" - InventoryOptionalFieldReplicationStatus InventoryOptionalField = "ReplicationStatus" - InventoryOptionalFieldEncryptionStatus InventoryOptionalField = "EncryptionStatus" - InventoryOptionalFieldObjectLockRetainUntilDate InventoryOptionalField = "ObjectLockRetainUntilDate" - InventoryOptionalFieldObjectLockMode InventoryOptionalField = "ObjectLockMode" - InventoryOptionalFieldObjectLockLegalHoldStatus InventoryOptionalField = "ObjectLockLegalHoldStatus" - InventoryOptionalFieldIntelligentTieringAccessTier InventoryOptionalField = "IntelligentTieringAccessTier" - InventoryOptionalFieldBucketKeyStatus InventoryOptionalField = "BucketKeyStatus" - InventoryOptionalFieldChecksumAlgorithm InventoryOptionalField = "ChecksumAlgorithm" - InventoryOptionalFieldObjectAccessControlList InventoryOptionalField = "ObjectAccessControlList" - InventoryOptionalFieldObjectOwner InventoryOptionalField = "ObjectOwner" -) - -// Values returns all known values for InventoryOptionalField. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (InventoryOptionalField) Values() []InventoryOptionalField { - return []InventoryOptionalField{ - "Size", - "LastModifiedDate", - "StorageClass", - "ETag", - "IsMultipartUploaded", - "ReplicationStatus", - "EncryptionStatus", - "ObjectLockRetainUntilDate", - "ObjectLockMode", - "ObjectLockLegalHoldStatus", - "IntelligentTieringAccessTier", - "BucketKeyStatus", - "ChecksumAlgorithm", - "ObjectAccessControlList", - "ObjectOwner", - } -} - -type JSONType string - -// Enum values for JSONType -const ( - JSONTypeDocument JSONType = "DOCUMENT" - JSONTypeLines JSONType = "LINES" -) - -// Values returns all known values for JSONType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (JSONType) Values() []JSONType { - return []JSONType{ - "DOCUMENT", - "LINES", - } -} - -type LocationType string - -// Enum values for LocationType -const ( - LocationTypeAvailabilityZone LocationType = "AvailabilityZone" - LocationTypeLocalZone LocationType = "LocalZone" -) - -// Values returns all known values for LocationType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (LocationType) Values() []LocationType { - return []LocationType{ - "AvailabilityZone", - "LocalZone", - } -} - -type MetadataDirective string - -// Enum values for MetadataDirective -const ( - MetadataDirectiveCopy MetadataDirective = "COPY" - MetadataDirectiveReplace MetadataDirective = "REPLACE" -) - -// Values returns all known values for MetadataDirective. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (MetadataDirective) Values() []MetadataDirective { - return []MetadataDirective{ - "COPY", - "REPLACE", - } -} - -type MetricsStatus string - -// Enum values for MetricsStatus -const ( - MetricsStatusEnabled MetricsStatus = "Enabled" - MetricsStatusDisabled MetricsStatus = "Disabled" -) - -// Values returns all known values for MetricsStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (MetricsStatus) Values() []MetricsStatus { - return []MetricsStatus{ - "Enabled", - "Disabled", - } -} - -type MFADelete string - -// Enum values for MFADelete -const ( - MFADeleteEnabled MFADelete = "Enabled" - MFADeleteDisabled MFADelete = "Disabled" -) - -// Values returns all known values for MFADelete. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (MFADelete) Values() []MFADelete { - return []MFADelete{ - "Enabled", - "Disabled", - } -} - -type MFADeleteStatus string - -// Enum values for MFADeleteStatus -const ( - MFADeleteStatusEnabled MFADeleteStatus = "Enabled" - MFADeleteStatusDisabled MFADeleteStatus = "Disabled" -) - -// Values returns all known values for MFADeleteStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (MFADeleteStatus) Values() []MFADeleteStatus { - return []MFADeleteStatus{ - "Enabled", - "Disabled", - } -} - -type ObjectAttributes string - -// Enum values for ObjectAttributes -const ( - ObjectAttributesEtag ObjectAttributes = "ETag" - ObjectAttributesChecksum ObjectAttributes = "Checksum" - ObjectAttributesObjectParts ObjectAttributes = "ObjectParts" - ObjectAttributesStorageClass ObjectAttributes = "StorageClass" - ObjectAttributesObjectSize ObjectAttributes = "ObjectSize" -) - -// Values returns all known values for ObjectAttributes. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectAttributes) Values() []ObjectAttributes { - return []ObjectAttributes{ - "ETag", - "Checksum", - "ObjectParts", - "StorageClass", - "ObjectSize", - } -} - -type ObjectCannedACL string - -// Enum values for ObjectCannedACL -const ( - ObjectCannedACLPrivate ObjectCannedACL = "private" - ObjectCannedACLPublicRead ObjectCannedACL = "public-read" - ObjectCannedACLPublicReadWrite ObjectCannedACL = "public-read-write" - ObjectCannedACLAuthenticatedRead ObjectCannedACL = "authenticated-read" - ObjectCannedACLAwsExecRead ObjectCannedACL = "aws-exec-read" - ObjectCannedACLBucketOwnerRead ObjectCannedACL = "bucket-owner-read" - ObjectCannedACLBucketOwnerFullControl ObjectCannedACL = "bucket-owner-full-control" -) - -// Values returns all known values for ObjectCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (ObjectCannedACL) Values() []ObjectCannedACL { - return []ObjectCannedACL{ - "private", - "public-read", - "public-read-write", - "authenticated-read", - "aws-exec-read", - "bucket-owner-read", - "bucket-owner-full-control", - } -} - -type ObjectLockEnabled string - -// Enum values for ObjectLockEnabled -const ( - ObjectLockEnabledEnabled ObjectLockEnabled = "Enabled" -) - -// Values returns all known values for ObjectLockEnabled. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectLockEnabled) Values() []ObjectLockEnabled { - return []ObjectLockEnabled{ - "Enabled", - } -} - -type ObjectLockLegalHoldStatus string - -// Enum values for ObjectLockLegalHoldStatus -const ( - ObjectLockLegalHoldStatusOn ObjectLockLegalHoldStatus = "ON" - ObjectLockLegalHoldStatusOff ObjectLockLegalHoldStatus = "OFF" -) - -// Values returns all known values for ObjectLockLegalHoldStatus. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { - return []ObjectLockLegalHoldStatus{ - "ON", - "OFF", - } -} - -type ObjectLockMode string - -// Enum values for ObjectLockMode -const ( - ObjectLockModeGovernance ObjectLockMode = "GOVERNANCE" - ObjectLockModeCompliance ObjectLockMode = "COMPLIANCE" -) - -// Values returns all known values for ObjectLockMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectLockMode) Values() []ObjectLockMode { - return []ObjectLockMode{ - "GOVERNANCE", - "COMPLIANCE", - } -} - -type ObjectLockRetentionMode string - -// Enum values for ObjectLockRetentionMode -const ( - ObjectLockRetentionModeGovernance ObjectLockRetentionMode = "GOVERNANCE" - ObjectLockRetentionModeCompliance ObjectLockRetentionMode = "COMPLIANCE" -) - -// Values returns all known values for ObjectLockRetentionMode. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { - return []ObjectLockRetentionMode{ - "GOVERNANCE", - "COMPLIANCE", - } -} - -type ObjectOwnership string - -// Enum values for ObjectOwnership -const ( - ObjectOwnershipBucketOwnerPreferred ObjectOwnership = "BucketOwnerPreferred" - ObjectOwnershipObjectWriter ObjectOwnership = "ObjectWriter" - ObjectOwnershipBucketOwnerEnforced ObjectOwnership = "BucketOwnerEnforced" -) - -// Values returns all known values for ObjectOwnership. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (ObjectOwnership) Values() []ObjectOwnership { - return []ObjectOwnership{ - "BucketOwnerPreferred", - "ObjectWriter", - "BucketOwnerEnforced", - } -} - -type ObjectStorageClass string - -// Enum values for ObjectStorageClass -const ( - ObjectStorageClassStandard ObjectStorageClass = "STANDARD" - ObjectStorageClassReducedRedundancy ObjectStorageClass = "REDUCED_REDUNDANCY" - ObjectStorageClassGlacier ObjectStorageClass = "GLACIER" - ObjectStorageClassStandardIa ObjectStorageClass = "STANDARD_IA" - ObjectStorageClassOnezoneIa ObjectStorageClass = "ONEZONE_IA" - ObjectStorageClassIntelligentTiering ObjectStorageClass = "INTELLIGENT_TIERING" - ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE" - ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" - ObjectStorageClassGlacierIr ObjectStorageClass = "GLACIER_IR" - ObjectStorageClassSnow ObjectStorageClass = "SNOW" - ObjectStorageClassExpressOnezone ObjectStorageClass = "EXPRESS_ONEZONE" -) - -// Values returns all known values for ObjectStorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectStorageClass) Values() []ObjectStorageClass { - return []ObjectStorageClass{ - "STANDARD", - "REDUCED_REDUNDANCY", - "GLACIER", - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "DEEP_ARCHIVE", - "OUTPOSTS", - "GLACIER_IR", - "SNOW", - "EXPRESS_ONEZONE", - } -} - -type ObjectVersionStorageClass string - -// Enum values for ObjectVersionStorageClass -const ( - ObjectVersionStorageClassStandard ObjectVersionStorageClass = "STANDARD" -) - -// Values returns all known values for ObjectVersionStorageClass. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { - return []ObjectVersionStorageClass{ - "STANDARD", - } -} - -type OptionalObjectAttributes string - -// Enum values for OptionalObjectAttributes -const ( - OptionalObjectAttributesRestoreStatus OptionalObjectAttributes = "RestoreStatus" -) - -// Values returns all known values for OptionalObjectAttributes. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (OptionalObjectAttributes) Values() []OptionalObjectAttributes { - return []OptionalObjectAttributes{ - "RestoreStatus", - } -} - -type OwnerOverride string - -// Enum values for OwnerOverride -const ( - OwnerOverrideDestination OwnerOverride = "Destination" -) - -// Values returns all known values for OwnerOverride. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (OwnerOverride) Values() []OwnerOverride { - return []OwnerOverride{ - "Destination", - } -} - -type PartitionDateSource string - -// Enum values for PartitionDateSource -const ( - PartitionDateSourceEventTime PartitionDateSource = "EventTime" - PartitionDateSourceDeliveryTime PartitionDateSource = "DeliveryTime" -) - -// Values returns all known values for PartitionDateSource. Note that this can be -// expanded in the future, and so it is only as up to date as the client. 
-// -// The ordering of this slice is not guaranteed to be stable across updates. -func (PartitionDateSource) Values() []PartitionDateSource { - return []PartitionDateSource{ - "EventTime", - "DeliveryTime", - } -} - -type Payer string - -// Enum values for Payer -const ( - PayerRequester Payer = "Requester" - PayerBucketOwner Payer = "BucketOwner" -) - -// Values returns all known values for Payer. Note that this can be expanded in -// the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Payer) Values() []Payer { - return []Payer{ - "Requester", - "BucketOwner", - } -} - -type Permission string - -// Enum values for Permission -const ( - PermissionFullControl Permission = "FULL_CONTROL" - PermissionWrite Permission = "WRITE" - PermissionWriteAcp Permission = "WRITE_ACP" - PermissionRead Permission = "READ" - PermissionReadAcp Permission = "READ_ACP" -) - -// Values returns all known values for Permission. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Permission) Values() []Permission { - return []Permission{ - "FULL_CONTROL", - "WRITE", - "WRITE_ACP", - "READ", - "READ_ACP", - } -} - -type Protocol string - -// Enum values for Protocol -const ( - ProtocolHttp Protocol = "http" - ProtocolHttps Protocol = "https" -) - -// Values returns all known values for Protocol. Note that this can be expanded in -// the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Protocol) Values() []Protocol { - return []Protocol{ - "http", - "https", - } -} - -type QuoteFields string - -// Enum values for QuoteFields -const ( - QuoteFieldsAlways QuoteFields = "ALWAYS" - QuoteFieldsAsneeded QuoteFields = "ASNEEDED" -) - -// Values returns all known values for QuoteFields. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (QuoteFields) Values() []QuoteFields { - return []QuoteFields{ - "ALWAYS", - "ASNEEDED", - } -} - -type ReplicaModificationsStatus string - -// Enum values for ReplicaModificationsStatus -const ( - ReplicaModificationsStatusEnabled ReplicaModificationsStatus = "Enabled" - ReplicaModificationsStatusDisabled ReplicaModificationsStatus = "Disabled" -) - -// Values returns all known values for ReplicaModificationsStatus. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { - return []ReplicaModificationsStatus{ - "Enabled", - "Disabled", - } -} - -type ReplicationRuleStatus string - -// Enum values for ReplicationRuleStatus -const ( - ReplicationRuleStatusEnabled ReplicationRuleStatus = "Enabled" - ReplicationRuleStatusDisabled ReplicationRuleStatus = "Disabled" -) - -// Values returns all known values for ReplicationRuleStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { - return []ReplicationRuleStatus{ - "Enabled", - "Disabled", - } -} - -type ReplicationStatus string - -// Enum values for ReplicationStatus -const ( - ReplicationStatusComplete ReplicationStatus = "COMPLETE" - ReplicationStatusPending ReplicationStatus = "PENDING" - ReplicationStatusFailed ReplicationStatus = "FAILED" - ReplicationStatusReplica ReplicationStatus = "REPLICA" - ReplicationStatusCompleted ReplicationStatus = "COMPLETED" -) - -// Values returns all known values for ReplicationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ReplicationStatus) Values() []ReplicationStatus { - return []ReplicationStatus{ - "COMPLETE", - "PENDING", - "FAILED", - "REPLICA", - "COMPLETED", - } -} - -type ReplicationTimeStatus string - -// Enum values for ReplicationTimeStatus -const ( - ReplicationTimeStatusEnabled ReplicationTimeStatus = "Enabled" - ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled" -) - -// Values returns all known values for ReplicationTimeStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { - return []ReplicationTimeStatus{ - "Enabled", - "Disabled", - } -} - -type RequestCharged string - -// Enum values for RequestCharged -const ( - RequestChargedRequester RequestCharged = "requester" -) - -// Values returns all known values for RequestCharged. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (RequestCharged) Values() []RequestCharged { - return []RequestCharged{ - "requester", - } -} - -type RequestPayer string - -// Enum values for RequestPayer -const ( - RequestPayerRequester RequestPayer = "requester" -) - -// Values returns all known values for RequestPayer. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (RequestPayer) Values() []RequestPayer { - return []RequestPayer{ - "requester", - } -} - -type RestoreRequestType string - -// Enum values for RestoreRequestType -const ( - RestoreRequestTypeSelect RestoreRequestType = "SELECT" -) - -// Values returns all known values for RestoreRequestType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (RestoreRequestType) Values() []RestoreRequestType { - return []RestoreRequestType{ - "SELECT", - } -} - -type ServerSideEncryption string - -// Enum values for ServerSideEncryption -const ( - ServerSideEncryptionAes256 ServerSideEncryption = "AES256" - ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms" - ServerSideEncryptionAwsKmsDsse ServerSideEncryption = "aws:kms:dsse" -) - -// Values returns all known values for ServerSideEncryption. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (ServerSideEncryption) Values() []ServerSideEncryption { - return []ServerSideEncryption{ - "AES256", - "aws:kms", - "aws:kms:dsse", - } -} - -type SessionMode string - -// Enum values for SessionMode -const ( - SessionModeReadOnly SessionMode = "ReadOnly" - SessionModeReadWrite SessionMode = "ReadWrite" -) - -// Values returns all known values for SessionMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (SessionMode) Values() []SessionMode { - return []SessionMode{ - "ReadOnly", - "ReadWrite", - } -} - -type SseKmsEncryptedObjectsStatus string - -// Enum values for SseKmsEncryptedObjectsStatus -const ( - SseKmsEncryptedObjectsStatusEnabled SseKmsEncryptedObjectsStatus = "Enabled" - SseKmsEncryptedObjectsStatusDisabled SseKmsEncryptedObjectsStatus = "Disabled" -) - -// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { - return []SseKmsEncryptedObjectsStatus{ - "Enabled", - "Disabled", - } -} - -type StorageClass string - -// Enum values for StorageClass -const ( - StorageClassStandard StorageClass = "STANDARD" - StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY" - StorageClassStandardIa StorageClass = "STANDARD_IA" - StorageClassOnezoneIa StorageClass = "ONEZONE_IA" - StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING" - StorageClassGlacier StorageClass = "GLACIER" - StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE" - StorageClassOutposts StorageClass = "OUTPOSTS" - StorageClassGlacierIr StorageClass = "GLACIER_IR" - StorageClassSnow StorageClass = "SNOW" - StorageClassExpressOnezone StorageClass = "EXPRESS_ONEZONE" -) - -// Values returns all known values for StorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (StorageClass) Values() []StorageClass { - return []StorageClass{ - "STANDARD", - "REDUCED_REDUNDANCY", - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "GLACIER", - "DEEP_ARCHIVE", - "OUTPOSTS", - "GLACIER_IR", - "SNOW", - "EXPRESS_ONEZONE", - } -} - -type StorageClassAnalysisSchemaVersion string - -// Enum values for StorageClassAnalysisSchemaVersion -const ( - StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1" -) - -// Values returns all known values for StorageClassAnalysisSchemaVersion. Note -// that this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { - return []StorageClassAnalysisSchemaVersion{ - "V_1", - } -} - -type TaggingDirective string - -// Enum values for TaggingDirective -const ( - TaggingDirectiveCopy TaggingDirective = "COPY" - TaggingDirectiveReplace TaggingDirective = "REPLACE" -) - -// Values returns all known values for TaggingDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. 
-func (TaggingDirective) Values() []TaggingDirective { - return []TaggingDirective{ - "COPY", - "REPLACE", - } -} - -type Tier string - -// Enum values for Tier -const ( - TierStandard Tier = "Standard" - TierBulk Tier = "Bulk" - TierExpedited Tier = "Expedited" -) - -// Values returns all known values for Tier. Note that this can be expanded in the -// future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Tier) Values() []Tier { - return []Tier{ - "Standard", - "Bulk", - "Expedited", - } -} - -type TransitionDefaultMinimumObjectSize string - -// Enum values for TransitionDefaultMinimumObjectSize -const ( - TransitionDefaultMinimumObjectSizeVariesByStorageClass TransitionDefaultMinimumObjectSize = "varies_by_storage_class" - TransitionDefaultMinimumObjectSizeAllStorageClasses128k TransitionDefaultMinimumObjectSize = "all_storage_classes_128K" -) - -// Values returns all known values for TransitionDefaultMinimumObjectSize. Note -// that this can be expanded in the future, and so it is only as up to date as the -// client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (TransitionDefaultMinimumObjectSize) Values() []TransitionDefaultMinimumObjectSize { - return []TransitionDefaultMinimumObjectSize{ - "varies_by_storage_class", - "all_storage_classes_128K", - } -} - -type TransitionStorageClass string - -// Enum values for TransitionStorageClass -const ( - TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" - TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA" - TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA" - TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING" - TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE" - TransitionStorageClassGlacierIr TransitionStorageClass = "GLACIER_IR" -) - -// Values returns all known values for TransitionStorageClass. Note that this can -// be expanded in the future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (TransitionStorageClass) Values() []TransitionStorageClass { - return []TransitionStorageClass{ - "GLACIER", - "STANDARD_IA", - "ONEZONE_IA", - "INTELLIGENT_TIERING", - "DEEP_ARCHIVE", - "GLACIER_IR", - } -} - -type Type string - -// Enum values for Type -const ( - TypeCanonicalUser Type = "CanonicalUser" - TypeAmazonCustomerByEmail Type = "AmazonCustomerByEmail" - TypeGroup Type = "Group" -) - -// Values returns all known values for Type. Note that this can be expanded in the -// future, and so it is only as up to date as the client. -// -// The ordering of this slice is not guaranteed to be stable across updates. -func (Type) Values() []Type { - return []Type{ - "CanonicalUser", - "AmazonCustomerByEmail", - "Group", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go deleted file mode 100644 index 1070573a5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go +++ /dev/null @@ -1,382 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// The requested bucket name is not available. The bucket namespace is shared by -// all users of the system. Select a different name and try again. 
-type BucketAlreadyExists struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *BucketAlreadyExists) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *BucketAlreadyExists) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *BucketAlreadyExists) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "BucketAlreadyExists" - } - return *e.ErrorCodeOverride -} -func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The bucket you tried to create already exists, and you own it. Amazon S3 -// returns this error in all Amazon Web Services Regions except in the North -// Virginia Region. For legacy compatibility, if you re-create an existing bucket -// that you already own in the North Virginia Region, Amazon S3 returns 200 OK and -// resets the bucket access control lists (ACLs). -type BucketAlreadyOwnedByYou struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *BucketAlreadyOwnedByYou) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *BucketAlreadyOwnedByYou) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *BucketAlreadyOwnedByYou) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "BucketAlreadyOwnedByYou" - } - return *e.ErrorCodeOverride -} -func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The existing object was created with a different encryption type. Subsequent -// -// write requests must include the appropriate encryption parameters in the request -// or while creating the session. -type EncryptionTypeMismatch struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *EncryptionTypeMismatch) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *EncryptionTypeMismatch) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *EncryptionTypeMismatch) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "EncryptionTypeMismatch" - } - return *e.ErrorCodeOverride -} -func (e *EncryptionTypeMismatch) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Object is archived and inaccessible until restored. -// -// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval -// storage class, the S3 Glacier Deep Archive storage class, the S3 -// Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep -// Archive Access tier, before you can retrieve the object you must first restore a -// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. 
-// -// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html -// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html -type InvalidObjectState struct { - Message *string - - ErrorCodeOverride *string - - StorageClass StorageClass - AccessTier IntelligentTieringAccessTier - - noSmithyDocumentSerde -} - -func (e *InvalidObjectState) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidObjectState) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidObjectState) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidObjectState" - } - return *e.ErrorCodeOverride -} -func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// You may receive this error in multiple cases. Depending on the reason for the -// error, you may receive one of the messages below: -// -// - Cannot specify both a write offset value and user-defined object metadata -// for existing objects. -// -// - Checksum Type mismatch occurred, expected checksum Type: sha1, actual -// checksum Type: crc32c. -// -// - Request body cannot be empty when 'write offset' is specified. -type InvalidRequest struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequest) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequest) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequest) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequest" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequest) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The write offset value that you specified does not match the current object -// -// size. -type InvalidWriteOffset struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidWriteOffset) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidWriteOffset) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidWriteOffset) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidWriteOffset" - } - return *e.ErrorCodeOverride -} -func (e *InvalidWriteOffset) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified bucket does not exist. -type NoSuchBucket struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *NoSuchBucket) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *NoSuchBucket) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *NoSuchBucket) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "NoSuchBucket" - } - return *e.ErrorCodeOverride -} -func (e *NoSuchBucket) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified key does not exist. 
-type NoSuchKey struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *NoSuchKey) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *NoSuchKey) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *NoSuchKey) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "NoSuchKey" - } - return *e.ErrorCodeOverride -} -func (e *NoSuchKey) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified multipart upload does not exist. -type NoSuchUpload struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *NoSuchUpload) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *NoSuchUpload) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *NoSuchUpload) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "NoSuchUpload" - } - return *e.ErrorCodeOverride -} -func (e *NoSuchUpload) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified content does not exist. -type NotFound struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *NotFound) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *NotFound) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *NotFound) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "NotFound" - } - return *e.ErrorCodeOverride -} -func (e *NotFound) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// This action is not allowed against this storage tier. -type ObjectAlreadyInActiveTierError struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ObjectAlreadyInActiveTierError) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ObjectAlreadyInActiveTierError) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ObjectAlreadyInActiveTierError) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ObjectAlreadyInActiveTierError" - } - return *e.ErrorCodeOverride -} -func (e *ObjectAlreadyInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The source object of the COPY action is not in the active tier and is only -// stored in Amazon S3 Glacier. -type ObjectNotInActiveTierError struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ObjectNotInActiveTierError) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ObjectNotInActiveTierError) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ObjectNotInActiveTierError) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ObjectNotInActiveTierError" - } - return *e.ErrorCodeOverride -} -func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// You have attempted to add more parts than the maximum of 10000 that are -// -// allowed for this object. You can use the CopyObject operation to copy this -// object to another and then add more data to the newly copied object. 
-type TooManyParts struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *TooManyParts) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *TooManyParts) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *TooManyParts) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "TooManyParts" - } - return *e.ErrorCodeOverride -} -func (e *TooManyParts) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go deleted file mode 100644 index 97c158937..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go +++ /dev/null @@ -1,4467 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" - "time" -) - -// Specifies the days since the initiation of an incomplete multipart upload that -// Amazon S3 will wait before permanently removing all parts of the upload. For -// more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. -// -// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config -type AbortIncompleteMultipartUpload struct { - - // Specifies the number of days after which Amazon S3 aborts an incomplete - // multipart upload. - DaysAfterInitiation *int32 - - noSmithyDocumentSerde -} - -// Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide. -// -// [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -type AccelerateConfiguration struct { - - // Specifies the transfer acceleration status of the bucket. - Status BucketAccelerateStatus - - noSmithyDocumentSerde -} - -// Contains the elements that set the ACL permissions for an object per grantee. -type AccessControlPolicy struct { - - // A list of grants. - Grants []Grant - - // Container for the bucket owner's display name and ID. - Owner *Owner - - noSmithyDocumentSerde -} - -// A container for information about access control for replicas. -type AccessControlTranslation struct { - - // Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the - // Amazon S3 API Reference. - // - // [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html - // - // This member is required. - Owner OwnerOverride - - noSmithyDocumentSerde -} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates in any -// combination, and an object must match all of the predicates for the filter to -// apply. -type AnalyticsAndOperator struct { - - // The prefix to use when evaluating an AND predicate: The prefix that an object - // must have to be included in the metrics results. - Prefix *string - - // The list of tags to use when evaluating an AND predicate. - Tags []Tag - - noSmithyDocumentSerde -} - -// Specifies the configuration and any analyses for the analytics filter of an -// Amazon S3 bucket. 
-type AnalyticsConfiguration struct { - - // The ID that identifies the analytics configuration. - // - // This member is required. - Id *string - - // Contains data related to access patterns to be collected and made available to - // analyze the tradeoffs between different storage classes. - // - // This member is required. - StorageClassAnalysis *StorageClassAnalysis - - // The filter used to describe a set of objects for analyses. A filter must have - // exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no - // filter is provided, all objects will be considered in any analysis. - Filter AnalyticsFilter - - noSmithyDocumentSerde -} - -// Where to publish the analytics results. -type AnalyticsExportDestination struct { - - // A destination signifying output to an S3 bucket. - // - // This member is required. - S3BucketDestination *AnalyticsS3BucketDestination - - noSmithyDocumentSerde -} - -// The filter used to describe a set of objects for analyses. A filter must have -// exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no -// filter is provided, all objects will be considered in any analysis. -// -// The following types satisfy this interface: -// -// AnalyticsFilterMemberAnd -// AnalyticsFilterMemberPrefix -// AnalyticsFilterMemberTag -type AnalyticsFilter interface { - isAnalyticsFilter() -} - -// A conjunction (logical AND) of predicates, which is used in evaluating an -// analytics filter. The operator must have at least two predicates. -type AnalyticsFilterMemberAnd struct { - Value AnalyticsAndOperator - - noSmithyDocumentSerde -} - -func (*AnalyticsFilterMemberAnd) isAnalyticsFilter() {} - -// The prefix to use when evaluating an analytics filter. -type AnalyticsFilterMemberPrefix struct { - Value string - - noSmithyDocumentSerde -} - -func (*AnalyticsFilterMemberPrefix) isAnalyticsFilter() {} - -// The tag to use when evaluating an analytics filter. -type AnalyticsFilterMemberTag struct { - Value Tag - - noSmithyDocumentSerde -} - -func (*AnalyticsFilterMemberTag) isAnalyticsFilter() {} - -// Contains information about where to publish the analytics results. -type AnalyticsS3BucketDestination struct { - - // The Amazon Resource Name (ARN) of the bucket to which data is exported. - // - // This member is required. - Bucket *string - - // Specifies the file format used when exporting data to Amazon S3. - // - // This member is required. - Format AnalyticsS3ExportFileFormat - - // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. - // - // Although this value is optional, we strongly recommend that you set it to help - // prevent problems if the destination bucket ownership changes. - BucketAccountId *string - - // The prefix to use when exporting data. The prefix is prepended to all results. - Prefix *string - - noSmithyDocumentSerde -} - -// In terms of implementation, a Bucket is a resource. -type Bucket struct { - - // BucketRegion indicates the Amazon Web Services region where the bucket is - // located. If the request contains at least one valid parameter, it is included in - // the response. - BucketRegion *string - - // Date the bucket was created. This date can change when making changes to your - // bucket, such as editing its bucket policy. - CreationDate *time.Time - - // The name of the bucket. - Name *string - - noSmithyDocumentSerde -} - -// Specifies the information about the bucket that will be created. 
For more -// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. -// -// This functionality is only supported by directory buckets. -// -// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html -type BucketInfo struct { - - // The number of Zone (Availability Zone or Local Zone) that's used for redundancy - // for the bucket. - DataRedundancy DataRedundancy - - // The type of bucket. - Type BucketType - - noSmithyDocumentSerde -} - -// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For -// more information, see [Object Lifecycle Management]in the Amazon S3 User Guide. -// -// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html -type BucketLifecycleConfiguration struct { - - // A lifecycle rule for individual objects in an Amazon S3 bucket. - // - // This member is required. - Rules []LifecycleRule - - noSmithyDocumentSerde -} - -// Container for logging status information. -type BucketLoggingStatus struct { - - // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API - // Reference. - // - // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html - LoggingEnabled *LoggingEnabled - - noSmithyDocumentSerde -} - -// Contains all the possible checksum or digest values for an object. -type Checksum struct { - - // The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only - // be present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the object. This checksum is - // only present if the checksum was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This checksum is - // present if the object was uploaded with the CRC-64NVME checksum algorithm, or - // if the object was uploaded without a checksum (and Amazon S3 added the default - // checksum, CRC-64NVME , to the uploaded object). For more information, see [Checking object integrity] in - // the Amazon S3 User Guide. 
- // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be - // present if the object was uploaded with the object. When you use the API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be - // present if the object was uploaded with the object. When you use an API - // operation on an object that was uploaded using multipart uploads, this value may - // not be a direct checksum value of the full object. Instead, it's a calculation - // based on the checksum values of each individual part. For more information about - // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User - // Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums - ChecksumSHA256 *string - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - noSmithyDocumentSerde -} - -// Container for all (if there are any) keys between Prefix and the next -// occurrence of the string specified by a delimiter. CommonPrefixes lists keys -// that act like subdirectories in the directory specified by Prefix. For example, -// if the prefix is notes/ and the delimiter is a slash (/) as in -// notes/summer/july, the common prefix is notes/summer/. -type CommonPrefix struct { - - // Container for the specified common prefix. - Prefix *string - - noSmithyDocumentSerde -} - -// The container for the completed multipart upload details. -type CompletedMultipartUpload struct { - - // Array of CompletedPart data types. - // - // If you do not supply a valid Part with your request, the service sends back an - // HTTP 400 response. - Parts []CompletedPart - - noSmithyDocumentSerde -} - -// Details of the parts that were uploaded. -type CompletedPart struct { - - // The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC-32 checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC-32C checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. 
- // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC-64NVME - // checksum algorithm to the uploaded object). For more information, see [Checking object integrity]in the - // Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is - // present if the multipart upload request was created with the SHA-1 checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is - // present if the multipart upload request was created with the SHA-256 checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Entity tag returned when the part was uploaded. - ETag *string - - // Part number that identifies the part. This is a positive integer between 1 and - // 10,000. - // - // - General purpose buckets - In CompleteMultipartUpload , when a additional - // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , - // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the - // PartNumber must start at 1 and the part numbers must be consecutive. - // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an - // InvalidPartOrder error code. - // - // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start - // at 1 and the part numbers must be consecutive. - PartNumber *int32 - - noSmithyDocumentSerde -} - -// A container for describing a condition that must be met for the specified -// redirect to apply. For example, 1. If request is for pages in the /docs folder, -// redirect to the /documents folder. 2. If request results in HTTP error 4xx, -// redirect request to another host where you might process the error. -type Condition struct { - - // The HTTP error code when the redirect is applied. In the event of an error, if - // the error code equals this value, then the specified redirect is applied. - // Required when parent element Condition is specified and sibling KeyPrefixEquals - // is not specified. If both are specified, then both must be true for the redirect - // to be applied. - HttpErrorCodeReturnedEquals *string - - // The object key name prefix when the redirect is applied. For example, to - // redirect requests for ExamplePage.html , the key prefix will be ExamplePage.html - // . To redirect request for all pages with the prefix docs/ , the key prefix will - // be /docs , which identifies all objects in the docs/ folder. Required when the - // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals - // is not specified. If both conditions are specified, both must be true for the - // redirect to be applied. 
- // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - KeyPrefixEquals *string - - noSmithyDocumentSerde -} - -type ContinuationEvent struct { - noSmithyDocumentSerde -} - -// Container for all response elements. -type CopyObjectResult struct { - - // The Base64 encoded, 32-bit CRC-32 checksum of the object. This checksum is only - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the object. This checksum is - // present if the object being copied was uploaded with the CRC-64NVME checksum - // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added - // the default checksum, CRC-64NVME , to the uploaded object). For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 digest of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 digest of the object. This will only be - // present if the object was uploaded with the object. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - // Returns the ETag of the new object. The ETag reflects only changes to the - // contents of an object, not its metadata. - ETag *string - - // Creation date of the object. - LastModified *time.Time - - noSmithyDocumentSerde -} - -// Container for all response elements. -type CopyPartResult struct { - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC-32 checksum of the part. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. 
- // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 32-bit CRC-32C checksum of the part. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC-64NVME - // checksum algorithm to the uploaded object). For more information, see [Checking object integrity]in the - // Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 160-bit SHA-1 checksum of the part. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // Base64 encoded, 256-bit SHA-256 checksum of the part. For more information, see [Checking object integrity] - // in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Entity tag of the object. - ETag *string - - // Date and time at which the object was uploaded. - LastModified *time.Time - - noSmithyDocumentSerde -} - -// Describes the cross-origin access configuration for objects in an Amazon S3 -// bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. -// -// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html -type CORSConfiguration struct { - - // A set of origins and methods (cross-origin access that you want to allow). You - // can add up to 100 rules to the configuration. - // - // This member is required. - CORSRules []CORSRule - - noSmithyDocumentSerde -} - -// Specifies a cross-origin access rule for an Amazon S3 bucket. -type CORSRule struct { - - // An HTTP method that you allow the origin to execute. Valid values are GET , PUT - // , HEAD , POST , and DELETE . - // - // This member is required. - AllowedMethods []string - - // One or more origins you want customers to be able to access the bucket from. - // - // This member is required. - AllowedOrigins []string - - // Headers that are specified in the Access-Control-Request-Headers header. These - // headers are allowed in a preflight OPTIONS request. In response to any preflight - // OPTIONS request, Amazon S3 returns any requested headers that are allowed. 
- AllowedHeaders []string - - // One or more headers in the response that you want customers to be able to - // access from their applications (for example, from a JavaScript XMLHttpRequest - // object). - ExposeHeaders []string - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string - - // The time in seconds that your browser is to cache the preflight response for - // the specified resource. - MaxAgeSeconds *int32 - - noSmithyDocumentSerde -} - -// The configuration information for the bucket. -type CreateBucketConfiguration struct { - - // Specifies the information about the bucket that will be created. - // - // This functionality is only supported by directory buckets. - Bucket *BucketInfo - - // Specifies the location where the bucket will be created. - // - // Directory buckets - The location type is Availability Zone or Local Zone. When - // the location type is Local Zone, your Local Zone must be in opt-in status. - // Otherwise, you get an HTTP 400 Bad Request error with the error code Access - // denied . To learn more about opt-in Local Zones, see [Opt-in Dedicated Local Zones]in the Amazon S3 User - // Guide. - // - // This functionality is only supported by directory buckets. - // - // [Opt-in Dedicated Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/opt-in-directory-bucket-lz.html - Location *LocationInfo - - // Specifies the Region where the bucket will be created. You might choose a - // Region to optimize latency, minimize costs, or address regulatory requirements. - // For example, if you reside in Europe, you will probably find it advantageous to - // create buckets in the Europe (Ireland) Region. For more information, see [Accessing a bucket]in the - // Amazon S3 User Guide. - // - // If you don't specify a Region, the bucket is created in the US East (N. - // Virginia) Region (us-east-1) by default. - // - // This functionality is not supported for directory buckets. - // - // [Accessing a bucket]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro - LocationConstraint BucketLocationConstraint - - noSmithyDocumentSerde -} - -// Describes how an uncompressed comma-separated values (CSV)-formatted input -// object is formatted. -type CSVInput struct { - - // Specifies that CSV field values may contain quoted record delimiters and such - // records should be allowed. Default value is FALSE. Setting this value to TRUE - // may lower performance. - AllowQuotedRecordDelimiter *bool - - // A single character used to indicate that a row should be ignored when the - // character is present at the start of that row. You can specify any character to - // indicate a comment line. The default character is # . - // - // Default: # - Comments *string - - // A single character used to separate individual fields in a record. You can - // specify an arbitrary delimiter. - FieldDelimiter *string - - // Describes the first line of input. Valid values are: - // - // - NONE : First line is not a header. - // - // - IGNORE : First line is a header, but you can't use the header values to - // indicate the column in an expression. You can use column position (such as _1, - // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). - // - // - Use : First line is a header, and you can use the header value to identify a - // column in an expression ( SELECT "name" FROM OBJECT ). 
- FileHeaderInfo FileHeaderInfo - - // A single character used for escaping when the field delimiter is part of the - // value. For example, if the value is a, b , Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b " . - // - // Type: String - // - // Default: " - // - // Ancestors: CSV - QuoteCharacter *string - - // A single character used for escaping the quotation mark character inside an - // already escaped value. For example, the value """ a , b """ is parsed as " a , - // b " . - QuoteEscapeCharacter *string - - // A single character used to separate individual records in the input. Instead of - // the default value, you can specify an arbitrary delimiter. - RecordDelimiter *string - - noSmithyDocumentSerde -} - -// Describes how uncompressed comma-separated values (CSV)-formatted results are -// formatted. -type CSVOutput struct { - - // The value used to separate individual fields in a record. You can specify an - // arbitrary delimiter. - FieldDelimiter *string - - // A single character used for escaping when the field delimiter is part of the - // value. For example, if the value is a, b , Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b " . - QuoteCharacter *string - - // The single character used for escaping the quote character inside an already - // escaped value. - QuoteEscapeCharacter *string - - // Indicates whether to use quotation marks around output fields. - // - // - ALWAYS : Always use quotation marks for output fields. - // - // - ASNEEDED : Use quotation marks for output fields when needed. - QuoteFields QuoteFields - - // A single character used to separate individual records in the output. Instead - // of the default value, you can specify an arbitrary delimiter. - RecordDelimiter *string - - noSmithyDocumentSerde -} - -// The container element for optionally specifying the default Object Lock -// retention settings for new objects placed in the specified bucket. -// -// - The DefaultRetention settings require both a mode and a period. -// -// - The DefaultRetention period can be either Days or Years but you must select -// one. You cannot specify Days and Years at the same time. -type DefaultRetention struct { - - // The number of days that you want to specify for the default retention period. - // Must be used with Mode . - Days *int32 - - // The default Object Lock retention mode you want to apply to new objects placed - // in the specified bucket. Must be used with either Days or Years . - Mode ObjectLockRetentionMode - - // The number of years that you want to specify for the default retention period. - // Must be used with Mode . - Years *int32 - - noSmithyDocumentSerde -} - -// Container for the objects to delete. -type Delete struct { - - // The object to delete. - // - // Directory buckets - For directory buckets, an object that's composed entirely - // of whitespace characters is not supported by the DeleteObjects API operation. - // The request will receive a 400 Bad Request error and none of the objects in the - // request will be deleted. - // - // This member is required. - Objects []ObjectIdentifier - - // Element to enable quiet mode for the request. When you add this element, you - // must set its value to true . - Quiet *bool - - noSmithyDocumentSerde -} - -// Information about the deleted object. -type DeletedObject struct { - - // Indicates whether the specified object version that was permanently deleted was - // (true) or was not (false) a delete marker before deletion. 
In a simple DELETE, - // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. - // - // This functionality is not supported for directory buckets. - DeleteMarker *bool - - // The version ID of the delete marker created as a result of the DELETE - // operation. If you delete a specific object version, the value returned by this - // header is the version ID of the object version deleted. - // - // This functionality is not supported for directory buckets. - DeleteMarkerVersionId *string - - // The name of the deleted object. - Key *string - - // The version ID of the deleted object. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -// Information about the delete marker. -type DeleteMarkerEntry struct { - - // Specifies whether the object is (true) or is not (false) the latest version of - // an object. - IsLatest *bool - - // The object key. - Key *string - - // Date and time when the object was last modified. - LastModified *time.Time - - // The account that created the delete marker.> - Owner *Owner - - // Version ID of an object. - VersionId *string - - noSmithyDocumentSerde -} - -// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter -// in your replication configuration, you must also include a -// DeleteMarkerReplication element. If your Filter includes a Tag element, the -// DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does -// not support replicating delete markers for tag-based rules. For an example -// configuration, see [Basic Rule Configuration]. -// -// For more information about delete marker replication, see [Basic Rule Configuration]. -// -// If you are using an earlier version of the replication configuration, Amazon S3 -// handles replication of delete markers differently. For more information, see [Backward Compatibility]. -// -// [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html -// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations -type DeleteMarkerReplication struct { - - // Indicates whether to replicate delete markers. - // - // Indicates whether to replicate delete markers. - Status DeleteMarkerReplicationStatus - - noSmithyDocumentSerde -} - -// Specifies information about where to publish analysis or configuration results -// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). -type Destination struct { - - // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store - // the results. - // - // This member is required. - Bucket *string - - // Specify this only in a cross-account scenario (where source and destination - // bucket owners are not the same), and you want to change replica ownership to the - // Amazon Web Services account that owns the destination bucket. If this is not - // specified in the replication configuration, the replicas are owned by same - // Amazon Web Services account that owns the source object. - AccessControlTranslation *AccessControlTranslation - - // Destination bucket owner account ID. In a cross-account scenario, if you direct - // Amazon S3 to change replica ownership to the Amazon Web Services account that - // owns the destination bucket by specifying the AccessControlTranslation - // property, this is the account ID of the destination bucket owner. 
For more - // information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide. - // - // [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html - Account *string - - // A container that provides information about encryption. If - // SourceSelectionCriteria is specified, you must specify this element. - EncryptionConfiguration *EncryptionConfiguration - - // A container specifying replication metrics-related settings enabling - // replication metrics and events. - Metrics *Metrics - - // A container specifying S3 Replication Time Control (S3 RTC), including whether - // S3 RTC is enabled and the time when all objects and operations on objects must - // be replicated. Must be specified together with a Metrics block. - ReplicationTime *ReplicationTime - - // The storage class to use when replicating objects, such as S3 Standard or - // reduced redundancy. By default, Amazon S3 uses the storage class of the source - // object to create the object replica. - // - // For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3 - // API Reference. - // - // [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html - StorageClass StorageClass - - noSmithyDocumentSerde -} - -// Contains the type of server-side encryption used. -type Encryption struct { - - // The server-side encryption algorithm used when storing job results in Amazon S3 - // (for example, AES256, aws:kms ). - // - // This member is required. - EncryptionType ServerSideEncryption - - // If the encryption type is aws:kms , this optional value can be used to specify - // the encryption context for the restore results. - KMSContext *string - - // If the encryption type is aws:kms , this optional value specifies the ID of the - // symmetric encryption customer managed key to use for encryption of job results. - // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // [Asymmetric keys in KMS]in the Amazon Web Services Key Management Service Developer Guide. - // - // [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html - KMSKeyId *string - - noSmithyDocumentSerde -} - -// Specifies encryption-related information for an Amazon S3 bucket that is a -// destination for replicated objects. -// -// If you're specifying a customer managed KMS key, we recommend using a fully -// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the -// key within the requester’s account. This behavior can result in data that's -// encrypted with a KMS key that belongs to the requester, and not the bucket -// owner. -type EncryptionConfiguration struct { - - // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web - // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for - // the destination bucket. Amazon S3 uses this key to encrypt replica objects. - // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. 
- // - // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html - ReplicaKmsKeyID *string - - noSmithyDocumentSerde -} - -// A message that indicates the request is complete and no more messages will be -// sent. You should not assume that the request is complete until the client -// receives an EndEvent . -type EndEvent struct { - noSmithyDocumentSerde -} - -// Container for all error elements. -type Error struct { - - // The error code is a string that uniquely identifies an error condition. It is - // meant to be read and understood by programs that detect and handle errors by - // type. The following is a list of Amazon S3 error codes. For more information, - // see [Error responses]. - // - // - Code: AccessDenied - // - // - Description: Access Denied - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: AccountProblem - // - // - Description: There is a problem with your Amazon Web Services account that - // prevents the action from completing successfully. Contact Amazon Web Services - // Support for further assistance. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: AllAccessDisabled - // - // - Description: All access to this Amazon S3 resource has been disabled. - // Contact Amazon Web Services Support for further assistance. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: AmbiguousGrantByEmailAddress - // - // - Description: The email address you provided is associated with more than - // one account. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: AuthorizationHeaderMalformed - // - // - Description: The authorization header you provided is invalid. - // - // - HTTP Status Code: 400 Bad Request - // - // - HTTP Status Code: N/A - // - // - Code: BadDigest - // - // - Description: The Content-MD5 you specified did not match what we received. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: BucketAlreadyExists - // - // - Description: The requested bucket name is not available. The bucket - // namespace is shared by all users of the system. Please select a different name - // and try again. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: BucketAlreadyOwnedByYou - // - // - Description: The bucket you tried to create already exists, and you own it. - // Amazon S3 returns this error in all Amazon Web Services Regions except in the - // North Virginia Region. For legacy compatibility, if you re-create an existing - // bucket that you already own in the North Virginia Region, Amazon S3 returns 200 - // OK and resets the bucket access control lists (ACLs). - // - // - Code: 409 Conflict (in all Regions except the North Virginia Region) - // - // - SOAP Fault Code Prefix: Client - // - // - Code: BucketNotEmpty - // - // - Description: The bucket you tried to delete is not empty. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: CredentialsNotSupported - // - // - Description: This request does not support credentials. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: CrossLocationLoggingProhibited - // - // - Description: Cross-location logging not allowed. 
Buckets in one geographic - // location cannot log information to a bucket in another location. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: EntityTooSmall - // - // - Description: Your proposed upload is smaller than the minimum allowed - // object size. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: EntityTooLarge - // - // - Description: Your proposed upload exceeds the maximum allowed object size. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: ExpiredToken - // - // - Description: The provided token has expired. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: IllegalVersioningConfigurationException - // - // - Description: Indicates that the versioning configuration specified in the - // request is invalid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: IncompleteBody - // - // - Description: You did not provide the number of bytes specified by the - // Content-Length HTTP header - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: IncorrectNumberOfFilesInPostRequest - // - // - Description: POST requires exactly one file upload per request. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InlineDataTooLarge - // - // - Description: Inline data exceeds the maximum allowed size. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InternalError - // - // - Description: We encountered an internal error. Please try again. - // - // - HTTP Status Code: 500 Internal Server Error - // - // - SOAP Fault Code Prefix: Server - // - // - Code: InvalidAccessKeyId - // - // - Description: The Amazon Web Services access key ID you provided does not - // exist in our records. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidAddressingHeader - // - // - Description: You must specify the Anonymous role. - // - // - HTTP Status Code: N/A - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidArgument - // - // - Description: Invalid Argument - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidBucketName - // - // - Description: The specified bucket is not valid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidBucketState - // - // - Description: The request is not valid with the current state of the bucket. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidDigest - // - // - Description: The Content-MD5 you specified is not valid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidEncryptionAlgorithmError - // - // - Description: The encryption request you specified is not valid. The valid - // value is AES256. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidLocationConstraint - // - // - Description: The specified location constraint is not valid. For more - // information about Regions, see [How to Select a Region for Your Buckets]. 
- // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidObjectState - // - // - Description: The action is not valid for the current state of the object. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidPart - // - // - Description: One or more of the specified parts could not be found. The - // part might not have been uploaded, or the specified entity tag might not have - // matched the part's entity tag. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidPartOrder - // - // - Description: The list of parts was not in ascending order. Parts list must - // be specified in order by part number. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidPayer - // - // - Description: All access to this object has been disabled. Please contact - // Amazon Web Services Support for further assistance. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidPolicyDocument - // - // - Description: The content of the form does not meet the conditions specified - // in the policy document. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidRange - // - // - Description: The requested range cannot be satisfied. - // - // - HTTP Status Code: 416 Requested Range Not Satisfiable - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidRequest - // - // - Description: Please use AWS4-HMAC-SHA256 . - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: SOAP requests must be made over an HTTPS connection. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Acceleration is not supported for buckets - // with non-DNS compliant names. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Acceleration is not supported for buckets - // with periods (.) in their names. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual - // style requests. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Accelerate is not configured on this bucket. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Acceleration is not supported on this - // bucket. Contact Amazon Web Services Support for more information. - // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidRequest - // - // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this - // bucket. Contact Amazon Web Services Support for more information. 
- // - // - HTTP Status Code: 400 Bad Request - // - // - Code: N/A - // - // - Code: InvalidSecurity - // - // - Description: The provided security credentials are not valid. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidSOAPRequest - // - // - Description: The SOAP request body is invalid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidStorageClass - // - // - Description: The storage class you specified is not valid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidTargetBucketForLogging - // - // - Description: The target bucket for logging does not exist, is not owned by - // you, or does not have the appropriate grants for the log-delivery group. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidToken - // - // - Description: The provided token is malformed or otherwise invalid. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: InvalidURI - // - // - Description: Couldn't parse the specified URI. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: KeyTooLongError - // - // - Description: Your key is too long. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MalformedACLError - // - // - Description: The XML you provided was not well-formed or did not validate - // against our published schema. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MalformedPOSTRequest - // - // - Description: The body of your POST request is not well-formed - // multipart/form-data. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MalformedXML - // - // - Description: This happens when the user sends malformed XML (XML that - // doesn't conform to the published XSD) for the configuration. The error message - // is, "The XML you provided was not well-formed or did not validate against our - // published schema." - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MaxMessageLengthExceeded - // - // - Description: Your request was too big. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MaxPostPreDataLengthExceededError - // - // - Description: Your POST request fields preceding the upload file were too - // large. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MetadataTooLarge - // - // - Description: Your metadata headers exceed the maximum allowed metadata size. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MethodNotAllowed - // - // - Description: The specified method is not allowed against this resource. - // - // - HTTP Status Code: 405 Method Not Allowed - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingAttachment - // - // - Description: A SOAP attachment was expected, but none were found. - // - // - HTTP Status Code: N/A - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingContentLength - // - // - Description: You must provide the Content-Length HTTP header. 
- // - // - HTTP Status Code: 411 Length Required - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingRequestBodyError - // - // - Description: This happens when the user sends an empty XML document as a - // request. The error message is, "Request body is empty." - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingSecurityElement - // - // - Description: The SOAP 1.1 request is missing a security element. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: MissingSecurityHeader - // - // - Description: Your request is missing a required header. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoLoggingStatusForKey - // - // - Description: There is no such thing as a logging status subresource for a - // key. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchBucket - // - // - Description: The specified bucket does not exist. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchBucketPolicy - // - // - Description: The specified bucket does not have a bucket policy. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchKey - // - // - Description: The specified key does not exist. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchLifecycleConfiguration - // - // - Description: The lifecycle configuration does not exist. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchUpload - // - // - Description: The specified multipart upload does not exist. The upload ID - // might be invalid, or the multipart upload might have been aborted or completed. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NoSuchVersion - // - // - Description: Indicates that the version ID specified in the request does - // not match an existing version. - // - // - HTTP Status Code: 404 Not Found - // - // - SOAP Fault Code Prefix: Client - // - // - Code: NotImplemented - // - // - Description: A header you provided implies functionality that is not - // implemented. - // - // - HTTP Status Code: 501 Not Implemented - // - // - SOAP Fault Code Prefix: Server - // - // - Code: NotSignedUp - // - // - Description: Your account is not signed up for the Amazon S3 service. You - // must sign up before you can use Amazon S3. You can sign up at the following URL: - // [Amazon S3] - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: OperationAborted - // - // - Description: A conflicting conditional action is currently in progress - // against this resource. Try again. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: PermanentRedirect - // - // - Description: The bucket you are attempting to access must be addressed - // using the specified endpoint. Send all future requests to this endpoint. - // - // - HTTP Status Code: 301 Moved Permanently - // - // - SOAP Fault Code Prefix: Client - // - // - Code: PreconditionFailed - // - // - Description: At least one of the preconditions you specified did not hold. 
- // - // - HTTP Status Code: 412 Precondition Failed - // - // - SOAP Fault Code Prefix: Client - // - // - Code: Redirect - // - // - Description: Temporary redirect. - // - // - HTTP Status Code: 307 Moved Temporarily - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RestoreAlreadyInProgress - // - // - Description: Object restore is already in progress. - // - // - HTTP Status Code: 409 Conflict - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RequestIsNotMultiPartContent - // - // - Description: Bucket POST must be of the enclosure-type multipart/form-data. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RequestTimeout - // - // - Description: Your socket connection to the server was not read from or - // written to within the timeout period. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RequestTimeTooSkewed - // - // - Description: The difference between the request time and the server's time - // is too large. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: RequestTorrentOfBucketError - // - // - Description: Requesting the torrent file of a bucket is not permitted. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: SignatureDoesNotMatch - // - // - Description: The request signature we calculated does not match the - // signature you provided. Check your Amazon Web Services secret access key and - // signing method. For more information, see [REST Authentication]and [SOAP Authentication]for details. - // - // - HTTP Status Code: 403 Forbidden - // - // - SOAP Fault Code Prefix: Client - // - // - Code: ServiceUnavailable - // - // - Description: Service is unable to handle request. - // - // - HTTP Status Code: 503 Service Unavailable - // - // - SOAP Fault Code Prefix: Server - // - // - Code: SlowDown - // - // - Description: Reduce your request rate. - // - // - HTTP Status Code: 503 Slow Down - // - // - SOAP Fault Code Prefix: Server - // - // - Code: TemporaryRedirect - // - // - Description: You are being redirected to the bucket while DNS updates. - // - // - HTTP Status Code: 307 Moved Temporarily - // - // - SOAP Fault Code Prefix: Client - // - // - Code: TokenRefreshRequired - // - // - Description: The provided token must be refreshed. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: TooManyBuckets - // - // - Description: You have attempted to create more buckets than allowed. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: UnexpectedContent - // - // - Description: This request does not support content. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: UnresolvableGrantByEmailAddress - // - // - Description: The email address you provided does not match any account on - // record. - // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // - Code: UserKeyMustBeSpecified - // - // - Description: The bucket POST must contain the specified field name. If it - // is specified, check the order of the fields. 
- // - // - HTTP Status Code: 400 Bad Request - // - // - SOAP Fault Code Prefix: Client - // - // [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro - // [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html - // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html - // [Amazon S3]: http://aws.amazon.com/s3 - // [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html - Code *string - - // The error key. - Key *string - - // The error message contains a generic description of the error condition in - // English. It is intended for a human audience. Simple programs display the - // message directly to the end user if they encounter an error condition they don't - // know how or don't care to handle. Sophisticated programs with more exhaustive - // error handling and proper internationalization are more likely to ignore the - // error message. - Message *string - - // The version ID of the error. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -// If the CreateBucketMetadataTableConfiguration request succeeds, but S3 -// -// Metadata was unable to create the table, this structure contains the error code -// and error message. -type ErrorDetails struct { - - // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 - // Metadata was unable to create the table, this structure contains the error code. - // The possible error codes and error messages are as follows: - // - // - AccessDeniedCreatingResources - You don't have sufficient permissions to - // create the required resources. Make sure that you have - // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and - // s3tables:PutTablePolicy permissions, and then try again. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - AccessDeniedWritingToTable - Unable to write to the metadata table because - // of missing resource permissions. To fix the resource policy, Amazon S3 needs to - // create a new metadata table. To create a new metadata table, you must delete the - // metadata configuration for this bucket, and then create a new metadata - // configuration. - // - // - DestinationTableNotFound - The destination table doesn't exist. To create a - // new metadata table, you must delete the metadata configuration for this bucket, - // and then create a new metadata configuration. - // - // - ServerInternalError - An internal error has occurred. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - TableAlreadyExists - The table that you specified already exists in the - // table bucket's namespace. Specify a different table name. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - TableBucketNotFound - The table bucket that you specified doesn't exist in - // this Amazon Web Services Region and account. Create or choose a different table - // bucket. To create a new metadata table, you must delete the metadata - // configuration for this bucket, and then create a new metadata configuration. 
- ErrorCode *string - - // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 - // Metadata was unable to create the table, this structure contains the error - // message. The possible error codes and error messages are as follows: - // - // - AccessDeniedCreatingResources - You don't have sufficient permissions to - // create the required resources. Make sure that you have - // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and - // s3tables:PutTablePolicy permissions, and then try again. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - AccessDeniedWritingToTable - Unable to write to the metadata table because - // of missing resource permissions. To fix the resource policy, Amazon S3 needs to - // create a new metadata table. To create a new metadata table, you must delete the - // metadata configuration for this bucket, and then create a new metadata - // configuration. - // - // - DestinationTableNotFound - The destination table doesn't exist. To create a - // new metadata table, you must delete the metadata configuration for this bucket, - // and then create a new metadata configuration. - // - // - ServerInternalError - An internal error has occurred. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - TableAlreadyExists - The table that you specified already exists in the - // table bucket's namespace. Specify a different table name. To create a new - // metadata table, you must delete the metadata configuration for this bucket, and - // then create a new metadata configuration. - // - // - TableBucketNotFound - The table bucket that you specified doesn't exist in - // this Amazon Web Services Region and account. Create or choose a different table - // bucket. To create a new metadata table, you must delete the metadata - // configuration for this bucket, and then create a new metadata configuration. - ErrorMessage *string - - noSmithyDocumentSerde -} - -// The error information. -type ErrorDocument struct { - - // The object key name to use when a 4XX class error occurs. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // This member is required. - Key *string - - noSmithyDocumentSerde -} - -// A container for specifying the configuration for Amazon EventBridge. -type EventBridgeConfiguration struct { - noSmithyDocumentSerde -} - -// Optional configuration to replicate existing source bucket objects. -// -// This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in -// the Amazon S3 User Guide. -// -// [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html -type ExistingObjectReplication struct { - - // Specifies whether Amazon S3 replicates existing source bucket objects. - // - // This member is required. - Status ExistingObjectReplicationStatus - - noSmithyDocumentSerde -} - -// Specifies the Amazon S3 object key name to filter on. 
An object key name is the -// name assigned to an object in your Amazon S3 bucket. You specify whether to -// filter on the suffix or prefix of the object key name. A prefix is a specific -// string of characters at the beginning of an object key name, which you can use -// to organize objects. For example, you can start the key names of related objects -// with a prefix, such as 2023- or engineering/ . Then, you can use FilterRule to -// find objects in a bucket with key names that have the same prefix. A suffix is -// similar to a prefix, but it is at the end of the object key name instead of at -// the beginning. -type FilterRule struct { - - // The object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications]in the - // Amazon S3 User Guide. - // - // [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html - Name FilterRuleName - - // The value that the filter searches for in object key names. - Value *string - - noSmithyDocumentSerde -} - -// The metadata table configuration for a general purpose bucket. -type GetBucketMetadataTableConfigurationResult struct { - - // The metadata table configuration for a general purpose bucket. - // - // This member is required. - MetadataTableConfigurationResult *MetadataTableConfigurationResult - - // The status of the metadata table. The status values are: - // - // - CREATING - The metadata table is in the process of being created in the - // specified table bucket. - // - // - ACTIVE - The metadata table has been created successfully and records are - // being delivered to the table. - // - // - FAILED - Amazon S3 is unable to create the metadata table, or Amazon S3 is - // unable to deliver records. See ErrorDetails for details. - // - // This member is required. - Status *string - - // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 - // Metadata was unable to create the table, this structure contains the error code - // and error message. - Error *ErrorDetails - - noSmithyDocumentSerde -} - -// A collection of parts associated with a multipart upload. -type GetObjectAttributesParts struct { - - // Indicates whether the returned list of parts is truncated. A value of true - // indicates that the list was truncated. A list can be truncated if the number of - // parts exceeds the limit returned in the MaxParts element. - IsTruncated *bool - - // The maximum number of parts allowed in the response. - MaxParts *int32 - - // When a list is truncated, this element specifies the last part in the list, as - // well as the value to use for the PartNumberMarker request parameter in a - // subsequent request. - NextPartNumberMarker *string - - // The marker for the current part. - PartNumberMarker *string - - // A container for elements related to a particular part. A response can contain - // zero or more Parts elements. - // - // - General purpose buckets - For GetObjectAttributes , if a additional checksum - // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1 - // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the - // request, the response doesn't return Part . 
- // - // - Directory buckets - For GetObjectAttributes , no matter whether a additional - // checksum is applied to the object specified in the request, the response returns - // Part . - Parts []ObjectPart - - // The total number of parts. - TotalPartsCount *int32 - - noSmithyDocumentSerde -} - -// Container for S3 Glacier job parameters. -type GlacierJobParameters struct { - - // Retrieval tier at which the restore will be processed. - // - // This member is required. - Tier Tier - - noSmithyDocumentSerde -} - -// Container for grant information. -type Grant struct { - - // The person being granted permissions. - Grantee *Grantee - - // Specifies the permission given to the grantee. - Permission Permission - - noSmithyDocumentSerde -} - -// Container for the person being granted permissions. -type Grantee struct { - - // Type of grantee - // - // This member is required. - Type Type - - // Screen name of the grantee. - DisplayName *string - - // Email address of the grantee. - // - // Using email addresses to specify a grantee is only supported in the following - // Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the - // Amazon Web Services General Reference. - // - // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region - EmailAddress *string - - // The canonical user ID of the grantee. - ID *string - - // URI of the grantee group. - URI *string - - noSmithyDocumentSerde -} - -// Container for the Suffix element. -type IndexDocument struct { - - // A suffix that is appended to a request that is for a directory on the website - // endpoint. (For example, if the suffix is index.html and you make a request to - // samplebucket/images/ , the data that is returned will be for the object with the - // key name images/index.html .) The suffix must not be empty and must not include - // a slash character. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // This member is required. - Suffix *string - - noSmithyDocumentSerde -} - -// Container element that identifies who initiated the multipart upload. -type Initiator struct { - - // Name of the Principal. - // - // This functionality is not supported for directory buckets. - DisplayName *string - - // If the principal is an Amazon Web Services account, it provides the Canonical - // User ID. If the principal is an IAM User, it provides a user ARN value. - // - // Directory buckets - If the principal is an Amazon Web Services account, it - // provides the Amazon Web Services account ID. If the principal is an IAM User, it - // provides a user ARN value. - ID *string - - noSmithyDocumentSerde -} - -// Describes the serialization format of the object. -type InputSerialization struct { - - // Describes the serialization of a CSV-encoded object. - CSV *CSVInput - - // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. 
Default - // Value: NONE. - CompressionType CompressionType - - // Specifies JSON as object's input serialization format. - JSON *JSONInput - - // Specifies Parquet as object's input serialization format. - Parquet *ParquetInput - - noSmithyDocumentSerde -} - -// A container for specifying S3 Intelligent-Tiering filters. The filters -// determine the subset of objects to which the rule applies. -type IntelligentTieringAndOperator struct { - - // An object key name prefix that identifies the subset of objects to which the - // configuration applies. - Prefix *string - - // All of these tags must exist in the object's tag set in order for the - // configuration to apply. - Tags []Tag - - noSmithyDocumentSerde -} - -// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. -// -// For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. -// -// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access -type IntelligentTieringConfiguration struct { - - // The ID used to identify the S3 Intelligent-Tiering configuration. - // - // This member is required. - Id *string - - // Specifies the status of the configuration. - // - // This member is required. - Status IntelligentTieringStatus - - // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. - // - // This member is required. - Tierings []Tiering - - // Specifies a bucket filter. The configuration only includes objects that meet - // the filter's criteria. - Filter *IntelligentTieringFilter - - noSmithyDocumentSerde -} - -// The Filter is used to identify objects that the S3 Intelligent-Tiering -// configuration applies to. -type IntelligentTieringFilter struct { - - // A conjunction (logical AND) of predicates, which is used in evaluating a - // metrics filter. The operator must have at least two predicates, and an object - // must match all of the predicates in order for the filter to apply. - And *IntelligentTieringAndOperator - - // An object key name prefix that identifies the subset of objects to which the - // rule applies. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - Prefix *string - - // A container of a key value name pair. - Tag *Tag - - noSmithyDocumentSerde -} - -// Specifies the inventory configuration for an Amazon S3 bucket. For more -// information, see [GET Bucket inventory]in the Amazon S3 API Reference. -// -// [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html -type InventoryConfiguration struct { - - // Contains information about where to publish the inventory results. - // - // This member is required. - Destination *InventoryDestination - - // The ID used to identify the inventory configuration. - // - // This member is required. - Id *string - - // Object versions to include in the inventory list. If set to All , the list - // includes all the object versions, which adds the version-related fields - // VersionId , IsLatest , and DeleteMarker to the list. 
If set to Current , the - // list does not contain these version-related fields. - // - // This member is required. - IncludedObjectVersions InventoryIncludedObjectVersions - - // Specifies whether the inventory is enabled or disabled. If set to True , an - // inventory list is generated. If set to False , no inventory list is generated. - // - // This member is required. - IsEnabled *bool - - // Specifies the schedule for generating inventory results. - // - // This member is required. - Schedule *InventorySchedule - - // Specifies an inventory filter. The inventory only includes objects that meet - // the filter's criteria. - Filter *InventoryFilter - - // Contains the optional fields that are included in the inventory results. - OptionalFields []InventoryOptionalField - - noSmithyDocumentSerde -} - -// Specifies the inventory configuration for an Amazon S3 bucket. -type InventoryDestination struct { - - // Contains the bucket name, file format, bucket owner (optional), and prefix - // (optional) where inventory results are published. - // - // This member is required. - S3BucketDestination *InventoryS3BucketDestination - - noSmithyDocumentSerde -} - -// Contains the type of server-side encryption used to encrypt the inventory -// results. -type InventoryEncryption struct { - - // Specifies the use of SSE-KMS to encrypt delivered inventory reports. - SSEKMS *SSEKMS - - // Specifies the use of SSE-S3 to encrypt delivered inventory reports. - SSES3 *SSES3 - - noSmithyDocumentSerde -} - -// Specifies an inventory filter. The inventory only includes objects that meet -// the filter's criteria. -type InventoryFilter struct { - - // The prefix that an object must have to be included in the inventory results. - // - // This member is required. - Prefix *string - - noSmithyDocumentSerde -} - -// Contains the bucket name, file format, bucket owner (optional), and prefix -// (optional) where inventory results are published. -type InventoryS3BucketDestination struct { - - // The Amazon Resource Name (ARN) of the bucket where inventory results will be - // published. - // - // This member is required. - Bucket *string - - // Specifies the output format of the inventory results. - // - // This member is required. - Format InventoryFormat - - // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. - // - // Although this value is optional, we strongly recommend that you set it to help - // prevent problems if the destination bucket ownership changes. - AccountId *string - - // Contains the type of server-side encryption used to encrypt the inventory - // results. - Encryption *InventoryEncryption - - // The prefix that is prepended to all inventory results. - Prefix *string - - noSmithyDocumentSerde -} - -// Specifies the schedule for generating inventory results. -type InventorySchedule struct { - - // Specifies how frequently inventory results are produced. - // - // This member is required. - Frequency InventoryFrequency - - noSmithyDocumentSerde -} - -// Specifies JSON as object's input serialization format. -type JSONInput struct { - - // The type of JSON. Valid values: Document, Lines. - Type JSONType - - noSmithyDocumentSerde -} - -// Specifies JSON as request's output serialization format. -type JSONOutput struct { - - // The value used to separate individual records in the output. If no value is - // specified, Amazon S3 uses a newline character ('\n'). 
- RecordDelimiter *string - - noSmithyDocumentSerde -} - -// A container for specifying the configuration for Lambda notifications. -type LambdaFunctionConfiguration struct { - - // The Amazon S3 bucket event for which to invoke the Lambda function. For more - // information, see [Supported Event Types]in the Amazon S3 User Guide. - // - // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html - // - // This member is required. - Events []Event - - // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes - // when the specified event type occurs. - // - // This member is required. - LambdaFunctionArn *string - - // Specifies object key name filtering rules. For information about key name - // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. - // - // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html - Filter *NotificationConfigurationFilter - - // An optional unique identifier for configurations in a notification - // configuration. If you don't provide one, Amazon S3 will assign an ID. - Id *string - - noSmithyDocumentSerde -} - -// Container for the expiration for the lifecycle of the object. -// -// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. -// -// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html -type LifecycleExpiration struct { - - // Indicates at what date the object is to be moved or deleted. The date value - // must conform to the ISO 8601 format. The time is always midnight UTC. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - Date *time.Time - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int32 - - // Indicates whether Amazon S3 will remove a delete marker with no noncurrent - // versions. If set to true, the delete marker will be expired; if set to false the - // policy takes no action. This cannot be specified with Days or Date in a - // Lifecycle Expiration Policy. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - ExpiredObjectDeleteMarker *bool - - noSmithyDocumentSerde -} - -// A lifecycle rule for individual objects in an Amazon S3 bucket. -// -// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. -// -// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html -type LifecycleRule struct { - - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is - // not currently being applied. - // - // This member is required. - Status ExpirationStatus - - // Specifies the days since the initiation of an incomplete multipart upload that - // Amazon S3 will wait before permanently removing all parts of the upload. For - // more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. 
- // - // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload - - // Specifies the expiration for the lifecycle of the object in the form of date, - // days and, whether the object has a delete marker. - Expiration *LifecycleExpiration - - // The Filter is used to identify objects that a Lifecycle Rule applies to. A - // Filter must have exactly one of Prefix , Tag , or And specified. Filter is - // required if the LifecycleRule does not contain a Prefix element. - // - // Tag filters are not supported for directory buckets. - Filter *LifecycleRuleFilter - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 - // permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) to - // request that Amazon S3 delete noncurrent object versions at a specific period in - // the object's lifetime. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - NoncurrentVersionExpiration *NoncurrentVersionExpiration - - // Specifies the transition rule for the lifecycle rule that describes when - // noncurrent objects transition to a specific storage class. If your bucket is - // versioning-enabled (or versioning is suspended), you can set this action to - // request that Amazon S3 transition noncurrent object versions to a specific - // storage class at a set period in the object's lifetime. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - NoncurrentVersionTransitions []NoncurrentVersionTransition - - // Prefix identifying one or more objects to which the rule applies. This is no - // longer used; use Filter instead. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // Deprecated: This member has been deprecated. - Prefix *string - - // Specifies when an Amazon S3 object transitions to a specified storage class. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - Transitions []Transition - - noSmithyDocumentSerde -} - -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more -// predicates. The Lifecycle Rule will apply to any object matching all of the -// predicates configured inside the And operator. -type LifecycleRuleAndOperator struct { - - // Minimum object size to which the rule applies. - ObjectSizeGreaterThan *int64 - - // Maximum object size to which the rule applies. - ObjectSizeLessThan *int64 - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string - - // All of these tags must exist in the object's tag set in order for the rule to - // apply. 
- Tags []Tag - - noSmithyDocumentSerde -} - -// The Filter is used to identify objects that a Lifecycle Rule applies to. A -// Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan , -// ObjectSizeLessThan , or And specified. If the Filter element is left empty, the -// Lifecycle Rule applies to all objects in the bucket. -type LifecycleRuleFilter struct { - - // This is used in a Lifecycle Rule Filter to apply a logical AND to two or more - // predicates. The Lifecycle Rule will apply to any object matching all of the - // predicates configured inside the And operator. - And *LifecycleRuleAndOperator - - // Minimum object size to which the rule applies. - ObjectSizeGreaterThan *int64 - - // Maximum object size to which the rule applies. - ObjectSizeLessThan *int64 - - // Prefix identifying one or more objects to which the rule applies. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - Prefix *string - - // This tag must exist in the object's tag set in order for the rule to apply. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - Tag *Tag - - noSmithyDocumentSerde -} - -// Specifies the location where the bucket will be created. -// -// For directory buckets, the location type is Availability Zone or Local Zone. -// For more information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. -// -// This functionality is only supported by directory buckets. -// -// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html -type LocationInfo struct { - - // The name of the location where the bucket will be created. - // - // For directory buckets, the name of the location is the Zone ID of the - // Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An - // example AZ ID value is usw2-az1 . - Name *string - - // The type of location where the bucket will be created. - Type LocationType - - noSmithyDocumentSerde -} - -// Describes where logs are stored and the prefix that Amazon S3 assigns to all -// log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API -// Reference. -// -// [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html -type LoggingEnabled struct { - - // Specifies the bucket where you want Amazon S3 to store server access logs. You - // can have your logs delivered to any bucket that you own, including the same - // bucket that is being logged. You can also configure multiple buckets to deliver - // their logs to the same target bucket. In this case, you should choose a - // different TargetPrefix for each source bucket so that the delivered log files - // can be distinguished by key. - // - // This member is required. - TargetBucket *string - - // A prefix for all log object keys. If you store log files from multiple Amazon - // S3 buckets in a single bucket, you can use a prefix to distinguish which log - // files came from which bucket. - // - // This member is required. - TargetPrefix *string - - // Container for granting information. 
- // - // Buckets that use the bucket owner enforced setting for Object Ownership don't - // support target grants. For more information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. - // - // [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general - TargetGrants []TargetGrant - - // Amazon S3 key format for log objects. - TargetObjectKeyFormat *TargetObjectKeyFormat - - noSmithyDocumentSerde -} - -// A metadata key-value pair to store with an object. -type MetadataEntry struct { - - // Name of the object. - Name *string - - // Value of the object. - Value *string - - noSmithyDocumentSerde -} - -// The metadata table configuration for a general purpose bucket. -type MetadataTableConfiguration struct { - - // The destination information for the metadata table configuration. The - // destination table bucket must be in the same Region and Amazon Web Services - // account as the general purpose bucket. The specified metadata table name must be - // unique within the aws_s3_metadata namespace in the destination table bucket. - // - // This member is required. - S3TablesDestination *S3TablesDestination - - noSmithyDocumentSerde -} - -// The metadata table configuration for a general purpose bucket. The destination -// -// table bucket must be in the same Region and Amazon Web Services account as the -// general purpose bucket. The specified metadata table name must be unique within -// the aws_s3_metadata namespace in the destination table bucket. -type MetadataTableConfigurationResult struct { - - // The destination information for the metadata table configuration. The - // destination table bucket must be in the same Region and Amazon Web Services - // account as the general purpose bucket. The specified metadata table name must be - // unique within the aws_s3_metadata namespace in the destination table bucket. - // - // This member is required. - S3TablesDestinationResult *S3TablesDestinationResult - - noSmithyDocumentSerde -} - -// A container specifying replication metrics-related settings enabling -// -// replication metrics and events. -type Metrics struct { - - // Specifies whether the replication metrics are enabled. - // - // This member is required. - Status MetricsStatus - - // A container specifying the time threshold for emitting the - // s3:Replication:OperationMissedThreshold event. - EventThreshold *ReplicationTimeValue - - noSmithyDocumentSerde -} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates, and an object -// must match all of the predicates in order for the filter to apply. -type MetricsAndOperator struct { - - // The access point ARN used when evaluating an AND predicate. - AccessPointArn *string - - // The prefix used when evaluating an AND predicate. - Prefix *string - - // The list of tags used when evaluating an AND predicate. - Tags []Tag - - noSmithyDocumentSerde -} - -// Specifies a metrics configuration for the CloudWatch request metrics (specified -// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an -// existing metrics configuration, note that this is a full replacement of the -// existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. For more information, see [PutBucketMetricsConfiguration]. 
-// -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html -type MetricsConfiguration struct { - - // The ID used to identify the metrics configuration. The ID has a 64 character - // limit and can only contain letters, numbers, periods, dashes, and underscores. - // - // This member is required. - Id *string - - // Specifies a metrics configuration filter. The metrics configuration will only - // include objects that meet the filter's criteria. A filter must be a prefix, an - // object tag, an access point ARN, or a conjunction (MetricsAndOperator). - Filter MetricsFilter - - noSmithyDocumentSerde -} - -// Specifies a metrics configuration filter. The metrics configuration only -// includes objects that meet the filter's criteria. A filter must be a prefix, an -// object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more -// information, see [PutBucketMetricsConfiguration]. -// -// The following types satisfy this interface: -// -// MetricsFilterMemberAccessPointArn -// MetricsFilterMemberAnd -// MetricsFilterMemberPrefix -// MetricsFilterMemberTag -// -// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html -type MetricsFilter interface { - isMetricsFilter() -} - -// The access point ARN used when evaluating a metrics filter. -type MetricsFilterMemberAccessPointArn struct { - Value string - - noSmithyDocumentSerde -} - -func (*MetricsFilterMemberAccessPointArn) isMetricsFilter() {} - -// A conjunction (logical AND) of predicates, which is used in evaluating a -// metrics filter. The operator must have at least two predicates, and an object -// must match all of the predicates in order for the filter to apply. -type MetricsFilterMemberAnd struct { - Value MetricsAndOperator - - noSmithyDocumentSerde -} - -func (*MetricsFilterMemberAnd) isMetricsFilter() {} - -// The prefix used when evaluating a metrics filter. -type MetricsFilterMemberPrefix struct { - Value string - - noSmithyDocumentSerde -} - -func (*MetricsFilterMemberPrefix) isMetricsFilter() {} - -// The tag used when evaluating a metrics filter. -type MetricsFilterMemberTag struct { - Value Tag - - noSmithyDocumentSerde -} - -func (*MetricsFilterMemberTag) isMetricsFilter() {} - -// Container for the MultipartUpload for the Amazon S3 object. -type MultipartUpload struct { - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm ChecksumAlgorithm - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - // Date and time at which the multipart upload was initiated. - Initiated *time.Time - - // Identifies who initiated the multipart upload. - Initiator *Initiator - - // Key of the object for which the multipart upload was initiated. - Key *string - - // Specifies the owner of the object that is part of the multipart upload. - // - // Directory buckets - The bucket owner is returned as the object owner for all - // the objects. - Owner *Owner - - // The class of storage used to store the object. - // - // Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. 
- StorageClass StorageClass - - // Upload ID that identifies the multipart upload. - UploadId *string - - noSmithyDocumentSerde -} - -// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3 -// permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) to -// request that Amazon S3 delete noncurrent object versions at a specific period in -// the object's lifetime. -// -// This parameter applies to general purpose buckets only. It is not supported for -// directory bucket lifecycle configurations. -type NoncurrentVersionExpiration struct { - - // Specifies how many noncurrent versions Amazon S3 will retain. You can specify - // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any - // additional noncurrent versions beyond the specified number to retain. For more - // information about noncurrent versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - // - // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html - NewerNoncurrentVersions *int32 - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. The value must be a non-zero positive integer. - // For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent]in the Amazon S3 - // User Guide. - // - // This parameter applies to general purpose buckets only. It is not supported for - // directory bucket lifecycle configurations. - // - // [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations - NoncurrentDays *int32 - - noSmithyDocumentSerde -} - -// Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA , ONEZONE_IA , INTELLIGENT_TIERING , GLACIER_IR , -// GLACIER , or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled -// (or versioning is suspended), you can set this action to request that Amazon S3 -// transition noncurrent object versions to the STANDARD_IA , ONEZONE_IA , -// INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE storage class at a -// specific period in the object's lifetime. -type NoncurrentVersionTransition struct { - - // Specifies how many noncurrent versions Amazon S3 will retain in the same - // storage class before transitioning objects. You can specify up to 100 noncurrent - // versions to retain. Amazon S3 will transition any additional noncurrent versions - // beyond the specified number to retain. For more information about noncurrent - // versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. - // - // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html - NewerNoncurrentVersions *int32 - - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]in the Amazon S3 User Guide. 
- // - // [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations - NoncurrentDays *int32 - - // The class of storage used to store the object. - StorageClass TransitionStorageClass - - noSmithyDocumentSerde -} - -// A container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off for the bucket. -type NotificationConfiguration struct { - - // Enables delivery of events to Amazon EventBridge. - EventBridgeConfiguration *EventBridgeConfiguration - - // Describes the Lambda functions to invoke and the events for which to invoke - // them. - LambdaFunctionConfigurations []LambdaFunctionConfiguration - - // The Amazon Simple Queue Service queues to publish messages to and the events - // for which to publish messages. - QueueConfigurations []QueueConfiguration - - // The topic to which notifications are sent and the events for which - // notifications are generated. - TopicConfigurations []TopicConfiguration - - noSmithyDocumentSerde -} - -// Specifies object key name filtering rules. For information about key name -// filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. -// -// [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html -type NotificationConfigurationFilter struct { - - // A container for object key name prefix and suffix filtering rules. - Key *S3KeyFilter - - noSmithyDocumentSerde -} - -// An object consists of data and its descriptive metadata. -type Object struct { - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm []ChecksumAlgorithm - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - // The entity tag is a hash of the object. The ETag reflects changes only to the - // contents of an object, not its metadata. The ETag may or may not be an MD5 - // digest of the object data. Whether or not it is depends on how the object was - // created and how it is encrypted as described below: - // - // - Objects created by the PUT Object, POST Object, or Copy operation, or - // through the Amazon Web Services Management Console, and are encrypted by SSE-S3 - // or plaintext, have ETags that are an MD5 digest of their object data. - // - // - Objects created by the PUT Object, POST Object, or Copy operation, or - // through the Amazon Web Services Management Console, and are encrypted by SSE-C - // or SSE-KMS, have ETags that are not an MD5 digest of their object data. - // - // - If an object is created by either the Multipart Upload or Part Copy - // operation, the ETag is not an MD5 digest, regardless of the method of - // encryption. If an object is larger than 16 MB, the Amazon Web Services - // Management Console will upload or copy that object as a Multipart Upload, and - // therefore the ETag will not be an MD5 digest. - // - // Directory buckets - MD5 is not supported by directory buckets. - ETag *string - - // The name that you assign to an object. You use the object key to retrieve the - // object. 
- Key *string - - // Creation date of the object. - LastModified *time.Time - - // The owner of the object - // - // Directory buckets - The bucket owner is returned as the object owner. - Owner *Owner - - // Specifies the restoration status of an object. Objects in certain storage - // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see [Working with archived objects]in the - // Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. Only the S3 Express - // One Zone storage class is supported by directory buckets to store objects. - // - // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html - RestoreStatus *RestoreStatus - - // Size in bytes of the object - Size *int64 - - // The class of storage used to store the object. - // - // Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. - StorageClass ObjectStorageClass - - noSmithyDocumentSerde -} - -// Object Identifier is unique value to identify objects. -type ObjectIdentifier struct { - - // Key name of the object. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // This member is required. - Key *string - - // An entity tag (ETag) is an identifier assigned by a web server to a specific - // version of a resource found at a URL. This header field makes the request method - // conditional on ETags . - // - // Entity tags (ETags) for S3 Express One Zone are random alphanumeric strings - // unique to the object. - ETag *string - - // If present, the objects are deleted only if its modification times matches the - // provided Timestamp . - // - // This functionality is only supported for directory buckets. - LastModifiedTime *time.Time - - // If present, the objects are deleted only if its size matches the provided size - // in bytes. - // - // This functionality is only supported for directory buckets. - Size *int64 - - // Version ID for the specific version of the object to delete. - // - // This functionality is not supported for directory buckets. - VersionId *string - - noSmithyDocumentSerde -} - -// The container element for Object Lock configuration parameters. -type ObjectLockConfiguration struct { - - // Indicates whether this bucket has an Object Lock configuration enabled. Enable - // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket. - ObjectLockEnabled ObjectLockEnabled - - // Specifies the Object Lock rule for the specified object. Enable the this rule - // when you apply ObjectLockConfiguration to a bucket. Bucket settings require - // both a mode and a period. The period can be either Days or Years but you must - // select one. You cannot specify Days and Years at the same time. - Rule *ObjectLockRule - - noSmithyDocumentSerde -} - -// A legal hold configuration for an object. -type ObjectLockLegalHold struct { - - // Indicates whether the specified object has a legal hold in place. - Status ObjectLockLegalHoldStatus - - noSmithyDocumentSerde -} - -// A Retention configuration for an object. 
-type ObjectLockRetention struct { - - // Indicates the Retention mode for the specified object. - Mode ObjectLockRetentionMode - - // The date on which this Object Lock Retention will expire. - RetainUntilDate *time.Time - - noSmithyDocumentSerde -} - -// The container element for an Object Lock rule. -type ObjectLockRule struct { - - // The default Object Lock retention mode and period that you want to apply to new - // objects placed in the specified bucket. Bucket settings require both a mode and - // a period. The period can be either Days or Years but you must select one. You - // cannot specify Days and Years at the same time. - DefaultRetention *DefaultRetention - - noSmithyDocumentSerde -} - -// A container for elements related to an individual part. -type ObjectPart struct { - - // The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC-32 checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC-32C checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC-64NVME - // checksum algorithm, or if the object was uploaded without a checksum (and Amazon - // S3 added the default checksum, CRC-64NVME , to the uploaded object). For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is - // present if the multipart upload request was created with the SHA-1 checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is - // present if the multipart upload request was created with the SHA-256 checksum - // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // The part number identifying the part. This value is a positive integer between - // 1 and 10,000. - PartNumber *int32 - - // The size of the uploaded part in bytes. - Size *int64 - - noSmithyDocumentSerde -} - -// The version of an object. -type ObjectVersion struct { - - // The algorithm that was used to create a checksum of the object. - ChecksumAlgorithm []ChecksumAlgorithm - - // The checksum type that is used to calculate the object’s checksum value. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. 
- // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumType ChecksumType - - // The entity tag is an MD5 hash of that version of the object. - ETag *string - - // Specifies whether the object is (true) or is not (false) the latest version of - // an object. - IsLatest *bool - - // The object key. - Key *string - - // Date and time when the object was last modified. - LastModified *time.Time - - // Specifies the owner of the object. - Owner *Owner - - // Specifies the restoration status of an object. Objects in certain storage - // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see [Working with archived objects]in the - // Amazon S3 User Guide. - // - // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html - RestoreStatus *RestoreStatus - - // Size in bytes of the object. - Size *int64 - - // The class of storage used to store the object. - StorageClass ObjectVersionStorageClass - - // Version ID of an object. - VersionId *string - - noSmithyDocumentSerde -} - -// Describes the location where the restore job's output is stored. -type OutputLocation struct { - - // Describes an S3 location that will receive the results of the restore request. - S3 *S3Location - - noSmithyDocumentSerde -} - -// Describes how results of the Select job are serialized. -type OutputSerialization struct { - - // Describes the serialization of CSV-encoded Select results. - CSV *CSVOutput - - // Specifies JSON as request's output serialization format. - JSON *JSONOutput - - noSmithyDocumentSerde -} - -// Container for the owner's display name and ID. -type Owner struct { - - // Container for the display name of the owner. This value is only supported in - // the following Amazon Web Services Regions: - // - // - US East (N. Virginia) - // - // - US West (N. California) - // - // - US West (Oregon) - // - // - Asia Pacific (Singapore) - // - // - Asia Pacific (Sydney) - // - // - Asia Pacific (Tokyo) - // - // - Europe (Ireland) - // - // - South America (São Paulo) - // - // This functionality is not supported for directory buckets. - DisplayName *string - - // Container for the ID of the owner. - ID *string - - noSmithyDocumentSerde -} - -// The container element for a bucket's ownership controls. -type OwnershipControls struct { - - // The container element for an ownership control rule. - // - // This member is required. - Rules []OwnershipControlsRule - - noSmithyDocumentSerde -} - -// The container element for an ownership control rule. -type OwnershipControlsRule struct { - - // The container element for object ownership for a bucket's ownership controls. - // - // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the - // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. - // - // ObjectWriter - The uploading account will own the object if the object is - // uploaded with the bucket-owner-full-control canned ACL. - // - // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer - // affect permissions. The bucket owner automatically owns and has full control - // over every object in the bucket. 
The bucket only accepts PUT requests that don't - // specify an ACL or specify bucket owner full control ACLs (such as the predefined - // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). - // - // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are - // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where - // you must control access for each object individually. For more information about - // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. - // - // This functionality is not supported for directory buckets. Directory buckets - // use the bucket owner enforced setting for S3 Object Ownership. - // - // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html - // - // This member is required. - ObjectOwnership ObjectOwnership - - noSmithyDocumentSerde -} - -// Container for Parquet. -type ParquetInput struct { - noSmithyDocumentSerde -} - -// Container for elements related to a part. -type Part struct { - - // The Base64 encoded, 32-bit CRC-32 checksum of the part. This checksum is - // present if the object was uploaded with the CRC-32 checksum algorithm. For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32 *string - - // The Base64 encoded, 32-bit CRC-32C checksum of the part. This checksum is - // present if the object was uploaded with the CRC-32C checksum algorithm. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC32C *string - - // The Base64 encoded, 64-bit CRC-64NVME checksum of the part. This checksum is - // present if the multipart upload request was created with the CRC-64NVME - // checksum algorithm, or if the object was uploaded without a checksum (and Amazon - // S3 added the default checksum, CRC-64NVME , to the uploaded object). For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumCRC64NVME *string - - // The Base64 encoded, 160-bit SHA-1 checksum of the part. This checksum is - // present if the object was uploaded with the SHA-1 checksum algorithm. For more - // information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA1 *string - - // The Base64 encoded, 256-bit SHA-256 checksum of the part. This checksum is - // present if the object was uploaded with the SHA-256 checksum algorithm. For - // more information, see [Checking object integrity]in the Amazon S3 User Guide. - // - // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html - ChecksumSHA256 *string - - // Entity tag returned when the part was uploaded. - ETag *string - - // Date and time at which the part was uploaded. - LastModified *time.Time - - // Part number identifying the part. 
This is a positive integer between 1 and - // 10,000. - PartNumber *int32 - - // Size in bytes of the uploaded part data. - Size *int64 - - noSmithyDocumentSerde -} - -// Amazon S3 keys for log objects are partitioned in the following format: -// -// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] -// -// PartitionedPrefix defaults to EventTime delivery when server access logs are -// delivered. -type PartitionedPrefix struct { - - // Specifies the partition date source for the partitioned prefix. - // PartitionDateSource can be EventTime or DeliveryTime . - // - // For DeliveryTime , the time in the log file names corresponds to the delivery - // time for the log files. - // - // For EventTime , The logs delivered are for a specific day only. The year, month, - // and day correspond to the day on which the event occurred, and the hour, minutes - // and seconds are set to 00 in the key. - PartitionDateSource PartitionDateSource - - noSmithyDocumentSerde -} - -// The container element for a bucket's policy status. -type PolicyStatus struct { - - // The policy status for this bucket. TRUE indicates that this bucket is public. - // FALSE indicates that the bucket is not public. - IsPublic *bool - - noSmithyDocumentSerde -} - -// This data type contains information about progress of an operation. -type Progress struct { - - // The current number of uncompressed object bytes processed. - BytesProcessed *int64 - - // The current number of bytes of records payload data returned. - BytesReturned *int64 - - // The current number of object bytes scanned. - BytesScanned *int64 - - noSmithyDocumentSerde -} - -// This data type contains information about the progress event of an operation. -type ProgressEvent struct { - - // The Progress event details. - Details *Progress - - noSmithyDocumentSerde -} - -// The PublicAccessBlock configuration that you want to apply to this Amazon S3 -// bucket. You can enable the configuration options in any combination. For more -// information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in -// the Amazon S3 User Guide. -// -// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status -type PublicAccessBlockConfiguration struct { - - // Specifies whether Amazon S3 should block public access control lists (ACLs) for - // this bucket and objects in this bucket. Setting this element to TRUE causes the - // following behavior: - // - // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public. - // - // - PUT Object calls fail if the request includes a public ACL. - // - // - PUT Bucket calls fail if the request includes a public ACL. - // - // Enabling this setting doesn't affect existing policies or ACLs. - BlockPublicAcls *bool - - // Specifies whether Amazon S3 should block public bucket policies for this - // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT - // Bucket policy if the specified bucket policy allows public access. - // - // Enabling this setting doesn't affect existing bucket policies. - BlockPublicPolicy *bool - - // Specifies whether Amazon S3 should ignore public ACLs for this bucket and - // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore - // all public ACLs on this bucket and objects in this bucket. 
- // - // Enabling this setting doesn't affect the persistence of any existing ACLs and - // doesn't prevent new public ACLs from being set. - IgnorePublicAcls *bool - - // Specifies whether Amazon S3 should restrict public bucket policies for this - // bucket. Setting this element to TRUE restricts access to this bucket to only - // Amazon Web Services service principals and authorized users within this account - // if the bucket has a public policy. - // - // Enabling this setting doesn't affect previously stored bucket policies, except - // that public and cross-account access within any public bucket policy, including - // non-public delegation to specific accounts, is blocked. - RestrictPublicBuckets *bool - - noSmithyDocumentSerde -} - -// Specifies the configuration for publishing messages to an Amazon Simple Queue -// Service (Amazon SQS) queue when Amazon S3 detects specified events. -type QueueConfiguration struct { - - // A collection of bucket events for which to send notifications - // - // This member is required. - Events []Event - - // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 - // publishes a message when it detects events of the specified type. - // - // This member is required. - QueueArn *string - - // Specifies object key name filtering rules. For information about key name - // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. - // - // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html - Filter *NotificationConfigurationFilter - - // An optional unique identifier for configurations in a notification - // configuration. If you don't provide one, Amazon S3 will assign an ID. - Id *string - - noSmithyDocumentSerde -} - -// The container for the records event. -type RecordsEvent struct { - - // The byte array of partial, one or more result records. S3 Select doesn't - // guarantee that a record will be self-contained in one record frame. To ensure - // continuous streaming of data, S3 Select might split the same record across - // multiple record frames instead of aggregating the results in memory. Some S3 - // clients (for example, the SDK for Java) handle this behavior by creating a - // ByteStream out of the response by default. Other clients might not handle this - // behavior by default. In those cases, you must aggregate the results on the - // client side and parse the response. - Payload []byte - - noSmithyDocumentSerde -} - -// Specifies how requests are redirected. In the event of an error, you can -// specify a different error code to return. -type Redirect struct { - - // The host name to use in the redirect request. - HostName *string - - // The HTTP redirect code to use on the response. Not required if one of the - // siblings is present. - HttpRedirectCode *string - - // Protocol to use when redirecting requests. The default is the protocol that is - // used in the original request. - Protocol Protocol - - // The object key prefix to use in the redirect request. For example, to redirect - // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/ , you can set a condition block with KeyPrefixEquals set to docs/ - // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one - // of the siblings is present. Can be present only if ReplaceKeyWith is not - // provided. 
- // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - ReplaceKeyPrefixWith *string - - // The specific object key to use in the redirect request. For example, redirect - // request to error.html . Not required if one of the siblings is present. Can be - // present only if ReplaceKeyPrefixWith is not provided. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - ReplaceKeyWith *string - - noSmithyDocumentSerde -} - -// Specifies the redirect behavior of all requests to a website endpoint of an -// Amazon S3 bucket. -type RedirectAllRequestsTo struct { - - // Name of the host where requests are redirected. - // - // This member is required. - HostName *string - - // Protocol to use when redirecting requests. The default is the protocol that is - // used in the original request. - Protocol Protocol - - noSmithyDocumentSerde -} - -// A filter that you can specify for selection for modifications on replicas. -// Amazon S3 doesn't replicate replica modifications by default. In the latest -// version of replication configuration (when Filter is specified), you can -// specify this element and set the status to Enabled to replicate modifications -// on replicas. -// -// If you don't specify the Filter element, Amazon S3 assumes that the replication -// configuration is the earlier version, V1. In the earlier version, this element -// is not allowed. -type ReplicaModifications struct { - - // Specifies whether Amazon S3 replicates modifications on replicas. - // - // This member is required. - Status ReplicaModificationsStatus - - noSmithyDocumentSerde -} - -// A container for replication rules. You can add up to 1,000 rules. The maximum -// size of a replication configuration is 2 MB. -type ReplicationConfiguration struct { - - // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role - // that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication]in - // the Amazon S3 User Guide. - // - // [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html - // - // This member is required. - Role *string - - // A container for one or more replication rules. A replication configuration must - // have at least one rule and can contain a maximum of 1,000 rules. - // - // This member is required. - Rules []ReplicationRule - - noSmithyDocumentSerde -} - -// Specifies which Amazon S3 objects to replicate and where to store the replicas. -type ReplicationRule struct { - - // A container for information about the replication destination and its - // configurations including enabling the S3 Replication Time Control (S3 RTC). - // - // This member is required. - Destination *Destination - - // Specifies whether the rule is enabled. - // - // This member is required. - Status ReplicationRuleStatus - - // Specifies whether Amazon S3 replicates delete markers. 
If you specify a Filter - // in your replication configuration, you must also include a - // DeleteMarkerReplication element. If your Filter includes a Tag element, the - // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does - // not support replicating delete markers for tag-based rules. For an example - // configuration, see [Basic Rule Configuration]. - // - // For more information about delete marker replication, see [Basic Rule Configuration]. - // - // If you are using an earlier version of the replication configuration, Amazon S3 - // handles replication of delete markers differently. For more information, see [Backward Compatibility]. - // - // [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html - // [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations - DeleteMarkerReplication *DeleteMarkerReplication - - // Optional configuration to replicate existing source bucket objects. - // - // This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in - // the Amazon S3 User Guide. - // - // [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html - ExistingObjectReplication *ExistingObjectReplication - - // A filter that identifies the subset of objects to which the replication rule - // applies. A Filter must specify exactly one Prefix , Tag , or an And child - // element. - Filter *ReplicationRuleFilter - - // A unique identifier for the rule. The maximum value is 255 characters. - ID *string - - // An object key name prefix that identifies the object or objects to which the - // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - // - // Deprecated: This member has been deprecated. - Prefix *string - - // The priority indicates which rule has precedence whenever two or more - // replication rules conflict. Amazon S3 will attempt to replicate objects - // according to all replication rules. However, if there are two or more rules with - // the same destination bucket, then objects will be replicated according to the - // rule with the highest priority. The higher the number, the higher the priority. - // - // For more information, see [Replication] in the Amazon S3 User Guide. - // - // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html - Priority *int32 - - // A container that describes additional filters for identifying the source - // objects that you want to replicate. You can choose to enable or disable the - // replication of these objects. Currently, Amazon S3 supports only the filter that - // you can specify for objects created with server-side encryption using a customer - // managed key stored in Amazon Web Services Key Management Service (SSE-KMS). - SourceSelectionCriteria *SourceSelectionCriteria - - noSmithyDocumentSerde -} - -// A container for specifying rule filters. 
The filters determine the subset of -// objects to which the rule applies. This element is required only if you specify -// more than one filter. -// -// For example: -// -// - If you specify both a Prefix and a Tag filter, wrap these filters in an And -// tag. -// -// - If you specify a filter based on multiple tags, wrap the Tag elements in an -// And tag. -type ReplicationRuleAndOperator struct { - - // An object key name prefix that identifies the subset of objects to which the - // rule applies. - Prefix *string - - // An array of tags containing key and value pairs. - Tags []Tag - - noSmithyDocumentSerde -} - -// A filter that identifies the subset of objects to which the replication rule -// applies. A Filter must specify exactly one Prefix , Tag , or an And child -// element. -type ReplicationRuleFilter struct { - - // A container for specifying rule filters. The filters determine the subset of - // objects to which the rule applies. This element is required only if you specify - // more than one filter. For example: - // - // - If you specify both a Prefix and a Tag filter, wrap these filters in an And - // tag. - // - // - If you specify a filter based on multiple tags, wrap the Tag elements in an - // And tag. - And *ReplicationRuleAndOperator - - // An object key name prefix that identifies the subset of objects to which the - // rule applies. - // - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. - // - // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints - Prefix *string - - // A container for specifying a tag key and value. - // - // The rule applies only to objects that have the tag in their tag set. - Tag *Tag - - noSmithyDocumentSerde -} - -// A container specifying S3 Replication Time Control (S3 RTC) related -// -// information, including whether S3 RTC is enabled and the time when all objects -// and operations on objects must be replicated. Must be specified together with a -// Metrics block. -type ReplicationTime struct { - - // Specifies whether the replication time is enabled. - // - // This member is required. - Status ReplicationTimeStatus - - // A container specifying the time by which replication should be complete for - // all objects and operations on objects. - // - // This member is required. - Time *ReplicationTimeValue - - noSmithyDocumentSerde -} - -// A container specifying the time value for S3 Replication Time Control (S3 RTC) -// -// and replication metrics EventThreshold . -type ReplicationTimeValue struct { - - // Contains an integer specifying time in minutes. - // - // Valid value: 15 - Minutes *int32 - - noSmithyDocumentSerde -} - -// Container for Payer. -type RequestPaymentConfiguration struct { - - // Specifies who pays for the download and request fees. - // - // This member is required. - Payer Payer - - noSmithyDocumentSerde -} - -// Container for specifying if periodic QueryProgress messages should be sent. -type RequestProgress struct { - - // Specifies whether periodic QueryProgress frames should be sent. Valid values: - // TRUE, FALSE. Default value: FALSE. - Enabled *bool - - noSmithyDocumentSerde -} - -// Container for restore job parameters. -type RestoreRequest struct { - - // Lifetime of the active copy in days. Do not use with restores that specify - // OutputLocation . 
- // - // The Days element is required for regular restores, and must not be provided for - // select requests. - Days *int32 - - // The optional description for the job. - Description *string - - // S3 Glacier related parameters pertaining to this job. Do not use with restores - // that specify OutputLocation . - GlacierJobParameters *GlacierJobParameters - - // Describes the location where the restore job's output is stored. - OutputLocation *OutputLocation - - // Amazon S3 Select is no longer available to new customers. Existing customers of - // Amazon S3 Select can continue to use the feature as usual. [Learn more] - // - // Describes the parameters for Select job types. - // - // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ - SelectParameters *SelectParameters - - // Retrieval tier at which the restore will be processed. - Tier Tier - - // Amazon S3 Select is no longer available to new customers. Existing customers of - // Amazon S3 Select can continue to use the feature as usual. [Learn more] - // - // Type of restore request. - // - // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ - Type RestoreRequestType - - noSmithyDocumentSerde -} - -// Specifies the restoration status of an object. Objects in certain storage -// classes must be restored before they can be retrieved. For more information -// about these storage classes and how to work with archived objects, see [Working with archived objects]in the -// Amazon S3 User Guide. -// -// This functionality is not supported for directory buckets. Only the S3 Express -// One Zone storage class is supported by directory buckets to store objects. -// -// [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html -type RestoreStatus struct { - - // Specifies whether the object is currently being restored. If the object - // restoration is in progress, the header returns the value TRUE . For example: - // - // x-amz-optional-object-attributes: IsRestoreInProgress="true" - // - // If the object restoration has completed, the header returns the value FALSE . - // For example: - // - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" - // - // If the object hasn't been restored, there is no header response. - IsRestoreInProgress *bool - - // Indicates when the restored copy will expire. This value is populated only if - // the object has already been restored. For example: - // - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" - RestoreExpiryDate *time.Time - - noSmithyDocumentSerde -} - -// Specifies the redirect behavior and when a redirect is applied. For more -// information about routing rules, see [Configuring advanced conditional redirects]in the Amazon S3 User Guide. -// -// [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects -type RoutingRule struct { - - // Container for redirect information. You can redirect requests to another host, - // to another page, or with another protocol. In the event of an error, you can - // specify a different error code to return. - // - // This member is required. - Redirect *Redirect - - // A container for describing a condition that must be met for the specified - // redirect to apply. For example, 1. 
If request is for pages in the /docs folder, - // redirect to the /documents folder. 2. If request results in HTTP error 4xx, - // redirect request to another host where you might process the error. - Condition *Condition - - noSmithyDocumentSerde -} - -// A container for object key name prefix and suffix filtering rules. -type S3KeyFilter struct { - - // A list of containers for the key-value pair that defines the criteria for the - // filter rule. - FilterRules []FilterRule - - noSmithyDocumentSerde -} - -// Describes an Amazon S3 location that will receive the results of the restore -// request. -type S3Location struct { - - // The name of the bucket where the restore results will be placed. - // - // This member is required. - BucketName *string - - // The prefix that is prepended to the restore results for this request. - // - // This member is required. - Prefix *string - - // A list of grants that control access to the staged results. - AccessControlList []Grant - - // The canned ACL to apply to the restore results. - CannedACL ObjectCannedACL - - // Contains the type of server-side encryption used. - Encryption *Encryption - - // The class of storage used to store the restore results. - StorageClass StorageClass - - // The tag-set that is applied to the restore results. - Tagging *Tagging - - // A list of metadata to store with the restore results in S3. - UserMetadata []MetadataEntry - - noSmithyDocumentSerde -} - -// The destination information for the metadata table configuration. The -// -// destination table bucket must be in the same Region and Amazon Web Services -// account as the general purpose bucket. The specified metadata table name must be -// unique within the aws_s3_metadata namespace in the destination table bucket. -type S3TablesDestination struct { - - // The Amazon Resource Name (ARN) for the table bucket that's specified as the - // destination in the metadata table configuration. The destination table bucket - // must be in the same Region and Amazon Web Services account as the general - // purpose bucket. - // - // This member is required. - TableBucketArn *string - - // The name for the metadata table in your metadata table configuration. The - // specified metadata table name must be unique within the aws_s3_metadata - // namespace in the destination table bucket. - // - // This member is required. - TableName *string - - noSmithyDocumentSerde -} - -// The destination information for the metadata table configuration. The -// -// destination table bucket must be in the same Region and Amazon Web Services -// account as the general purpose bucket. The specified metadata table name must be -// unique within the aws_s3_metadata namespace in the destination table bucket. -type S3TablesDestinationResult struct { - - // The Amazon Resource Name (ARN) for the metadata table in the metadata table - // configuration. The specified metadata table name must be unique within the - // aws_s3_metadata namespace in the destination table bucket. - // - // This member is required. - TableArn *string - - // The Amazon Resource Name (ARN) for the table bucket that's specified as the - // destination in the metadata table configuration. The destination table bucket - // must be in the same Region and Amazon Web Services account as the general - // purpose bucket. - // - // This member is required. - TableBucketArn *string - - // The name for the metadata table in your metadata table configuration. 
The - // specified metadata table name must be unique within the aws_s3_metadata - // namespace in the destination table bucket. - // - // This member is required. - TableName *string - - // The table bucket namespace for the metadata table in your metadata table - // configuration. This value is always aws_s3_metadata . - // - // This member is required. - TableNamespace *string - - noSmithyDocumentSerde -} - -// Specifies the byte range of the object to get the records from. A record is -// processed when its first byte is contained by the range. This parameter is -// optional, but when specified, it must not be empty. See RFC 2616, Section -// 14.35.1 about how to specify the start and end of the range. -type ScanRange struct { - - // Specifies the end of the byte range. This parameter is optional. Valid values: - // non-negative integers. The default value is one less than the size of the object - // being queried. If only the End parameter is supplied, it is interpreted to mean - // scan the last N bytes of the file. For example, 50 means scan the last 50 bytes. - End *int64 - - // Specifies the start of the byte range. This parameter is optional. Valid - // values: non-negative integers. The default value is 0. If only start is - // supplied, it means scan from that point to the end of the file. For example, 50 - // means scan from byte 50 until the end of the file. - Start *int64 - - noSmithyDocumentSerde -} - -// The container for selecting objects from a content event stream. -// -// The following types satisfy this interface: -// -// SelectObjectContentEventStreamMemberCont -// SelectObjectContentEventStreamMemberEnd -// SelectObjectContentEventStreamMemberProgress -// SelectObjectContentEventStreamMemberRecords -// SelectObjectContentEventStreamMemberStats -type SelectObjectContentEventStream interface { - isSelectObjectContentEventStream() -} - -// The Continuation Event. -type SelectObjectContentEventStreamMemberCont struct { - Value ContinuationEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberCont) isSelectObjectContentEventStream() {} - -// The End Event. -type SelectObjectContentEventStreamMemberEnd struct { - Value EndEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberEnd) isSelectObjectContentEventStream() {} - -// The Progress Event. -type SelectObjectContentEventStreamMemberProgress struct { - Value ProgressEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberProgress) isSelectObjectContentEventStream() {} - -// The Records Event. -type SelectObjectContentEventStreamMemberRecords struct { - Value RecordsEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberRecords) isSelectObjectContentEventStream() {} - -// The Stats Event. -type SelectObjectContentEventStreamMemberStats struct { - Value StatsEvent - - noSmithyDocumentSerde -} - -func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {} - -// Amazon S3 Select is no longer available to new customers. Existing customers of -// Amazon S3 Select can continue to use the feature as usual. [Learn more] -// -// Describes the parameters for Select job types. -// -// Learn [How to optimize querying your data in Amazon S3] using [Amazon Athena], [S3 Object Lambda], or client-side filtering. 
-// -// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ -// [How to optimize querying your data in Amazon S3]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ -// [Amazon Athena]: https://docs.aws.amazon.com/athena/latest/ug/what-is.html -// [S3 Object Lambda]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html -type SelectParameters struct { - - // Amazon S3 Select is no longer available to new customers. Existing customers of - // Amazon S3 Select can continue to use the feature as usual. [Learn more] - // - // The expression that is used to query the object. - // - // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ - // - // This member is required. - Expression *string - - // The type of the provided expression (for example, SQL). - // - // This member is required. - ExpressionType ExpressionType - - // Describes the serialization format of the object. - // - // This member is required. - InputSerialization *InputSerialization - - // Describes how the results of the Select job are serialized. - // - // This member is required. - OutputSerialization *OutputSerialization - - noSmithyDocumentSerde -} - -// Describes the default server-side encryption to apply to new objects in the -// bucket. If a PUT Object request doesn't specify any server-side encryption, this -// default encryption will be applied. For more information, see [PutBucketEncryption]. -// -// - General purpose buckets - If you don't specify a customer managed key at -// configuration, Amazon S3 automatically creates an Amazon Web Services KMS key ( -// aws/s3 ) in your Amazon Web Services account the first time that you add an -// object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS -// key for SSE-KMS. -// -// - Directory buckets - Your SSE-KMS configuration can only support 1 [customer managed key]per -// directory bucket for the lifetime of the bucket. The [Amazon Web Services managed key]( aws/s3 ) isn't -// supported. -// -// - Directory buckets - For directory buckets, there are only two supported -// options for server-side encryption: SSE-S3 and SSE-KMS. -// -// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html -// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk -type ServerSideEncryptionByDefault struct { - - // Server-side encryption algorithm to use for the default encryption. - // - // For directory buckets, there are only two supported values for server-side - // encryption: AES256 and aws:kms . - // - // This member is required. - SSEAlgorithm ServerSideEncryption - - // Amazon Web Services Key Management Service (KMS) customer managed key ID to use - // for the default encryption. - // - // - General purpose buckets - This parameter is allowed if and only if - // SSEAlgorithm is set to aws:kms or aws:kms:dsse . - // - // - Directory buckets - This parameter is allowed if and only if SSEAlgorithm is - // set to aws:kms . - // - // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the - // KMS key. 
- // - // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // - Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // - Key Alias: alias/alias-name - // - // If you are using encryption with cross-account or Amazon Web Services service - // operations, you must use a fully qualified KMS key ARN. For more information, - // see [Using encryption for cross-account operations]. - // - // - General purpose buckets - If you're specifying a customer managed KMS key, - // we recommend using a fully qualified KMS key ARN. If you use a KMS key alias - // instead, then KMS resolves the key within the requester’s account. This behavior - // can result in data that's encrypted with a KMS key that belongs to the - // requester, and not the bucket owner. Also, if you use a key ID, you can run into - // a LogDestination undeliverable error when creating a VPC flow log. - // - // - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory - // bucket, only use the key ID or key ARN. The key alias format of the KMS key - // isn't supported. - // - // Amazon S3 only supports symmetric encryption KMS keys. For more information, - // see [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. - // - // [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy - // [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk - // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html - KMSMasterKeyID *string - - noSmithyDocumentSerde -} - -// Specifies the default server-side-encryption configuration. -type ServerSideEncryptionConfiguration struct { - - // Container for information about a particular server-side encryption - // configuration rule. - // - // This member is required. - Rules []ServerSideEncryptionRule - - noSmithyDocumentSerde -} - -// Specifies the default server-side encryption configuration. -// -// - General purpose buckets - If you're specifying a customer managed KMS key, -// we recommend using a fully qualified KMS key ARN. If you use a KMS key alias -// instead, then KMS resolves the key within the requester’s account. This behavior -// can result in data that's encrypted with a KMS key that belongs to the -// requester, and not the bucket owner. -// -// - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory -// bucket, only use the key ID or key ARN. The key alias format of the KMS key -// isn't supported. -// -// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk -type ServerSideEncryptionRule struct { - - // Specifies the default server-side encryption to apply to new objects in the - // bucket. If a PUT Object request doesn't specify any server-side encryption, this - // default encryption will be applied. - ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault - - // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side - // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects - // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 - // to use an S3 Bucket Key. 
- // - // - General purpose buckets - By default, S3 Bucket Key is not enabled. For - // more information, see [Amazon S3 Bucket Keys]in the Amazon S3 User Guide. - // - // - Directory buckets - S3 Bucket Keys are always enabled for GET and PUT - // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't - // supported, when you copy SSE-KMS encrypted objects from general purpose buckets - // to directory buckets, from directory buckets to general purpose buckets, or - // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a - // call to KMS every time a copy request is made for a KMS-encrypted object. - // - // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html - // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html - // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job - // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html - // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops - BucketKeyEnabled *bool - - noSmithyDocumentSerde -} - -// The established temporary security credentials of the session. -// -// Directory buckets - These session credentials are only supported for the -// authentication and authorization of Zonal endpoint API operations on directory -// buckets. -type SessionCredentials struct { - - // A unique identifier that's associated with a secret access key. The access key - // ID and the secret access key are used together to sign programmatic Amazon Web - // Services requests cryptographically. - // - // This member is required. - AccessKeyId *string - - // Temporary security credentials expire after a specified interval. After - // temporary credentials expire, any calls that you make with those credentials - // will fail. So you must generate a new set of temporary credentials. Temporary - // credentials cannot be extended or refreshed beyond the original specified - // interval. - // - // This member is required. - Expiration *time.Time - - // A key that's used with the access key ID to cryptographically sign programmatic - // Amazon Web Services requests. Signing a request identifies the sender and - // prevents the request from being altered. - // - // This member is required. - SecretAccessKey *string - - // A part of the temporary security credentials. The session token is used to - // validate the temporary security credentials. - // - // This member is required. - SessionToken *string - - noSmithyDocumentSerde -} - -// To use simple format for S3 keys for log objects, set SimplePrefix to an empty -// object. -// -// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] -type SimplePrefix struct { - noSmithyDocumentSerde -} - -// A container that describes additional filters for identifying the source -// objects that you want to replicate. You can choose to enable or disable the -// replication of these objects. Currently, Amazon S3 supports only the filter that -// you can specify for objects created with server-side encryption using a customer -// managed key stored in Amazon Web Services Key Management Service (SSE-KMS). -type SourceSelectionCriteria struct { - - // A filter that you can specify for selections for modifications on replicas. 
- // Amazon S3 doesn't replicate replica modifications by default. In the latest - // version of replication configuration (when Filter is specified), you can - // specify this element and set the status to Enabled to replicate modifications - // on replicas. - // - // If you don't specify the Filter element, Amazon S3 assumes that the replication - // configuration is the earlier version, V1. In the earlier version, this element - // is not allowed - ReplicaModifications *ReplicaModifications - - // A container for filter information for the selection of Amazon S3 objects - // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria - // in the replication configuration, this element is required. - SseKmsEncryptedObjects *SseKmsEncryptedObjects - - noSmithyDocumentSerde -} - -// Specifies the use of SSE-KMS to encrypt delivered inventory reports. -type SSEKMS struct { - - // Specifies the ID of the Key Management Service (KMS) symmetric encryption - // customer managed key to use for encrypting inventory reports. - // - // This member is required. - KeyId *string - - noSmithyDocumentSerde -} - -// A container for filter information for the selection of S3 objects encrypted -// with Amazon Web Services KMS. -type SseKmsEncryptedObjects struct { - - // Specifies whether Amazon S3 replicates objects created with server-side - // encryption using an Amazon Web Services KMS key stored in Amazon Web Services - // Key Management Service. - // - // This member is required. - Status SseKmsEncryptedObjectsStatus - - noSmithyDocumentSerde -} - -// Specifies the use of SSE-S3 to encrypt delivered inventory reports. -type SSES3 struct { - noSmithyDocumentSerde -} - -// Container for the stats details. -type Stats struct { - - // The total number of uncompressed object bytes processed. - BytesProcessed *int64 - - // The total number of bytes of records payload data returned. - BytesReturned *int64 - - // The total number of object bytes scanned. - BytesScanned *int64 - - noSmithyDocumentSerde -} - -// Container for the Stats Event. -type StatsEvent struct { - - // The Stats event details. - Details *Stats - - noSmithyDocumentSerde -} - -// Specifies data related to access patterns to be collected and made available to -// analyze the tradeoffs between different storage classes for an Amazon S3 bucket. -type StorageClassAnalysis struct { - - // Specifies how data related to the storage class analysis for an Amazon S3 - // bucket should be exported. - DataExport *StorageClassAnalysisDataExport - - noSmithyDocumentSerde -} - -// Container for data related to the storage class analysis for an Amazon S3 -// bucket for export. -type StorageClassAnalysisDataExport struct { - - // The place to store the data for an analysis. - // - // This member is required. - Destination *AnalyticsExportDestination - - // The version of the output schema to use when exporting data. Must be V_1 . - // - // This member is required. - OutputSchemaVersion StorageClassAnalysisSchemaVersion - - noSmithyDocumentSerde -} - -// A container of a key value name pair. -type Tag struct { - - // Name of the object key. - // - // This member is required. - Key *string - - // Value of the tag. - // - // This member is required. - Value *string - - noSmithyDocumentSerde -} - -// Container for TagSet elements. -type Tagging struct { - - // A collection for a set of tags - // - // This member is required. - TagSet []Tag - - noSmithyDocumentSerde -} - -// Container for granting information. 
-// -// Buckets that use the bucket owner enforced setting for Object Ownership don't -// support target grants. For more information, see [Permissions server access log delivery]in the Amazon S3 User Guide. -// -// [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general -type TargetGrant struct { - - // Container for the person being granted permissions. - Grantee *Grantee - - // Logging permissions assigned to the grantee for the bucket. - Permission BucketLogsPermission - - noSmithyDocumentSerde -} - -// Amazon S3 key format for log objects. Only one format, PartitionedPrefix or -// SimplePrefix, is allowed. -type TargetObjectKeyFormat struct { - - // Partitioned S3 key for log objects. - PartitionedPrefix *PartitionedPrefix - - // To use the simple format for S3 keys for log objects. To specify SimplePrefix - // format, set SimplePrefix to {}. - SimplePrefix *SimplePrefix - - noSmithyDocumentSerde -} - -// The S3 Intelligent-Tiering storage class is designed to optimize storage costs -// by automatically moving data to the most cost-effective storage access tier, -// without additional operational overhead. -type Tiering struct { - - // S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3 - // Intelligent-Tiering storage class. - // - // [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access - // - // This member is required. - AccessTier IntelligentTieringAccessTier - - // The number of consecutive days of no access after which an object will be - // eligible to be transitioned to the corresponding tier. The minimum number of - // days specified for Archive Access tier must be at least 90 days and Deep Archive - // Access tier must be at least 180 days. The maximum can be up to 2 years (730 - // days). - // - // This member is required. - Days *int32 - - noSmithyDocumentSerde -} - -// A container for specifying the configuration for publication of messages to an -// Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects -// specified events. -type TopicConfiguration struct { - - // The Amazon S3 bucket event about which to send notifications. For more - // information, see [Supported Event Types]in the Amazon S3 User Guide. - // - // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html - // - // This member is required. - Events []Event - - // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 - // publishes a message when it detects events of the specified type. - // - // This member is required. - TopicArn *string - - // Specifies object key name filtering rules. For information about key name - // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. - // - // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html - Filter *NotificationConfigurationFilter - - // An optional unique identifier for configurations in a notification - // configuration. If you don't provide one, Amazon S3 will assign an ID. 
- Id *string - - noSmithyDocumentSerde -} - -// Specifies when an object transitions to a specified storage class. For more -// information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle]in the Amazon S3 -// User Guide. -// -// [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html -type Transition struct { - - // Indicates when objects are transitioned to the specified storage class. The - // date value must be in ISO 8601 format. The time is always midnight UTC. - Date *time.Time - - // Indicates the number of days after creation when objects are transitioned to - // the specified storage class. The value must be a positive integer. - Days *int32 - - // The storage class to which you want the object to transition. - StorageClass TransitionStorageClass - - noSmithyDocumentSerde -} - -// Describes the versioning state of an Amazon S3 bucket. For more information, -// see [PUT Bucket versioning]in the Amazon S3 API Reference. -// -// [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html -type VersioningConfiguration struct { - - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA delete. - // If the bucket has never been so configured, this element is not returned. - MFADelete MFADelete - - // The versioning state of the bucket. - Status BucketVersioningStatus - - noSmithyDocumentSerde -} - -// Specifies website configuration parameters for an Amazon S3 bucket. -type WebsiteConfiguration struct { - - // The name of the error document for the website. - ErrorDocument *ErrorDocument - - // The name of the index document for the website. - IndexDocument *IndexDocument - - // The redirect behavior for every request to this bucket's website endpoint. - // - // If you specify this property, you can't specify any other property. - RedirectAllRequestsTo *RedirectAllRequestsTo - - // Rules that define when a redirect is applied and the redirect behavior. - RoutingRules []RoutingRule - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -// UnknownUnionMember is returned when a union member is returned over the wire, -// but has an unknown tag. -type UnknownUnionMember struct { - Tag string - Value []byte - - noSmithyDocumentSerde -} - -func (*UnknownUnionMember) isAnalyticsFilter() {} -func (*UnknownUnionMember) isMetricsFilter() {} -func (*UnknownUnionMember) isSelectObjectContentEventStream() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go deleted file mode 100644 index 0e664c59c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go +++ /dev/null @@ -1,23 +0,0 @@ -package s3 - -// This contains helper methods to set resolver URI into the context object. 
If they are ever used for -// something other than S3, they should be moved to internal/context/context.go - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -type s3resolvedURI struct{} - -// setS3ResolvedURI sets the URI as resolved by the EndpointResolverV2 -func setS3ResolvedURI(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, s3resolvedURI{}, value) -} - -// getS3ResolvedURI gets the URI as resolved by EndpointResolverV2 -func getS3ResolvedURI(ctx context.Context) string { - v, _ := middleware.GetStackValue(ctx, s3resolvedURI{}).(string) - return v -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go deleted file mode 100644 index 97a56bd31..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go +++ /dev/null @@ -1,5702 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package s3 - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpAbortMultipartUpload struct { -} - -func (*validateOpAbortMultipartUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAbortMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AbortMultipartUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAbortMultipartUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCompleteMultipartUpload struct { -} - -func (*validateOpCompleteMultipartUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCompleteMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CompleteMultipartUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCompleteMultipartUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCopyObject struct { -} - -func (*validateOpCopyObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCopyObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CopyObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCopyObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateBucket struct { -} - -func (*validateOpCreateBucket) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateBucketInput) - 
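// NOTE (editorial aside, not part of the deleted vendored file): every generated
// validator in validators.go follows the same shape — assert the operation's
// concrete *Input type (as on the line above), run the matching
// validateOp*Input helper, then hand the request to the next Initialize
// handler. A minimal hand-written middleware using the same smithy-go hooks
// might look like the sketch below; the name validateExample is illustrative
// only:
//
//	type validateExample struct{}
//
//	func (*validateExample) ID() string { return "OperationInputValidation" }
//
//	func (m *validateExample) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
//		middleware.InitializeOutput, middleware.Metadata, error,
//	) {
//		if in.Parameters == nil {
//			return middleware.InitializeOutput{}, middleware.Metadata{}, fmt.Errorf("nil input parameters")
//		}
//		return next.HandleInitialize(ctx, in)
//	}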
if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateBucketInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateBucketMetadataTableConfiguration struct { -} - -func (*validateOpCreateBucketMetadataTableConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateBucketMetadataTableConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateMultipartUpload struct { -} - -func (*validateOpCreateMultipartUpload) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateMultipartUploadInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateMultipartUploadInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateSession struct { -} - -func (*validateOpCreateSession) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateSessionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateSessionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketAnalyticsConfiguration struct { -} - -func (*validateOpDeleteBucketAnalyticsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketAnalyticsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketCors struct { -} - -func (*validateOpDeleteBucketCors) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketCorsInput) - if !ok { - return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketCorsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketEncryption struct { -} - -func (*validateOpDeleteBucketEncryption) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketEncryptionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketEncryptionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucket struct { -} - -func (*validateOpDeleteBucket) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketIntelligentTieringConfiguration struct { -} - -func (*validateOpDeleteBucketIntelligentTieringConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketIntelligentTieringConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketInventoryConfiguration struct { -} - -func (*validateOpDeleteBucketInventoryConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketInventoryConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketLifecycle struct { -} - -func (*validateOpDeleteBucketLifecycle) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketLifecycleInput) - if !ok { - return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketLifecycleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketMetadataTableConfiguration struct { -} - -func (*validateOpDeleteBucketMetadataTableConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketMetadataTableConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketMetricsConfiguration struct { -} - -func (*validateOpDeleteBucketMetricsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketMetricsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketOwnershipControls struct { -} - -func (*validateOpDeleteBucketOwnershipControls) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketOwnershipControlsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketPolicy struct { -} - -func (*validateOpDeleteBucketPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketReplication struct { -} - -func (*validateOpDeleteBucketReplication) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := 
in.Parameters.(*DeleteBucketReplicationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketReplicationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketTagging struct { -} - -func (*validateOpDeleteBucketTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteBucketWebsite struct { -} - -func (*validateOpDeleteBucketWebsite) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteBucketWebsiteInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteBucketWebsiteInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteObject struct { -} - -func (*validateOpDeleteObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteObjects struct { -} - -func (*validateOpDeleteObjects) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteObjectsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteObjectsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeleteObjectTagging struct { -} - -func (*validateOpDeleteObjectTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeleteObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeleteObjectTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeleteObjectTaggingInput(input); err != nil { - return out, metadata, 
err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDeletePublicAccessBlock struct { -} - -func (*validateOpDeletePublicAccessBlock) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDeletePublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DeletePublicAccessBlockInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDeletePublicAccessBlockInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketAccelerateConfiguration struct { -} - -func (*validateOpGetBucketAccelerateConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketAccelerateConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketAcl struct { -} - -func (*validateOpGetBucketAcl) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketAclInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketAclInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketAnalyticsConfiguration struct { -} - -func (*validateOpGetBucketAnalyticsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketAnalyticsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketCors struct { -} - -func (*validateOpGetBucketCors) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketCorsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketCorsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketEncryption struct { 
-} - -func (*validateOpGetBucketEncryption) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketEncryptionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketEncryptionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketIntelligentTieringConfiguration struct { -} - -func (*validateOpGetBucketIntelligentTieringConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketIntelligentTieringConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketInventoryConfiguration struct { -} - -func (*validateOpGetBucketInventoryConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketInventoryConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketLifecycleConfiguration struct { -} - -func (*validateOpGetBucketLifecycleConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketLifecycleConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketLocation struct { -} - -func (*validateOpGetBucketLocation) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketLocation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketLocationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketLocationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type 
validateOpGetBucketLogging struct { -} - -func (*validateOpGetBucketLogging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketLoggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketLoggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketMetadataTableConfiguration struct { -} - -func (*validateOpGetBucketMetadataTableConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketMetadataTableConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketMetricsConfiguration struct { -} - -func (*validateOpGetBucketMetricsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketMetricsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketNotificationConfiguration struct { -} - -func (*validateOpGetBucketNotificationConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketNotificationConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketOwnershipControls struct { -} - -func (*validateOpGetBucketOwnershipControls) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketOwnershipControlsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketOwnershipControlsInput(input); err != nil { - return out, metadata, err - } - 
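// NOTE (editorial aside, not part of the deleted vendored file): once the
// operation-specific validation above succeeds, the request simply continues
// down the Initialize step of the middleware stack. In the generated SDK these
// validators are attached per operation, typically along the lines of:
//
//	stack.Initialize.Add(&validateOpGetBucketOwnershipControls{}, middleware.After)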
return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketPolicy struct { -} - -func (*validateOpGetBucketPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketPolicyStatus struct { -} - -func (*validateOpGetBucketPolicyStatus) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketPolicyStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketPolicyStatusInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketPolicyStatusInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketReplication struct { -} - -func (*validateOpGetBucketReplication) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketReplicationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketReplicationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketRequestPayment struct { -} - -func (*validateOpGetBucketRequestPayment) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketRequestPaymentInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketRequestPaymentInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketTagging struct { -} - -func (*validateOpGetBucketTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketVersioning struct { -} - -func (*validateOpGetBucketVersioning) ID() string { - return 
"OperationInputValidation" -} - -func (m *validateOpGetBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketVersioningInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketVersioningInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetBucketWebsite struct { -} - -func (*validateOpGetBucketWebsite) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetBucketWebsiteInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetBucketWebsiteInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectAcl struct { -} - -func (*validateOpGetObjectAcl) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectAclInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectAclInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectAttributes struct { -} - -func (*validateOpGetObjectAttributes) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectAttributesInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectAttributesInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObject struct { -} - -func (*validateOpGetObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectLegalHold struct { -} - -func (*validateOpGetObjectLegalHold) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := 
in.Parameters.(*GetObjectLegalHoldInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectLegalHoldInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectLockConfiguration struct { -} - -func (*validateOpGetObjectLockConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectLockConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectLockConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectRetention struct { -} - -func (*validateOpGetObjectRetention) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectRetentionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectRetentionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectTagging struct { -} - -func (*validateOpGetObjectTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetObjectTorrent struct { -} - -func (*validateOpGetObjectTorrent) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetObjectTorrent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetObjectTorrentInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetObjectTorrentInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetPublicAccessBlock struct { -} - -func (*validateOpGetPublicAccessBlock) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetPublicAccessBlockInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := 
validateOpGetPublicAccessBlockInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpHeadBucket struct { -} - -func (*validateOpHeadBucket) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpHeadBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*HeadBucketInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpHeadBucketInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpHeadObject struct { -} - -func (*validateOpHeadObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpHeadObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*HeadObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpHeadObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListBucketAnalyticsConfigurations struct { -} - -func (*validateOpListBucketAnalyticsConfigurations) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListBucketAnalyticsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListBucketAnalyticsConfigurationsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListBucketIntelligentTieringConfigurations struct { -} - -func (*validateOpListBucketIntelligentTieringConfigurations) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListBucketIntelligentTieringConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListBucketIntelligentTieringConfigurationsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListBucketInventoryConfigurations struct { -} - -func (*validateOpListBucketInventoryConfigurations) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListBucketInventoryConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := 
validateOpListBucketInventoryConfigurationsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListBucketMetricsConfigurations struct { -} - -func (*validateOpListBucketMetricsConfigurations) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListBucketMetricsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListBucketMetricsConfigurationsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListMultipartUploads struct { -} - -func (*validateOpListMultipartUploads) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListMultipartUploads) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListMultipartUploadsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListMultipartUploadsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListObjects struct { -} - -func (*validateOpListObjects) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListObjectsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListObjectsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListObjectsV2 struct { -} - -func (*validateOpListObjectsV2) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListObjectsV2) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListObjectsV2Input) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListObjectsV2Input(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListObjectVersions struct { -} - -func (*validateOpListObjectVersions) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListObjectVersions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListObjectVersionsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListObjectVersionsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListParts struct { -} - 
-func (*validateOpListParts) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListParts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListPartsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListPartsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketAccelerateConfiguration struct { -} - -func (*validateOpPutBucketAccelerateConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketAccelerateConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketAcl struct { -} - -func (*validateOpPutBucketAcl) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketAclInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketAclInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketAnalyticsConfiguration struct { -} - -func (*validateOpPutBucketAnalyticsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketAnalyticsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketCors struct { -} - -func (*validateOpPutBucketCors) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketCorsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketCorsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketEncryption struct { -} - -func (*validateOpPutBucketEncryption) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketEncryption) HandleInitialize(ctx 
context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketEncryptionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketEncryptionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketIntelligentTieringConfiguration struct { -} - -func (*validateOpPutBucketIntelligentTieringConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketIntelligentTieringConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketInventoryConfiguration struct { -} - -func (*validateOpPutBucketInventoryConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketInventoryConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketLifecycleConfiguration struct { -} - -func (*validateOpPutBucketLifecycleConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketLifecycleConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketLogging struct { -} - -func (*validateOpPutBucketLogging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketLoggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketLoggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketMetricsConfiguration struct { -} - -func (*validateOpPutBucketMetricsConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m 
*validateOpPutBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketMetricsConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketNotificationConfiguration struct { -} - -func (*validateOpPutBucketNotificationConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketNotificationConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketOwnershipControls struct { -} - -func (*validateOpPutBucketOwnershipControls) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketOwnershipControlsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketOwnershipControlsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketPolicy struct { -} - -func (*validateOpPutBucketPolicy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketPolicyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketPolicyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketReplication struct { -} - -func (*validateOpPutBucketReplication) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketReplicationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketReplicationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketRequestPayment struct { -} - -func (*validateOpPutBucketRequestPayment) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketRequestPayment) 
HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketRequestPaymentInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketRequestPaymentInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketTagging struct { -} - -func (*validateOpPutBucketTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketVersioning struct { -} - -func (*validateOpPutBucketVersioning) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketVersioningInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketVersioningInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutBucketWebsite struct { -} - -func (*validateOpPutBucketWebsite) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutBucketWebsiteInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutBucketWebsiteInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectAcl struct { -} - -func (*validateOpPutObjectAcl) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectAclInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectAclInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObject struct { -} - -func (*validateOpPutObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectInput) - if !ok { - return out, 
metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectLegalHold struct { -} - -func (*validateOpPutObjectLegalHold) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectLegalHoldInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectLegalHoldInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectLockConfiguration struct { -} - -func (*validateOpPutObjectLockConfiguration) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectLockConfigurationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectLockConfigurationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectRetention struct { -} - -func (*validateOpPutObjectRetention) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectRetentionInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectRetentionInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutObjectTagging struct { -} - -func (*validateOpPutObjectTagging) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutObjectTaggingInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutObjectTaggingInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpPutPublicAccessBlock struct { -} - -func (*validateOpPutPublicAccessBlock) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpPutPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*PutPublicAccessBlockInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpPutPublicAccessBlockInput(input); err != nil { - return out, metadata, err - 
} - return next.HandleInitialize(ctx, in) -} - -type validateOpRestoreObject struct { -} - -func (*validateOpRestoreObject) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpRestoreObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*RestoreObjectInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpRestoreObjectInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpSelectObjectContent struct { -} - -func (*validateOpSelectObjectContent) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpSelectObjectContent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*SelectObjectContentInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpSelectObjectContentInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUploadPartCopy struct { -} - -func (*validateOpUploadPartCopy) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUploadPartCopy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UploadPartCopyInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUploadPartCopyInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpUploadPart struct { -} - -func (*validateOpUploadPart) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpUploadPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*UploadPartInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpUploadPartInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpWriteGetObjectResponse struct { -} - -func (*validateOpWriteGetObjectResponse) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpWriteGetObjectResponse) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*WriteGetObjectResponseInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpWriteGetObjectResponseInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpAbortMultipartUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAbortMultipartUpload{}, middleware.After) -} - -func 
addOpCompleteMultipartUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCompleteMultipartUpload{}, middleware.After) -} - -func addOpCopyObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCopyObject{}, middleware.After) -} - -func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After) -} - -func addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateBucketMetadataTableConfiguration{}, middleware.After) -} - -func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) -} - -func addOpCreateSessionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateSession{}, middleware.After) -} - -func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After) -} - -func addOpDeleteBucketCorsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketCors{}, middleware.After) -} - -func addOpDeleteBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketEncryption{}, middleware.After) -} - -func addOpDeleteBucketValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucket{}, middleware.After) -} - -func addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) -} - -func addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketInventoryConfiguration{}, middleware.After) -} - -func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After) -} - -func addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketMetadataTableConfiguration{}, middleware.After) -} - -func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After) -} - -func addOpDeleteBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketOwnershipControls{}, middleware.After) -} - -func addOpDeleteBucketPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketPolicy{}, middleware.After) -} - -func addOpDeleteBucketReplicationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketReplication{}, middleware.After) -} - -func addOpDeleteBucketTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteBucketTagging{}, middleware.After) -} - -func addOpDeleteBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { - return 
stack.Initialize.Add(&validateOpDeleteBucketWebsite{}, middleware.After) -} - -func addOpDeleteObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteObject{}, middleware.After) -} - -func addOpDeleteObjectsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteObjects{}, middleware.After) -} - -func addOpDeleteObjectTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeleteObjectTagging{}, middleware.After) -} - -func addOpDeletePublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDeletePublicAccessBlock{}, middleware.After) -} - -func addOpGetBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketAccelerateConfiguration{}, middleware.After) -} - -func addOpGetBucketAclValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketAcl{}, middleware.After) -} - -func addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketAnalyticsConfiguration{}, middleware.After) -} - -func addOpGetBucketCorsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketCors{}, middleware.After) -} - -func addOpGetBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketEncryption{}, middleware.After) -} - -func addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketIntelligentTieringConfiguration{}, middleware.After) -} - -func addOpGetBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketInventoryConfiguration{}, middleware.After) -} - -func addOpGetBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketLifecycleConfiguration{}, middleware.After) -} - -func addOpGetBucketLocationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketLocation{}, middleware.After) -} - -func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After) -} - -func addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketMetadataTableConfiguration{}, middleware.After) -} - -func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After) -} - -func addOpGetBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketNotificationConfiguration{}, middleware.After) -} - -func addOpGetBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketOwnershipControls{}, middleware.After) -} - -func addOpGetBucketPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketPolicy{}, middleware.After) -} - -func 
addOpGetBucketPolicyStatusValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketPolicyStatus{}, middleware.After) -} - -func addOpGetBucketReplicationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketReplication{}, middleware.After) -} - -func addOpGetBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketRequestPayment{}, middleware.After) -} - -func addOpGetBucketTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketTagging{}, middleware.After) -} - -func addOpGetBucketVersioningValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketVersioning{}, middleware.After) -} - -func addOpGetBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetBucketWebsite{}, middleware.After) -} - -func addOpGetObjectAclValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectAcl{}, middleware.After) -} - -func addOpGetObjectAttributesValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectAttributes{}, middleware.After) -} - -func addOpGetObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObject{}, middleware.After) -} - -func addOpGetObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectLegalHold{}, middleware.After) -} - -func addOpGetObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectLockConfiguration{}, middleware.After) -} - -func addOpGetObjectRetentionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectRetention{}, middleware.After) -} - -func addOpGetObjectTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectTagging{}, middleware.After) -} - -func addOpGetObjectTorrentValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetObjectTorrent{}, middleware.After) -} - -func addOpGetPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetPublicAccessBlock{}, middleware.After) -} - -func addOpHeadBucketValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpHeadBucket{}, middleware.After) -} - -func addOpHeadObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpHeadObject{}, middleware.After) -} - -func addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListBucketAnalyticsConfigurations{}, middleware.After) -} - -func addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListBucketIntelligentTieringConfigurations{}, middleware.After) -} - -func addOpListBucketInventoryConfigurationsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListBucketInventoryConfigurations{}, middleware.After) -} - -func addOpListBucketMetricsConfigurationsValidationMiddleware(stack *middleware.Stack) error { - return 
stack.Initialize.Add(&validateOpListBucketMetricsConfigurations{}, middleware.After) -} - -func addOpListMultipartUploadsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListMultipartUploads{}, middleware.After) -} - -func addOpListObjectsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListObjects{}, middleware.After) -} - -func addOpListObjectsV2ValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListObjectsV2{}, middleware.After) -} - -func addOpListObjectVersionsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListObjectVersions{}, middleware.After) -} - -func addOpListPartsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListParts{}, middleware.After) -} - -func addOpPutBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketAccelerateConfiguration{}, middleware.After) -} - -func addOpPutBucketAclValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketAcl{}, middleware.After) -} - -func addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketAnalyticsConfiguration{}, middleware.After) -} - -func addOpPutBucketCorsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketCors{}, middleware.After) -} - -func addOpPutBucketEncryptionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketEncryption{}, middleware.After) -} - -func addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketIntelligentTieringConfiguration{}, middleware.After) -} - -func addOpPutBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketInventoryConfiguration{}, middleware.After) -} - -func addOpPutBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketLifecycleConfiguration{}, middleware.After) -} - -func addOpPutBucketLoggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketLogging{}, middleware.After) -} - -func addOpPutBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketMetricsConfiguration{}, middleware.After) -} - -func addOpPutBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketNotificationConfiguration{}, middleware.After) -} - -func addOpPutBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketOwnershipControls{}, middleware.After) -} - -func addOpPutBucketPolicyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketPolicy{}, middleware.After) -} - -func addOpPutBucketReplicationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketReplication{}, middleware.After) -} - -func addOpPutBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error { - return 
stack.Initialize.Add(&validateOpPutBucketRequestPayment{}, middleware.After) -} - -func addOpPutBucketTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketTagging{}, middleware.After) -} - -func addOpPutBucketVersioningValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketVersioning{}, middleware.After) -} - -func addOpPutBucketWebsiteValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutBucketWebsite{}, middleware.After) -} - -func addOpPutObjectAclValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectAcl{}, middleware.After) -} - -func addOpPutObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObject{}, middleware.After) -} - -func addOpPutObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectLegalHold{}, middleware.After) -} - -func addOpPutObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectLockConfiguration{}, middleware.After) -} - -func addOpPutObjectRetentionValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectRetention{}, middleware.After) -} - -func addOpPutObjectTaggingValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutObjectTagging{}, middleware.After) -} - -func addOpPutPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpPutPublicAccessBlock{}, middleware.After) -} - -func addOpRestoreObjectValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpRestoreObject{}, middleware.After) -} - -func addOpSelectObjectContentValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpSelectObjectContent{}, middleware.After) -} - -func addOpUploadPartCopyValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUploadPartCopy{}, middleware.After) -} - -func addOpUploadPartValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpUploadPart{}, middleware.After) -} - -func addOpWriteGetObjectResponseValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpWriteGetObjectResponse{}, middleware.After) -} - -func validateAccessControlPolicy(v *types.AccessControlPolicy) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AccessControlPolicy"} - if v.Grants != nil { - if err := validateGrants(v.Grants); err != nil { - invalidParams.AddNested("Grants", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAccessControlTranslation(v *types.AccessControlTranslation) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AccessControlTranslation"} - if len(v.Owner) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Owner")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsAndOperator(v *types.AnalyticsAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsAndOperator"} - if 
v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsConfiguration(v *types.AnalyticsConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsConfiguration"} - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.Filter != nil { - if err := validateAnalyticsFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if v.StorageClassAnalysis == nil { - invalidParams.Add(smithy.NewErrParamRequired("StorageClassAnalysis")) - } else if v.StorageClassAnalysis != nil { - if err := validateStorageClassAnalysis(v.StorageClassAnalysis); err != nil { - invalidParams.AddNested("StorageClassAnalysis", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsExportDestination(v *types.AnalyticsExportDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsExportDestination"} - if v.S3BucketDestination == nil { - invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination")) - } else if v.S3BucketDestination != nil { - if err := validateAnalyticsS3BucketDestination(v.S3BucketDestination); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsFilter(v types.AnalyticsFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsFilter"} - switch uv := v.(type) { - case *types.AnalyticsFilterMemberAnd: - if err := validateAnalyticsAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) - } - - case *types.AnalyticsFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) - } - - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AnalyticsS3BucketDestination"} - if len(v.Format) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Format")) - } - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BucketLifecycleConfiguration"} - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateLifecycleRules(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateBucketLoggingStatus(v *types.BucketLoggingStatus) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "BucketLoggingStatus"} - if v.LoggingEnabled != nil { - if err := 
validateLoggingEnabled(v.LoggingEnabled); err != nil { - invalidParams.AddNested("LoggingEnabled", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateCORSConfiguration(v *types.CORSConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CORSConfiguration"} - if v.CORSRules == nil { - invalidParams.Add(smithy.NewErrParamRequired("CORSRules")) - } else if v.CORSRules != nil { - if err := validateCORSRules(v.CORSRules); err != nil { - invalidParams.AddNested("CORSRules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateCORSRule(v *types.CORSRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CORSRule"} - if v.AllowedMethods == nil { - invalidParams.Add(smithy.NewErrParamRequired("AllowedMethods")) - } - if v.AllowedOrigins == nil { - invalidParams.Add(smithy.NewErrParamRequired("AllowedOrigins")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateCORSRules(v []types.CORSRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CORSRules"} - for i := range v { - if err := validateCORSRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateDelete(v *types.Delete) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Delete"} - if v.Objects == nil { - invalidParams.Add(smithy.NewErrParamRequired("Objects")) - } else if v.Objects != nil { - if err := validateObjectIdentifierList(v.Objects); err != nil { - invalidParams.AddNested("Objects", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateDestination(v *types.Destination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Destination"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.AccessControlTranslation != nil { - if err := validateAccessControlTranslation(v.AccessControlTranslation); err != nil { - invalidParams.AddNested("AccessControlTranslation", err.(smithy.InvalidParamsError)) - } - } - if v.ReplicationTime != nil { - if err := validateReplicationTime(v.ReplicationTime); err != nil { - invalidParams.AddNested("ReplicationTime", err.(smithy.InvalidParamsError)) - } - } - if v.Metrics != nil { - if err := validateMetrics(v.Metrics); err != nil { - invalidParams.AddNested("Metrics", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateEncryption(v *types.Encryption) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Encryption"} - if len(v.EncryptionType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("EncryptionType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateErrorDocument(v *types.ErrorDocument) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ErrorDocument"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) 
- } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateExistingObjectReplication(v *types.ExistingObjectReplication) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ExistingObjectReplication"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateGlacierJobParameters(v *types.GlacierJobParameters) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GlacierJobParameters"} - if len(v.Tier) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Tier")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateGrant(v *types.Grant) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Grant"} - if v.Grantee != nil { - if err := validateGrantee(v.Grantee); err != nil { - invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateGrantee(v *types.Grantee) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Grantee"} - if len(v.Type) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Type")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateGrants(v []types.Grant) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Grants"} - for i := range v { - if err := validateGrant(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateIndexDocument(v *types.IndexDocument) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "IndexDocument"} - if v.Suffix == nil { - invalidParams.Add(smithy.NewErrParamRequired("Suffix")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringConfiguration"} - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.Filter != nil { - if err := validateIntelligentTieringFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if v.Tierings == nil { - invalidParams.Add(smithy.NewErrParamRequired("Tierings")) - } else if v.Tierings != nil { - if err := validateTieringList(v.Tierings); err != nil { - invalidParams.AddNested("Tierings", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } 
else { - return nil - } -} - -func validateIntelligentTieringFilter(v *types.IntelligentTieringFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringFilter"} - if v.Tag != nil { - if err := validateTag(v.Tag); err != nil { - invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) - } - } - if v.And != nil { - if err := validateIntelligentTieringAndOperator(v.And); err != nil { - invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryConfiguration(v *types.InventoryConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryConfiguration"} - if v.Destination == nil { - invalidParams.Add(smithy.NewErrParamRequired("Destination")) - } else if v.Destination != nil { - if err := validateInventoryDestination(v.Destination); err != nil { - invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) - } - } - if v.IsEnabled == nil { - invalidParams.Add(smithy.NewErrParamRequired("IsEnabled")) - } - if v.Filter != nil { - if err := validateInventoryFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if len(v.IncludedObjectVersions) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("IncludedObjectVersions")) - } - if v.Schedule == nil { - invalidParams.Add(smithy.NewErrParamRequired("Schedule")) - } else if v.Schedule != nil { - if err := validateInventorySchedule(v.Schedule); err != nil { - invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryDestination(v *types.InventoryDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryDestination"} - if v.S3BucketDestination == nil { - invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination")) - } else if v.S3BucketDestination != nil { - if err := validateInventoryS3BucketDestination(v.S3BucketDestination); err != nil { - invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryEncryption(v *types.InventoryEncryption) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryEncryption"} - if v.SSEKMS != nil { - if err := validateSSEKMS(v.SSEKMS); err != nil { - invalidParams.AddNested("SSEKMS", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryFilter(v *types.InventoryFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryFilter"} - if v.Prefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("Prefix")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventoryS3BucketDestination(v *types.InventoryS3BucketDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventoryS3BucketDestination"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if len(v.Format) 
== 0 { - invalidParams.Add(smithy.NewErrParamRequired("Format")) - } - if v.Encryption != nil { - if err := validateInventoryEncryption(v.Encryption); err != nil { - invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateInventorySchedule(v *types.InventorySchedule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "InventorySchedule"} - if len(v.Frequency) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Frequency")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfiguration"} - if v.LambdaFunctionArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("LambdaFunctionArn")) - } - if v.Events == nil { - invalidParams.Add(smithy.NewErrParamRequired("Events")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfigurationList"} - for i := range v { - if err := validateLambdaFunctionConfiguration(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLifecycleRule(v *types.LifecycleRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LifecycleRule"} - if v.Filter != nil { - if err := validateLifecycleRuleFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLifecycleRuleFilter(v *types.LifecycleRuleFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"} - if v.Tag != nil { - if err := validateTag(v.Tag); err != nil { - invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) - } - } - if v.And != nil { - if err := validateLifecycleRuleAndOperator(v.And); err != nil { - invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateLifecycleRules(v []types.LifecycleRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LifecycleRules"} - for i := range v { - if err := validateLifecycleRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else 
{ - return nil - } -} - -func validateLoggingEnabled(v *types.LoggingEnabled) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LoggingEnabled"} - if v.TargetBucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("TargetBucket")) - } - if v.TargetGrants != nil { - if err := validateTargetGrants(v.TargetGrants); err != nil { - invalidParams.AddNested("TargetGrants", err.(smithy.InvalidParamsError)) - } - } - if v.TargetPrefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("TargetPrefix")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetadataTableConfiguration(v *types.MetadataTableConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetadataTableConfiguration"} - if v.S3TablesDestination == nil { - invalidParams.Add(smithy.NewErrParamRequired("S3TablesDestination")) - } else if v.S3TablesDestination != nil { - if err := validateS3TablesDestination(v.S3TablesDestination); err != nil { - invalidParams.AddNested("S3TablesDestination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetrics(v *types.Metrics) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Metrics"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetricsAndOperator(v *types.MetricsAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetricsAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetricsConfiguration(v *types.MetricsConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetricsConfiguration"} - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.Filter != nil { - if err := validateMetricsFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateMetricsFilter(v types.MetricsFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "MetricsFilter"} - switch uv := v.(type) { - case *types.MetricsFilterMemberAnd: - if err := validateMetricsAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) - } - - case *types.MetricsFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) - } - - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateNotificationConfiguration(v *types.NotificationConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "NotificationConfiguration"} - if v.TopicConfigurations != nil { - if err := validateTopicConfigurationList(v.TopicConfigurations); err != nil { - invalidParams.AddNested("TopicConfigurations", err.(smithy.InvalidParamsError)) - } - } - if v.QueueConfigurations != nil { - 
if err := validateQueueConfigurationList(v.QueueConfigurations); err != nil { - invalidParams.AddNested("QueueConfigurations", err.(smithy.InvalidParamsError)) - } - } - if v.LambdaFunctionConfigurations != nil { - if err := validateLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations); err != nil { - invalidParams.AddNested("LambdaFunctionConfigurations", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateObjectIdentifier(v *types.ObjectIdentifier) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifier"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateObjectIdentifierList(v []types.ObjectIdentifier) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifierList"} - for i := range v { - if err := validateObjectIdentifier(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOutputLocation(v *types.OutputLocation) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "OutputLocation"} - if v.S3 != nil { - if err := validateS3Location(v.S3); err != nil { - invalidParams.AddNested("S3", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOwnershipControls(v *types.OwnershipControls) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "OwnershipControls"} - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateOwnershipControlsRules(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOwnershipControlsRule(v *types.OwnershipControlsRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRule"} - if len(v.ObjectOwnership) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ObjectOwnership")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOwnershipControlsRules(v []types.OwnershipControlsRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRules"} - for i := range v { - if err := validateOwnershipControlsRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateQueueConfiguration(v *types.QueueConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "QueueConfiguration"} - if v.QueueArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("QueueArn")) - } - if v.Events == nil { - invalidParams.Add(smithy.NewErrParamRequired("Events")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateQueueConfigurationList(v []types.QueueConfiguration) error { - if v == nil { - 
return nil - } - invalidParams := smithy.InvalidParamsError{Context: "QueueConfigurationList"} - for i := range v { - if err := validateQueueConfiguration(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRedirectAllRequestsTo(v *types.RedirectAllRequestsTo) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RedirectAllRequestsTo"} - if v.HostName == nil { - invalidParams.Add(smithy.NewErrParamRequired("HostName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicaModifications(v *types.ReplicaModifications) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicaModifications"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationConfiguration(v *types.ReplicationConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"} - if v.Role == nil { - invalidParams.Add(smithy.NewErrParamRequired("Role")) - } - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateReplicationRules(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRule(v *types.ReplicationRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"} - if v.Filter != nil { - if err := validateReplicationRuleFilter(v.Filter); err != nil { - invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) - } - } - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if v.SourceSelectionCriteria != nil { - if err := validateSourceSelectionCriteria(v.SourceSelectionCriteria); err != nil { - invalidParams.AddNested("SourceSelectionCriteria", err.(smithy.InvalidParamsError)) - } - } - if v.ExistingObjectReplication != nil { - if err := validateExistingObjectReplication(v.ExistingObjectReplication); err != nil { - invalidParams.AddNested("ExistingObjectReplication", err.(smithy.InvalidParamsError)) - } - } - if v.Destination == nil { - invalidParams.Add(smithy.NewErrParamRequired("Destination")) - } else if v.Destination != nil { - if err := validateDestination(v.Destination); err != nil { - invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleAndOperator"} - if v.Tags != nil { - if err := validateTagSet(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRuleFilter(v *types.ReplicationRuleFilter) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"} - if v.Tag != nil 
{ - if err := validateTag(v.Tag); err != nil { - invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) - } - } - if v.And != nil { - if err := validateReplicationRuleAndOperator(v.And); err != nil { - invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationRules(v []types.ReplicationRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationRules"} - for i := range v { - if err := validateReplicationRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateReplicationTime(v *types.ReplicationTime) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ReplicationTime"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if v.Time == nil { - invalidParams.Add(smithy.NewErrParamRequired("Time")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRequestPaymentConfiguration(v *types.RequestPaymentConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RequestPaymentConfiguration"} - if len(v.Payer) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Payer")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRestoreRequest(v *types.RestoreRequest) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RestoreRequest"} - if v.GlacierJobParameters != nil { - if err := validateGlacierJobParameters(v.GlacierJobParameters); err != nil { - invalidParams.AddNested("GlacierJobParameters", err.(smithy.InvalidParamsError)) - } - } - if v.SelectParameters != nil { - if err := validateSelectParameters(v.SelectParameters); err != nil { - invalidParams.AddNested("SelectParameters", err.(smithy.InvalidParamsError)) - } - } - if v.OutputLocation != nil { - if err := validateOutputLocation(v.OutputLocation); err != nil { - invalidParams.AddNested("OutputLocation", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRoutingRule(v *types.RoutingRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RoutingRule"} - if v.Redirect == nil { - invalidParams.Add(smithy.NewErrParamRequired("Redirect")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateRoutingRules(v []types.RoutingRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RoutingRules"} - for i := range v { - if err := validateRoutingRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateS3Location(v *types.S3Location) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "S3Location"} - if v.BucketName == nil { - invalidParams.Add(smithy.NewErrParamRequired("BucketName")) - } - if v.Prefix == nil { - invalidParams.Add(smithy.NewErrParamRequired("Prefix")) - } - if v.Encryption != nil { - if err 
:= validateEncryption(v.Encryption); err != nil { - invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError)) - } - } - if v.AccessControlList != nil { - if err := validateGrants(v.AccessControlList); err != nil { - invalidParams.AddNested("AccessControlList", err.(smithy.InvalidParamsError)) - } - } - if v.Tagging != nil { - if err := validateTagging(v.Tagging); err != nil { - invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateS3TablesDestination(v *types.S3TablesDestination) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "S3TablesDestination"} - if v.TableBucketArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("TableBucketArn")) - } - if v.TableName == nil { - invalidParams.Add(smithy.NewErrParamRequired("TableName")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateSelectParameters(v *types.SelectParameters) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SelectParameters"} - if v.InputSerialization == nil { - invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) - } - if len(v.ExpressionType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) - } - if v.Expression == nil { - invalidParams.Add(smithy.NewErrParamRequired("Expression")) - } - if v.OutputSerialization == nil { - invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionByDefault"} - if len(v.SSEAlgorithm) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("SSEAlgorithm")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionConfiguration"} - if v.Rules == nil { - invalidParams.Add(smithy.NewErrParamRequired("Rules")) - } else if v.Rules != nil { - if err := validateServerSideEncryptionRules(v.Rules); err != nil { - invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateServerSideEncryptionRule(v *types.ServerSideEncryptionRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRule"} - if v.ApplyServerSideEncryptionByDefault != nil { - if err := validateServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault); err != nil { - invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateServerSideEncryptionRules(v []types.ServerSideEncryptionRule) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRules"} - for i := range v { - if err := validateServerSideEncryptionRule(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), 
err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateSourceSelectionCriteria(v *types.SourceSelectionCriteria) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SourceSelectionCriteria"} - if v.SseKmsEncryptedObjects != nil { - if err := validateSseKmsEncryptedObjects(v.SseKmsEncryptedObjects); err != nil { - invalidParams.AddNested("SseKmsEncryptedObjects", err.(smithy.InvalidParamsError)) - } - } - if v.ReplicaModifications != nil { - if err := validateReplicaModifications(v.ReplicaModifications); err != nil { - invalidParams.AddNested("ReplicaModifications", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateSSEKMS(v *types.SSEKMS) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SSEKMS"} - if v.KeyId == nil { - invalidParams.Add(smithy.NewErrParamRequired("KeyId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SseKmsEncryptedObjects"} - if len(v.Status) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("Status")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateStorageClassAnalysis(v *types.StorageClassAnalysis) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysis"} - if v.DataExport != nil { - if err := validateStorageClassAnalysisDataExport(v.DataExport); err != nil { - invalidParams.AddNested("DataExport", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysisDataExport"} - if len(v.OutputSchemaVersion) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("OutputSchemaVersion")) - } - if v.Destination == nil { - invalidParams.Add(smithy.NewErrParamRequired("Destination")) - } else if v.Destination != nil { - if err := validateAnalyticsExportDestination(v.Destination); err != nil { - invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTag(v *types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tag"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Value == nil { - invalidParams.Add(smithy.NewErrParamRequired("Value")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTagging(v *types.Tagging) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tagging"} - if v.TagSet == nil { - invalidParams.Add(smithy.NewErrParamRequired("TagSet")) - } else if v.TagSet != nil { - if err := validateTagSet(v.TagSet); err != nil { - invalidParams.AddNested("TagSet", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTagSet(v 
[]types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TagSet"} - for i := range v { - if err := validateTag(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTargetGrant(v *types.TargetGrant) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TargetGrant"} - if v.Grantee != nil { - if err := validateGrantee(v.Grantee); err != nil { - invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTargetGrants(v []types.TargetGrant) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TargetGrants"} - for i := range v { - if err := validateTargetGrant(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTiering(v *types.Tiering) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tiering"} - if v.Days == nil { - invalidParams.Add(smithy.NewErrParamRequired("Days")) - } - if len(v.AccessTier) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("AccessTier")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTieringList(v []types.Tiering) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TieringList"} - for i := range v { - if err := validateTiering(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTopicConfiguration(v *types.TopicConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TopicConfiguration"} - if v.TopicArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("TopicArn")) - } - if v.Events == nil { - invalidParams.Add(smithy.NewErrParamRequired("Events")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTopicConfigurationList(v []types.TopicConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TopicConfigurationList"} - for i := range v { - if err := validateTopicConfiguration(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateWebsiteConfiguration(v *types.WebsiteConfiguration) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "WebsiteConfiguration"} - if v.ErrorDocument != nil { - if err := validateErrorDocument(v.ErrorDocument); err != nil { - invalidParams.AddNested("ErrorDocument", err.(smithy.InvalidParamsError)) - } - } - if v.IndexDocument != nil { - if err := validateIndexDocument(v.IndexDocument); err != nil { - invalidParams.AddNested("IndexDocument", err.(smithy.InvalidParamsError)) - } - } - if v.RedirectAllRequestsTo != nil { - if err := validateRedirectAllRequestsTo(v.RedirectAllRequestsTo); err != nil { - 
invalidParams.AddNested("RedirectAllRequestsTo", err.(smithy.InvalidParamsError)) - } - } - if v.RoutingRules != nil { - if err := validateRoutingRules(v.RoutingRules); err != nil { - invalidParams.AddNested("RoutingRules", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAbortMultipartUploadInput(v *AbortMultipartUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AbortMultipartUploadInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCompleteMultipartUploadInput(v *CompleteMultipartUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CompleteMultipartUploadInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCopyObjectInput(v *CopyObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CopyObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.CopySource == nil { - invalidParams.Add(smithy.NewErrParamRequired("CopySource")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateBucketInput(v *CreateBucketInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateBucketInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateBucketMetadataTableConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.MetadataTableConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("MetadataTableConfiguration")) - } else if v.MetadataTableConfiguration != nil { - if err := validateMetadataTableConfiguration(v.MetadataTableConfiguration); err != nil { - invalidParams.AddNested("MetadataTableConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateMultipartUploadInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateSessionInput(v 
*CreateSessionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateSessionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketAnalyticsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketCorsInput(v *DeleteBucketCorsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketCorsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketEncryptionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketInput(v *DeleteBucketInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketIntelligentTieringConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInventoryConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketLifecycleInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetadataTableConfigurationInput"} - if v.Bucket == nil { - 
invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetricsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketOwnershipControlsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketPolicyInput(v *DeleteBucketPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketPolicyInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketReplicationInput(v *DeleteBucketReplicationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketReplicationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketTaggingInput(v *DeleteBucketTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketWebsiteInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteObjectInput(v *DeleteObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteObjectsInput(v *DeleteObjectsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Delete == nil { - invalidParams.Add(smithy.NewErrParamRequired("Delete")) - } else if v.Delete != nil { - if err := validateDelete(v.Delete); err != nil { - invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeleteObjectTaggingInput(v 
*DeleteObjectTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DeletePublicAccessBlockInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketAccelerateConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketAclInput(v *GetBucketAclInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketAclInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketAnalyticsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketCorsInput(v *GetBucketCorsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketCorsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketEncryptionInput(v *GetBucketEncryptionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketEncryptionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketIntelligentTieringConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketInventoryConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { 
- invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketLifecycleConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketLocationInput(v *GetBucketLocationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketLocationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketLoggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetadataTableConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetricsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketNotificationConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketOwnershipControlsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketPolicyInput(v *GetBucketPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyStatusInput"} - if v.Bucket == nil { - 
invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketReplicationInput(v *GetBucketReplicationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketReplicationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketRequestPaymentInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketTaggingInput(v *GetBucketTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketVersioningInput(v *GetBucketVersioningInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketVersioningInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetBucketWebsiteInput(v *GetBucketWebsiteInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetBucketWebsiteInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectAclInput(v *GetObjectAclInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectAclInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectAttributesInput(v *GetObjectAttributesInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectAttributesInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.ObjectAttributes == nil { - invalidParams.Add(smithy.NewErrParamRequired("ObjectAttributes")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectInput(v *GetObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectLegalHoldInput(v *GetObjectLegalHoldInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectLegalHoldInput"} - if 
v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectLockConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectRetentionInput(v *GetObjectRetentionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectRetentionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectTaggingInput(v *GetObjectTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetObjectTorrentInput(v *GetObjectTorrentInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetObjectTorrentInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetPublicAccessBlockInput(v *GetPublicAccessBlockInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetPublicAccessBlockInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpHeadBucketInput(v *HeadBucketInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "HeadBucketInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpHeadObjectInput(v *HeadObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "HeadObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListBucketAnalyticsConfigurationsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListBucketIntelligentTieringConfigurationsInput(v 
*ListBucketIntelligentTieringConfigurationsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListBucketIntelligentTieringConfigurationsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListBucketInventoryConfigurationsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListBucketMetricsConfigurationsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListMultipartUploadsInput(v *ListMultipartUploadsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListMultipartUploadsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListObjectsInput(v *ListObjectsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListObjectsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListObjectsV2Input(v *ListObjectsV2Input) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListObjectsV2Input"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListObjectVersionsInput(v *ListObjectVersionsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListObjectVersionsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListPartsInput(v *ListPartsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListPartsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketAccelerateConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.AccelerateConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccelerateConfiguration")) - } - if 
invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketAclInput(v *PutBucketAclInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketAclInput"} - if v.AccessControlPolicy != nil { - if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) - } - } - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketAnalyticsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.AnalyticsConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("AnalyticsConfiguration")) - } else if v.AnalyticsConfiguration != nil { - if err := validateAnalyticsConfiguration(v.AnalyticsConfiguration); err != nil { - invalidParams.AddNested("AnalyticsConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketCorsInput(v *PutBucketCorsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketCorsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.CORSConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("CORSConfiguration")) - } else if v.CORSConfiguration != nil { - if err := validateCORSConfiguration(v.CORSConfiguration); err != nil { - invalidParams.AddNested("CORSConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketEncryptionInput(v *PutBucketEncryptionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketEncryptionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.ServerSideEncryptionConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("ServerSideEncryptionConfiguration")) - } else if v.ServerSideEncryptionConfiguration != nil { - if err := validateServerSideEncryptionConfiguration(v.ServerSideEncryptionConfiguration); err != nil { - invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketIntelligentTieringConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.IntelligentTieringConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("IntelligentTieringConfiguration")) - } else if v.IntelligentTieringConfiguration != nil { - if err := validateIntelligentTieringConfiguration(v.IntelligentTieringConfiguration); 
err != nil { - invalidParams.AddNested("IntelligentTieringConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketInventoryConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.InventoryConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("InventoryConfiguration")) - } else if v.InventoryConfiguration != nil { - if err := validateInventoryConfiguration(v.InventoryConfiguration); err != nil { - invalidParams.AddNested("InventoryConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketLifecycleConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.LifecycleConfiguration != nil { - if err := validateBucketLifecycleConfiguration(v.LifecycleConfiguration); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketLoggingInput(v *PutBucketLoggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketLoggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.BucketLoggingStatus == nil { - invalidParams.Add(smithy.NewErrParamRequired("BucketLoggingStatus")) - } else if v.BucketLoggingStatus != nil { - if err := validateBucketLoggingStatus(v.BucketLoggingStatus); err != nil { - invalidParams.AddNested("BucketLoggingStatus", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketMetricsConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Id == nil { - invalidParams.Add(smithy.NewErrParamRequired("Id")) - } - if v.MetricsConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("MetricsConfiguration")) - } else if v.MetricsConfiguration != nil { - if err := validateMetricsConfiguration(v.MetricsConfiguration); err != nil { - invalidParams.AddNested("MetricsConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketNotificationConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.NotificationConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("NotificationConfiguration")) - } 
else if v.NotificationConfiguration != nil { - if err := validateNotificationConfiguration(v.NotificationConfiguration); err != nil { - invalidParams.AddNested("NotificationConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketOwnershipControlsInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.OwnershipControls == nil { - invalidParams.Add(smithy.NewErrParamRequired("OwnershipControls")) - } else if v.OwnershipControls != nil { - if err := validateOwnershipControls(v.OwnershipControls); err != nil { - invalidParams.AddNested("OwnershipControls", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketPolicyInput(v *PutBucketPolicyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketPolicyInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Policy == nil { - invalidParams.Add(smithy.NewErrParamRequired("Policy")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketReplicationInput(v *PutBucketReplicationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketReplicationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.ReplicationConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration")) - } else if v.ReplicationConfiguration != nil { - if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketRequestPaymentInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.RequestPaymentConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("RequestPaymentConfiguration")) - } else if v.RequestPaymentConfiguration != nil { - if err := validateRequestPaymentConfiguration(v.RequestPaymentConfiguration); err != nil { - invalidParams.AddNested("RequestPaymentConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketTaggingInput(v *PutBucketTaggingInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Tagging == nil { - invalidParams.Add(smithy.NewErrParamRequired("Tagging")) - } else if v.Tagging != nil { - if err := validateTagging(v.Tagging); err != nil { - invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketVersioningInput(v 
*PutBucketVersioningInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketVersioningInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.VersioningConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("VersioningConfiguration")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutBucketWebsiteInput(v *PutBucketWebsiteInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutBucketWebsiteInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.WebsiteConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("WebsiteConfiguration")) - } else if v.WebsiteConfiguration != nil { - if err := validateWebsiteConfiguration(v.WebsiteConfiguration); err != nil { - invalidParams.AddNested("WebsiteConfiguration", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectAclInput(v *PutObjectAclInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectAclInput"} - if v.AccessControlPolicy != nil { - if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) - } - } - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectInput(v *PutObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectLegalHoldInput(v *PutObjectLegalHoldInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectLegalHoldInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectLockConfigurationInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectRetentionInput(v *PutObjectRetentionInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutObjectRetentionInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutObjectTaggingInput(v *PutObjectTaggingInput) error { - if v == nil { - return nil - } - invalidParams := 
smithy.InvalidParamsError{Context: "PutObjectTaggingInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Tagging == nil { - invalidParams.Add(smithy.NewErrParamRequired("Tagging")) - } else if v.Tagging != nil { - if err := validateTagging(v.Tagging); err != nil { - invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpPutPublicAccessBlockInput(v *PutPublicAccessBlockInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "PutPublicAccessBlockInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.PublicAccessBlockConfiguration == nil { - invalidParams.Add(smithy.NewErrParamRequired("PublicAccessBlockConfiguration")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpRestoreObjectInput(v *RestoreObjectInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RestoreObjectInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.RestoreRequest != nil { - if err := validateRestoreRequest(v.RestoreRequest); err != nil { - invalidParams.AddNested("RestoreRequest", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpSelectObjectContentInput(v *SelectObjectContentInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "SelectObjectContentInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Expression == nil { - invalidParams.Add(smithy.NewErrParamRequired("Expression")) - } - if len(v.ExpressionType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("ExpressionType")) - } - if v.InputSerialization == nil { - invalidParams.Add(smithy.NewErrParamRequired("InputSerialization")) - } - if v.OutputSerialization == nil { - invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "UploadPartCopyInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.CopySource == nil { - invalidParams.Add(smithy.NewErrParamRequired("CopySource")) - } - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.PartNumber == nil { - invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpUploadPartInput(v *UploadPartInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "UploadPartInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } - if v.Key == nil { - 
invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.PartNumber == nil { - invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) - } - if v.UploadId == nil { - invalidParams.Add(smithy.NewErrParamRequired("UploadId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpWriteGetObjectResponseInput(v *WriteGetObjectResponseInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "WriteGetObjectResponseInput"} - if v.RequestRoute == nil { - invalidParams.Add(smithy.NewErrParamRequired("RequestRoute")) - } - if v.RequestToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("RequestToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md deleted file mode 100644 index 2e35d6cb4..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ /dev/null @@ -1,559 +0,0 @@ -# v1.24.10 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.9 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.8 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.7 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.6 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.5 (2024-11-07) - -* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses - -# v1.24.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.4 (2024-10-03) - -* No change notes available for this release. - -# v1.23.3 (2024-09-27) - -* No change notes available for this release. - -# v1.23.2 (2024-09-25) - -* No change notes available for this release. - -# v1.23.1 (2024-09-23) - -* No change notes available for this release. - -# v1.23.0 (2024-09-20) - -* **Feature**: Add tracing and metrics support to service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.8 (2024-09-17) - -* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. - -# v1.22.7 (2024-09-04) - -* No change notes available for this release. - -# v1.22.6 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.5 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.4 (2024-07-18) - -* No change notes available for this release. 
- -# v1.22.3 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.2 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.0 (2024-06-26) - -* **Feature**: Support list-of-string endpoint parameter. - -# v1.21.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.12 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.11 (2024-06-07) - -* **Bug Fix**: Add clock skew correction on all service clients -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.10 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.9 (2024-05-23) - -* No change notes available for this release. - -# v1.20.8 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.7 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.6 (2024-05-08) - -* **Bug Fix**: GoDoc improvement - -# v1.20.5 (2024-04-05) - -* No change notes available for this release. - -# v1.20.4 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.3 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.2 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.19.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.19.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2024-01-18) - -* No change notes available for this release. - -# v1.18.6 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.18.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.18.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.4 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.17.3 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2023-10-02) - -* **Feature**: Fix FIPS Endpoints in aws-us-gov. - -# v1.14.1 (2023-09-22) - -* No change notes available for this release. - -# v1.14.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.13.6 (2023-08-31) - -* No change notes available for this release. - -# v1.13.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2023-08-01) - -* No change notes available for this release. - -# v1.13.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.14 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2023-06-15) - -* No change notes available for this release. 
- -# v1.12.11 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2023-05-04) - -* No change notes available for this release. - -# v1.12.9 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2023-04-10) - -* No change notes available for this release. - -# v1.12.7 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.5 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.12.3 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2023-02-15) - -* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. -* **Bug Fix**: Correct error type parsing for restJson services. - -# v1.12.1 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.11.28 (2022-12-20) - -* No change notes available for this release. - -# v1.11.27 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.26 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.25 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.24 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.23 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.22 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.21 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.20 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.19 (2022-08-30) - -* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference. - -# v1.11.18 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.17 (2022-08-15) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) - -# v1.11.16 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.15 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.14 (2022-08-08) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.13 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.12 (2022-07-11) - -* No change notes available for this release. - -# v1.11.11 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.10 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.9 (2022-06-16) - -* No change notes available for this release. 
- -# v1.11.8 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.7 (2022-05-26) - -* No change notes available for this release. - -# v1.11.6 (2022-05-25) - -* No change notes available for this release. - -# v1.11.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Documentation**: Updated API models -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-12-21) - -* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. - -# v1.6.2 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Feature**: Updated service to latest API model. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go deleted file mode 100644 index 644ee1e05..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ /dev/null @@ -1,912 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "sync/atomic" - "time" -) - -const ServiceID = "SSO" -const ServiceAPIVersion = "2019-06-10" - -type operationMetrics struct { - Duration metrics.Float64Histogram - SerializeDuration metrics.Float64Histogram - ResolveIdentityDuration metrics.Float64Histogram - ResolveEndpointDuration metrics.Float64Histogram - SignRequestDuration metrics.Float64Histogram - DeserializeDuration metrics.Float64Histogram -} - -func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { - switch name { - case "client.call.duration": - return m.Duration - case "client.call.serialization_duration": - return m.SerializeDuration - case "client.call.resolve_identity_duration": - return m.ResolveIdentityDuration - case "client.call.resolve_endpoint_duration": - return m.ResolveEndpointDuration - case "client.call.signing_duration": - return m.SignRequestDuration - case "client.call.deserialization_duration": - return m.DeserializeDuration - default: - panic("unrecognized operation metric") - } -} - -func timeOperationMetric[T any]( - 
ctx context.Context, metric string, fn func() (T, error), - opts ...metrics.RecordMetricOption, -) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - start := time.Now() - v, err := fn() - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - return v, err -} - -func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - var ended bool - start := time.Now() - return func() { - if ended { - return - } - ended = true - - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - } -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} - -type operationMetricsKey struct{} - -func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { - meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sso") - om := &operationMetrics{} - - var err error - - om.Duration, err = operationMetricTimer(meter, "client.call.duration", - "Overall call duration (including retries and time to send or receive request and response body)") - if err != nil { - return nil, err - } - om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", - "The time it takes to serialize a message body") - if err != nil { - return nil, err - } - om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", - "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") - if err != nil { - return nil, err - } - om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", - "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") - if err != nil { - return nil, err - } - om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", - "The time it takes to sign a request") - if err != nil { - return nil, err - } - om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", - "The time it takes to deserialize a message body") - if err != nil { - return nil, err - } - - return context.WithValue(parent, operationMetricsKey{}, om), nil -} - -func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { - return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = desc - }) -} - -func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) -} - -func operationTracer(p tracing.TracerProvider) tracing.Tracer { - return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sso") -} - -// Client provides the API client to make operations call for AWS Single Sign-On. -type Client struct { - options Options - - // Difference between the time reported by the server and the client - timeOffset *atomic.Int64 -} - -// New returns an initialized Client based on the functional options. 
Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. -func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveTracerProvider(&options) - - resolveMeterProvider(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - initializeTimeOffsetResolver(client) - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - ctx = middleware.ClearStackValues(ctx) - ctx = middleware.WithServiceID(ctx, ServiceID) - ctx = middleware.WithOperationName(ctx, opID) - - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - ctx, err = withOperationMetrics(ctx, options.MeterProvider) - if err != nil { - return nil, metadata, err - } - - tracer := operationTracer(options.TracerProvider) - spanName := fmt.Sprintf("%s.%s", ServiceID, opID) - - ctx = tracing.WithOperationTracer(ctx, tracer) - - ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { - o.Kind = tracing.SpanKindClient - o.Properties.Set("rpc.system", "aws-api") - o.Properties.Set("rpc.method", opID) - o.Properties.Set("rpc.service", ServiceID) - }) - endTimer := startMetricTimer(ctx, "client.call.duration") - defer endTimer() - defer span.End() - - handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { - o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") - }) - decorated := middleware.DecorateHandler(handler, stack) - result, metadata, err = decorated.Handle(ctx, params) - if err != nil { - span.SetProperty("exception.type", fmt.Sprintf("%T", err)) - span.SetProperty("exception.message", err.Error()) - - var aerr smithy.APIError - if errors.As(err, &aerr) { - span.SetProperty("api.error_code", aerr.ErrorCode()) - span.SetProperty("api.error_message", aerr.ErrorMessage()) - span.SetProperty("api.error_fault", aerr.ErrorFault().String()) - } - - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - span.SetProperty("error", err != nil) - if err == nil { - 
span.SetStatus(tracing.SpanStatusOK) - } else { - span.SetStatus(tracing.SpanStatusError) - } - - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = 
defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. -func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) 
- } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} - -func addSpanRetryLoop(stack *middleware.Stack, options Options) error { - return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) -} - -type spanRetryLoop struct { - options Options -} - -func (*spanRetryLoop) ID() string { - return "spanRetryLoop" -} - -func (m *spanRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - middleware.FinalizeOutput, 
middleware.Metadata, error, -) { - tracer := operationTracer(m.options.TracerProvider) - ctx, span := tracer.StartSpan(ctx, "RetryLoop") - defer span.End() - - return next.HandleFinalize(ctx, in) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addIsWaiterUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) - return nil - }) -} - -func addIsPaginatorUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) - return nil - }) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} -func initializeTimeOffsetResolver(c *Client) { - c.timeOffset = new(atomic.Int64) -} - -func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { - ua, 
err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - switch options.Retryer.(type) { - case *retry.Standard: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) - case *retry.AdaptiveMode: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) - } - return nil -} - -func resolveTracerProvider(options *Options) { - if options.TracerProvider == nil { - options.TracerProvider = &tracing.NopTracerProvider{} - } -} - -func resolveMeterProvider(options *Options) { - if options.MeterProvider == nil { - options.MeterProvider = metrics.NopMeterProvider{} - } -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} - -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, 
_ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} - -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go deleted file mode 100644 index a65602023..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the STS short-term credentials for a given role name that is assigned -// to the user. -func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { - if params == nil { - params = &GetRoleCredentialsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetRoleCredentialsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetRoleCredentialsInput struct { - - // The token issued by the CreateToken API call. For more information, see [CreateToken] in the - // IAM Identity Center OIDC API Reference Guide. - // - // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html - // - // This member is required. - AccessToken *string - - // The identifier for the AWS account that is assigned to the user. - // - // This member is required. - AccountId *string - - // The friendly name of the role that is assigned to the user. - // - // This member is required. - RoleName *string - - noSmithyDocumentSerde -} - -type GetRoleCredentialsOutput struct { - - // The credentials for the role that is assigned to the user. - RoleCredentials *types.RoleCredentials - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetRoleCredentials"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetRoleCredentials", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go deleted file mode 100644 index 315526ef1..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ /dev/null @@ -1,266 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Lists all roles that are assigned to the user for a given AWS account. -func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { - if params == nil { - params = &ListAccountRolesInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListAccountRolesOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListAccountRolesInput struct { - - // The token issued by the CreateToken API call. For more information, see [CreateToken] in the - // IAM Identity Center OIDC API Reference Guide. - // - // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html - // - // This member is required. - AccessToken *string - - // The identifier for the AWS account that is assigned to the user. - // - // This member is required. - AccountId *string - - // The number of items that clients can request per page. - MaxResults *int32 - - // The page token from the previous response output when you request subsequent - // pages. - NextToken *string - - noSmithyDocumentSerde -} - -type ListAccountRolesOutput struct { - - // The page token client that is used to retrieve the list of accounts. - NextToken *string - - // A paginated response with the list of roles and the next token if more results - // are available. - RoleList []types.RoleInfo - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountRoles"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles -type ListAccountRolesPaginatorOptions struct { - // The number of items that clients can request per page. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. 
- StopOnDuplicateToken bool -} - -// ListAccountRolesPaginator is a paginator for ListAccountRoles -type ListAccountRolesPaginator struct { - options ListAccountRolesPaginatorOptions - client ListAccountRolesAPIClient - params *ListAccountRolesInput - nextToken *string - firstPage bool -} - -// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator -func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator { - if params == nil { - params = &ListAccountRolesInput{} - } - - options := ListAccountRolesPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListAccountRolesPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListAccountRolesPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListAccountRoles page. -func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListAccountRoles(ctx, &params, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ListAccountRolesAPIClient is a client that implements the ListAccountRoles -// operation. -type ListAccountRolesAPIClient interface { - ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) -} - -var _ ListAccountRolesAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListAccountRoles", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go deleted file mode 100644 index d867b78a6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ /dev/null @@ -1,264 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity -// Center User Guide. This operation returns a paginated response. 
-// -// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers -func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { - if params == nil { - params = &ListAccountsInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*ListAccountsOutput) - out.ResultMetadata = metadata - return out, nil -} - -type ListAccountsInput struct { - - // The token issued by the CreateToken API call. For more information, see [CreateToken] in the - // IAM Identity Center OIDC API Reference Guide. - // - // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html - // - // This member is required. - AccessToken *string - - // This is the number of items clients can request per page. - MaxResults *int32 - - // (Optional) When requesting subsequent pages, this is the page token from the - // previous response output. - NextToken *string - - noSmithyDocumentSerde -} - -type ListAccountsOutput struct { - - // A paginated response with the list of account information and the next token if - // more results are available. - AccountList []types.AccountInfo - - // The page token client that is used to retrieve the list of accounts. - NextToken *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccounts"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpListAccountsValidationMiddleware(stack); err != nil { - return err - } - if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -// ListAccountsPaginatorOptions is the paginator options for ListAccounts -type ListAccountsPaginatorOptions struct { - // This is the number of items clients can request per page. - Limit int32 - - // Set to true if pagination should stop if the service returns a pagination token - // that matches the most recent token provided to the service. - StopOnDuplicateToken bool -} - -// ListAccountsPaginator is a paginator for ListAccounts -type ListAccountsPaginator struct { - options ListAccountsPaginatorOptions - client ListAccountsAPIClient - params *ListAccountsInput - nextToken *string - firstPage bool -} - -// NewListAccountsPaginator returns a new ListAccountsPaginator -func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator { - if params == nil { - params = &ListAccountsInput{} - } - - options := ListAccountsPaginatorOptions{} - if params.MaxResults != nil { - options.Limit = *params.MaxResults - } - - for _, fn := range optFns { - fn(&options) - } - - return &ListAccountsPaginator{ - options: options, - client: client, - params: params, - firstPage: true, - nextToken: params.NextToken, - } -} - -// HasMorePages returns a boolean indicating whether more pages are available -func (p *ListAccountsPaginator) HasMorePages() bool { - return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) -} - -// NextPage retrieves the next ListAccounts page. -func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) { - if !p.HasMorePages() { - return nil, fmt.Errorf("no more pages available") - } - - params := *p.params - params.NextToken = p.nextToken - - var limit *int32 - if p.options.Limit > 0 { - limit = &p.options.Limit - } - params.MaxResults = limit - - optFns = append([]func(*Options){ - addIsPaginatorUserAgent, - }, optFns...) - result, err := p.client.ListAccounts(ctx, &params, optFns...) - if err != nil { - return nil, err - } - p.firstPage = false - - prevToken := p.nextToken - p.nextToken = result.NextToken - - if p.options.StopOnDuplicateToken && - prevToken != nil && - p.nextToken != nil && - *prevToken == *p.nextToken { - p.nextToken = nil - } - - return result, nil -} - -// ListAccountsAPIClient is a client that implements the ListAccounts operation. 
-type ListAccountsAPIClient interface { - ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) -} - -var _ ListAccountsAPIClient = (*Client)(nil) - -func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "ListAccounts", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go deleted file mode 100644 index 434b43085..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ /dev/null @@ -1,167 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Removes the locally stored SSO tokens from the client-side cache and sends an -// API call to the IAM Identity Center service to invalidate the corresponding -// server-side IAM Identity Center sign in session. -// -// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM -// Identity Center sign in session is used to obtain an IAM session, as specified -// in the corresponding IAM Identity Center permission set. More specifically, IAM -// Identity Center assumes an IAM role in the target account on behalf of the user, -// and the corresponding temporary AWS credentials are returned to the client. -// -// After user logout, any existing IAM role sessions that were created by using -// IAM Identity Center permission sets continue based on the duration configured in -// the permission set. For more information, see [User authentications]in the IAM Identity Center User -// Guide. -// -// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html -func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { - if params == nil { - params = &LogoutInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*LogoutOutput) - out.ResultMetadata = metadata - return out, nil -} - -type LogoutInput struct { - - // The token issued by the CreateToken API call. For more information, see [CreateToken] in the - // IAM Identity Center OIDC API Reference Guide. - // - // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html - // - // This member is required. - AccessToken *string - - noSmithyDocumentSerde -} - -type LogoutOutput struct { - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "Logout"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpLogoutValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "Logout", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go deleted file mode 100644 index 366963b49..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package sso - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(ctx, params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. 
-type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "GetRoleCredentials": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "ListAccountRoles": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "ListAccounts": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "Logout": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "awsssoportal") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") - defer span.End() - - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - - span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) - span.End() - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: 
scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") - defer span.End() - - rscheme := getResolvedAuthScheme(innerCtx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", - func() (smithyauth.Identity, error) { - return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) - }, - func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - - span.End() - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { - options Options -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "SignRequest") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { - return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) - }, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go deleted file mode 100644 index ec23c36f5..000000000 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ /dev/null @@ -1,1182 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" - "github.com/aws/aws-sdk-go-v2/service/sso/types" - smithy "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithytime "github.com/aws/smithy-go/time" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "io/ioutil" - "strings" - "time" -) - -func deserializeS3Expires(v string) (*time.Time, error) { - t, err := smithytime.ParseHTTPDate(v) - if err != nil { - return nil, nil - } - return &t, nil -} - -type awsRestjson1_deserializeOpGetRoleCredentials struct { -} - -func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata) - } - output := &GetRoleCredentialsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := 
json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *GetRoleCredentialsOutput - if *v == nil { - sv = &GetRoleCredentialsOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "roleCredentials": - if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpListAccountRoles struct { -} - -func (*awsRestjson1_deserializeOpListAccountRoles) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata) - } - output := &ListAccountRolesOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: 
snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListAccountRolesOutput - if *v == nil { - sv = &ListAccountRolesOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "roleList": - if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil { - return err - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpListAccounts struct { -} - -func (*awsRestjson1_deserializeOpListAccounts) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpListAccounts) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata) - } - output := &ListAccountsOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return 
awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *ListAccountsOutput - if *v == nil { - sv = &ListAccountsOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountList": - if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil { - return err - } - - case "nextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpLogout struct { -} - -func (*awsRestjson1_deserializeOpLogout) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata) - } - output := &LogoutOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = 
restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ResourceNotFoundException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.TooManyRequestsException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = 
&smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnauthorizedException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AccountInfo - if *v == nil { - sv = &types.AccountInfo{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) - } - sv.AccountId = ptr.String(jtv) - } - - case "accountName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value) - } - sv.AccountName = ptr.String(jtv) - } - - case "emailAddress": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value) - } - sv.EmailAddress = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.AccountInfo - if *v == nil { - cv = []types.AccountInfo{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.AccountInfo - destAddr := &col - if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestException - if *v == nil { - sv = &types.InvalidRequestException{} - } else 
{ - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ResourceNotFoundException - if *v == nil { - sv = &types.ResourceNotFoundException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RoleCredentials - if *v == nil { - sv = &types.RoleCredentials{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessKeyId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value) - } - sv.AccessKeyId = ptr.String(jtv) - } - - case "expiration": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.Expiration = i64 - } - - case "secretAccessKey": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value) - } - sv.SecretAccessKey = ptr.String(jtv) - } - - case "sessionToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value) - } - sv.SessionToken = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.RoleInfo - if *v == nil { - sv = &types.RoleInfo{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accountId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) - } - sv.AccountId = ptr.String(jtv) - } - - case "roleName": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value) - } - sv.RoleName = 
ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []types.RoleInfo - if *v == nil { - cv = []types.RoleInfo{} - } else { - cv = *v - } - - for _, value := range shape { - var col types.RoleInfo - destAddr := &col - if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil { - return err - } - col = *destAddr - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.TooManyRequestsException - if *v == nil { - sv = &types.TooManyRequestsException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnauthorizedException - if *v == nil { - sv = &types.UnauthorizedException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "message", "Message": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Message = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go deleted file mode 100644 index 7f6e429fd..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package sso provides the API client, operations, and parameter types for AWS -// Single Sign-On. -// -// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web -// service that makes it easy for you to assign user access to IAM Identity Center -// resources such as the AWS access portal. Users can get AWS account applications -// and roles assigned to them and get federated into the application. -// -// Although AWS Single Sign-On was renamed, the sso and identitystore API -// namespaces will continue to retain their original name for backward -// compatibility purposes. For more information, see [IAM Identity Center rename]. -// -// This reference guide describes the IAM Identity Center Portal operations that -// you can call programatically and includes detailed information on data types and -// errors. 
-// -// AWS provides SDKs that consist of libraries and sample code for various -// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. -// The SDKs provide a convenient way to create programmatic access to IAM Identity -// Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see [Tools for Amazon Web Services]. -// -// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/ -// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed -package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go deleted file mode 100644 index 53c6bc756..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ /dev/null @@ -1,556 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sso - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. 
-func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "awsssoportal" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
-// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string -} - -// ValidateRequired validates required parameters are set. 
-func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - return p -} - -type stringSlice []string - -func (s stringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { - if "aws-us-gov" == _PartitionResult.Name { - uriString := 
func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://portal.sso.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() - - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", - func() (smithyendpoints.Endpoint, error) { - return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - }) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json deleted file mode 100644 index 936253d7c..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_GetRoleCredentials.go", - "api_op_ListAccountRoles.go", - "api_op_ListAccounts.go", - "api_op_Logout.go", - "auth.go", - 
"deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/sso", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go deleted file mode 100644 index 7ea60195d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package sso - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.24.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go deleted file mode 100644 index 081867b3d..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ /dev/null @@ -1,566 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
- UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver SSO endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.af-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "af-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: 
endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-northeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ap-southeast-4.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-4", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.ca-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-central-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-north-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-2", - }, - }, - endpoints.EndpointKey{ - Region: 
"eu-west-3", - }: endpoints.Endpoint{ - Hostname: "portal.sso.eu-west-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-3", - }, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.il-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "il-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.me-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.me-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.sa-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "sa-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, 
- }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "portal.sso.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "portal.sso-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "portal.sso-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "portal.sso.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "portal.sso.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go deleted file mode 100644 index aa744f159..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package sso - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. - // - // To migrate an EndpointResolver implementation that uses a custom endpoint, set - // the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The client meter provider. - MeterProvider metrics.MeterProvider - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. - // - // If specified in an operation call's functional options with a value that is - // different than the constructed client's Options, the Client's Retryer will be - // wrapped to use the operation's specific RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. - // - // When creating a new API Clients this member will only be used if the Retryer - // Options member is nil. This value will be ignored if Retryer is not nil. - // - // Currently does not support per operation call overrides, may in the future. 
- RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The client tracer provider. - TracerProvider tracing.TracerProvider - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. - // - // Currently does not support per operation call overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. 
-func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go deleted file mode 100644 index a7a5b57de..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go +++ /dev/null @@ -1,309 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package sso - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type awsRestjson1_serializeOpGetRoleCredentials struct { -} - -func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetRoleCredentialsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/federation/credentials") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.AccountId != nil { - encoder.SetQuery("account_id").String(*v.AccountId) - } - - if v.RoleName != nil { - encoder.SetQuery("role_name").String(*v.RoleName) - } - - return nil -} - -type awsRestjson1_serializeOpListAccountRoles struct { -} - -func (*awsRestjson1_serializeOpListAccountRoles) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListAccountRolesInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/assignment/roles") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.AccountId != nil { - encoder.SetQuery("account_id").String(*v.AccountId) - } - - if v.MaxResults != nil { - encoder.SetQuery("max_result").Integer(*v.MaxResults) - } - - if v.NextToken != nil { - encoder.SetQuery("next_token").String(*v.NextToken) - } - - return nil -} - -type awsRestjson1_serializeOpListAccounts struct { -} - -func (*awsRestjson1_serializeOpListAccounts) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*ListAccountsInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/assignment/accounts") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, 
request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - if v.MaxResults != nil { - encoder.SetQuery("max_result").Integer(*v.MaxResults) - } - - if v.NextToken != nil { - encoder.SetQuery("next_token").String(*v.NextToken) - } - - return nil -} - -type awsRestjson1_serializeOpLogout struct { -} - -func (*awsRestjson1_serializeOpLogout) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*LogoutInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/logout") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.AccessToken != nil { - locationName := "X-Amz-Sso_bearer_token" - encoder.SetHeader(locationName).String(*v.AccessToken) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go deleted file mode 100644 index e97a126e8..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// Indicates that a problem occurred with the input to the request. For example, a -// required parameter might be missing or out of range. -type InvalidRequestException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified resource doesn't exist. -type ResourceNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ResourceNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ResourceNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ResourceNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the request is being made too frequently and is more than what -// the server can handle. -type TooManyRequestsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *TooManyRequestsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *TooManyRequestsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *TooManyRequestsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "TooManyRequestsException" - } - return *e.ErrorCodeOverride -} -func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. 
-type UnauthorizedException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnauthorizedException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnauthorizedException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnauthorizedException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnauthorizedException" - } - return *e.ErrorCodeOverride -} -func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go deleted file mode 100644 index 07ac468e3..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" -) - -// Provides information about your AWS account. -type AccountInfo struct { - - // The identifier of the AWS account that is assigned to the user. - AccountId *string - - // The display name of the AWS account that is assigned to the user. - AccountName *string - - // The email address of the AWS account that is assigned to the user. - EmailAddress *string - - noSmithyDocumentSerde -} - -// Provides information about the role credentials that are assigned to the user. -type RoleCredentials struct { - - // The identifier used for the temporary security credentials. For more - // information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide. - // - // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - AccessKeyId *string - - // The date on which temporary security credentials expire. - Expiration int64 - - // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS - // IAM User Guide. - // - // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - SecretAccessKey *string - - // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS - // IAM User Guide. - // - // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - SessionToken *string - - noSmithyDocumentSerde -} - -// Provides information about the role that is assigned to the user. -type RoleInfo struct { - - // The identifier of the AWS account assigned to the user. - AccountId *string - - // The friendly name of the role that is assigned to the user. - RoleName *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go deleted file mode 100644 index f6bf461f7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go +++ /dev/null @@ -1,175 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package sso - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpGetRoleCredentials struct { -} - -func (*validateOpGetRoleCredentials) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetRoleCredentialsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetRoleCredentialsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListAccountRoles struct { -} - -func (*validateOpListAccountRoles) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListAccountRolesInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListAccountRolesInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpListAccounts struct { -} - -func (*validateOpListAccounts) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*ListAccountsInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpListAccountsInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpLogout struct { -} - -func (*validateOpLogout) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*LogoutInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpLogoutInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After) -} - -func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After) -} - -func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After) -} - -func addOpLogoutValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpLogout{}, middleware.After) -} - -func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"} - if v.RoleName == 
nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleName")) - } - if v.AccountId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccountId")) - } - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListAccountRolesInput(v *ListAccountRolesInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if v.AccountId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccountId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpListAccountsInput(v *ListAccountsInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpLogoutInput(v *LogoutInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"} - if v.AccessToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md deleted file mode 100644 index da43a82a5..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ /dev/null @@ -1,549 +0,0 @@ -# v1.28.9 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.8 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.7 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.6 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.5 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.4 (2024-10-03) - -* No change notes available for this release. - -# v1.27.3 (2024-09-27) - -* No change notes available for this release. - -# v1.27.2 (2024-09-25) - -* No change notes available for this release. - -# v1.27.1 (2024-09-23) - -* No change notes available for this release. - -# v1.27.0 (2024-09-20) - -* **Feature**: Add tracing and metrics support to service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.8 (2024-09-17) - -* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. 
This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. - -# v1.26.7 (2024-09-04) - -* No change notes available for this release. - -# v1.26.6 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.5 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.4 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.2 (2024-07-03) - -* No change notes available for this release. - -# v1.26.1 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2024-06-26) - -* **Feature**: Support list-of-string endpoint parameter. - -# v1.25.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.6 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.5 (2024-06-07) - -* **Bug Fix**: Add clock skew correction on all service clients -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.4 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.3 (2024-05-23) - -* No change notes available for this release. - -# v1.24.2 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.1 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2024-05-10) - -* **Feature**: Updated request parameters for PKCE support. - -# v1.23.5 (2024-05-08) - -* **Bug Fix**: GoDoc improvement - -# v1.23.4 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.3 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.2 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.22.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.22.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.22.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.7 (2024-01-16) - -* No change notes available for this release. 
- -# v1.21.6 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.21.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. - -# v1.21.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.3 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.2 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.20.1 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-11-17) - -* **Feature**: Adding support for `sso-oauth:CreateTokenWithIAM`. - -# v1.19.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.3 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.2 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.1 (2023-09-22) - -* No change notes available for this release. - -# v1.17.0 (2023-09-20) - -* **Feature**: Update FIPS endpoints in aws-us-gov. - -# v1.16.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.15.6 (2023-09-05) - -* No change notes available for this release. - -# v1.15.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.1 (2023-08-01) - -* No change notes available for this release. - -# v1.15.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. 
A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.14 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.13 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.12 (2023-06-15) - -* No change notes available for this release. - -# v1.14.11 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.10 (2023-05-04) - -* No change notes available for this release. - -# v1.14.9 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.8 (2023-04-10) - -* No change notes available for this release. - -# v1.14.7 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.6 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.5 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.4 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.14.3 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.2 (2023-02-15) - -* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. -* **Bug Fix**: Correct error type parsing for restJson services. - -# v1.14.1 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). - -# v1.13.11 (2022-12-19) - -* No change notes available for this release. - -# v1.13.10 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.9 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.8 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.7 (2022-10-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.6 (2022-09-30) - -* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference. - -# v1.13.5 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.4 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.3 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.2 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.1 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-08-25) - -* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. 
- -# v1.12.14 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.13 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.12 (2022-08-08) - -* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.11 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.10 (2022-07-11) - -* No change notes available for this release. - -# v1.12.9 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.8 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.7 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.6 (2022-05-27) - -* No change notes available for this release. - -# v1.12.5 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.9.0 (2022-01-07) - -* **Feature**: API client updated -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.2 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. 
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-10-21) - -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-10-11) - -* **Feature**: API client updated -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-09-17) - -* **Feature**: Updated API client and endpoints to latest revision. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-08-27) - -* **Feature**: Updated API model to latest revision. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.3 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.2 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.1 (2021-07-15) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.3.0 (2021-06-25) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.2.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go deleted file mode 100644 index 0b05bf6c7..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ /dev/null @@ -1,912 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ssooidc - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "sync/atomic" - "time" -) - -const ServiceID = "SSO OIDC" -const ServiceAPIVersion = "2019-06-10" - -type operationMetrics struct { - Duration metrics.Float64Histogram - SerializeDuration metrics.Float64Histogram - ResolveIdentityDuration metrics.Float64Histogram - ResolveEndpointDuration metrics.Float64Histogram - SignRequestDuration metrics.Float64Histogram - DeserializeDuration metrics.Float64Histogram -} - -func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { - switch name { - case "client.call.duration": - return m.Duration - case "client.call.serialization_duration": - return m.SerializeDuration - case "client.call.resolve_identity_duration": - return m.ResolveIdentityDuration - case "client.call.resolve_endpoint_duration": - return m.ResolveEndpointDuration - case "client.call.signing_duration": - return m.SignRequestDuration - case "client.call.deserialization_duration": - return m.DeserializeDuration - default: - panic("unrecognized operation metric") - } -} - -func timeOperationMetric[T any]( - ctx context.Context, metric string, fn func() (T, error), - opts ...metrics.RecordMetricOption, -) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - start := time.Now() - v, err := fn() - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - return v, err -} - -func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - var ended bool - start := time.Now() - return func() { - if ended { - return - } - ended = true - - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) 
- } -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} - -type operationMetricsKey struct{} - -func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { - meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") - om := &operationMetrics{} - - var err error - - om.Duration, err = operationMetricTimer(meter, "client.call.duration", - "Overall call duration (including retries and time to send or receive request and response body)") - if err != nil { - return nil, err - } - om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", - "The time it takes to serialize a message body") - if err != nil { - return nil, err - } - om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", - "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") - if err != nil { - return nil, err - } - om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", - "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") - if err != nil { - return nil, err - } - om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", - "The time it takes to sign a request") - if err != nil { - return nil, err - } - om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", - "The time it takes to deserialize a message body") - if err != nil { - return nil, err - } - - return context.WithValue(parent, operationMetricsKey{}, om), nil -} - -func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { - return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = desc - }) -} - -func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) -} - -func operationTracer(p tracing.TracerProvider) tracing.Tracer { - return p.Tracer("github.com/aws/aws-sdk-go-v2/service/ssooidc") -} - -// Client provides the API client to make operations call for AWS SSO OIDC. -type Client struct { - options Options - - // Difference between the time reported by the server and the client - timeOffset *atomic.Int64 -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. 
-func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveTracerProvider(&options) - - resolveMeterProvider(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - initializeTimeOffsetResolver(client) - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - ctx = middleware.ClearStackValues(ctx) - ctx = middleware.WithServiceID(ctx, ServiceID) - ctx = middleware.WithOperationName(ctx, opID) - - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - ctx, err = withOperationMetrics(ctx, options.MeterProvider) - if err != nil { - return nil, metadata, err - } - - tracer := operationTracer(options.TracerProvider) - spanName := fmt.Sprintf("%s.%s", ServiceID, opID) - - ctx = tracing.WithOperationTracer(ctx, tracer) - - ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { - o.Kind = tracing.SpanKindClient - o.Properties.Set("rpc.system", "aws-api") - o.Properties.Set("rpc.method", opID) - o.Properties.Set("rpc.service", ServiceID) - }) - endTimer := startMetricTimer(ctx, "client.call.duration") - defer endTimer() - defer span.End() - - handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { - o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") - }) - decorated := middleware.DecorateHandler(handler, stack) - result, metadata, err = decorated.Handle(ctx, params) - if err != nil { - span.SetProperty("exception.type", fmt.Sprintf("%T", err)) - span.SetProperty("exception.message", err.Error()) - - var aerr smithy.APIError - if errors.As(err, &aerr) { - span.SetProperty("api.error_code", aerr.ErrorCode()) - span.SetProperty("api.error_message", aerr.ErrorMessage()) - span.SetProperty("api.error_fault", aerr.ErrorFault().String()) - } - - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - span.SetProperty("error", err != nil) - if err == nil { - span.SetStatus(tracing.SpanStatusOK) - } else { - span.SetStatus(tracing.SpanStatusError) - } - - return result, metadata, err -} - -type operationInputKey struct{} - -func 
setOperationInput(ctx context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. 
-func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) 
- } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} - -func addSpanRetryLoop(stack *middleware.Stack, options Options) error { - return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) -} - -type spanRetryLoop struct { - options Options -} - -func (*spanRetryLoop) ID() string { - return "spanRetryLoop" -} - -func (m *spanRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - middleware.FinalizeOutput, 
middleware.Metadata, error, -) { - tracer := operationTracer(m.options.TracerProvider) - ctx, span := tracer.StartSpan(ctx, "RetryLoop") - defer span.End() - - return next.HandleFinalize(ctx, in) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addIsWaiterUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) - return nil - }) -} - -func addIsPaginatorUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) - return nil - }) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} -func initializeTimeOffsetResolver(c *Client) { - c.timeOffset = new(atomic.Int64) -} - -func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { - 
ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - switch options.Retryer.(type) { - case *retry.Standard: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) - case *retry.AdaptiveMode: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) - } - return nil -} - -func resolveTracerProvider(options *Options) { - if options.TracerProvider == nil { - options.TracerProvider = &tracing.NopTracerProvider{} - } -} - -func resolveMeterProvider(options *Options) { - if options.MeterProvider == nil { - options.MeterProvider = metrics.NopMeterProvider{} - } -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} - -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - 
ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} - -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go deleted file mode 100644 index 5fb8d2ab9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ /dev/null @@ -1,240 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates and returns access and refresh tokens for clients that are -// authenticated using client secrets. The access token can be used to fetch -// short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. -func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { - if params == nil { - params = &CreateTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateTokenInput struct { - - // The unique identifier string for the client or application. This value comes - // from the result of the RegisterClientAPI. - // - // This member is required. - ClientId *string - - // A secret string generated for the client. This value should come from the - // persisted result of the RegisterClientAPI. - // - // This member is required. - ClientSecret *string - - // Supports the following OAuth grant types: Device Code and Refresh Token. - // Specify either of the following values, depending on the grant type that you - // want: - // - // * Device Code - urn:ietf:params:oauth:grant-type:device_code - // - // * Refresh Token - refresh_token - // - // For information about how to obtain the device code, see the StartDeviceAuthorization topic. - // - // This member is required. - GrantType *string - - // Used only when calling this API for the Authorization Code grant type. The - // short-term code is used to identify this authorization request. This grant type - // is currently unsupported for the CreateTokenAPI. - Code *string - - // Used only when calling this API for the Authorization Code grant type. 
This - // value is generated by the client and presented to validate the original code - // challenge value the client passed at authorization time. - CodeVerifier *string - - // Used only when calling this API for the Device Code grant type. This short-term - // code is used to identify this authorization request. This comes from the result - // of the StartDeviceAuthorizationAPI. - DeviceCode *string - - // Used only when calling this API for the Authorization Code grant type. This - // value specifies the location of the client or application that has registered to - // receive the authorization code. - RedirectUri *string - - // Used only when calling this API for the Refresh Token grant type. This token is - // used to refresh short-term tokens, such as the access token, that might expire. - // - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the [IAM Identity Center OIDC API Reference]. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - RefreshToken *string - - // The list of scopes for which authorization is requested. The access token that - // is issued is limited to the scopes that are granted. If this value is not - // specified, IAM Identity Center authorizes all scopes that are configured for the - // client during the call to RegisterClient. - Scope []string - - noSmithyDocumentSerde -} - -type CreateTokenOutput struct { - - // A bearer token to access Amazon Web Services accounts and applications assigned - // to a user. - AccessToken *string - - // Indicates the time in seconds when an access token will expire. - ExpiresIn int32 - - // The idToken is not implemented or supported. For more information about the - // features and limitations of the current IAM Identity Center OIDC implementation, - // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference]. - // - // A JSON Web Token (JWT) that identifies who is associated with the issued access - // token. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - IdToken *string - - // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. - // - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the [IAM Identity Center OIDC API Reference]. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - RefreshToken *string - - // Used to notify the client that the returned token is an access token. The - // supported token type is Bearer . - TokenType *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpCreateTokenValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go deleted file mode 100644 index 8abd43690..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ /dev/null @@ -1,271 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Creates and returns access and refresh tokens for clients and applications that -// are authenticated using IAM entities. The access token can be used to fetch -// short-term credentials for the assigned Amazon Web Services accounts or to -// access application APIs using bearer authentication. -func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { - if params == nil { - params = &CreateTokenWithIAMInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "CreateTokenWithIAM", params, optFns, c.addOperationCreateTokenWithIAMMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*CreateTokenWithIAMOutput) - out.ResultMetadata = metadata - return out, nil -} - -type CreateTokenWithIAMInput struct { - - // The unique identifier string for the client or application. This value is an - // application ARN that has OAuth grants configured. - // - // This member is required. - ClientId *string - - // Supports the following OAuth grant types: Authorization Code, Refresh Token, - // JWT Bearer, and Token Exchange. Specify one of the following values, depending - // on the grant type that you want: - // - // * Authorization Code - authorization_code - // - // * Refresh Token - refresh_token - // - // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer - // - // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange - // - // This member is required. - GrantType *string - - // Used only when calling this API for the JWT Bearer grant type. This value - // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To - // authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the - // application. - Assertion *string - - // Used only when calling this API for the Authorization Code grant type. This - // short-term code is used to identify this authorization request. The code is - // obtained through a redirect from IAM Identity Center to a redirect URI persisted - // in the Authorization Code GrantOptions for the application. - Code *string - - // Used only when calling this API for the Authorization Code grant type. This - // value is generated by the client and presented to validate the original code - // challenge value the client passed at authorization time. - CodeVerifier *string - - // Used only when calling this API for the Authorization Code grant type. This - // value specifies the location of the client or application that has registered to - // receive the authorization code. - RedirectUri *string - - // Used only when calling this API for the Refresh Token grant type. This token is - // used to refresh short-term tokens, such as the access token, that might expire. - // - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the [IAM Identity Center OIDC API Reference]. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - RefreshToken *string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the type of token that the requester can receive. 
The following values - // are supported: - // - // * Access Token - urn:ietf:params:oauth:token-type:access_token - // - // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token - RequestedTokenType *string - - // The list of scopes for which authorization is requested. The access token that - // is issued is limited to the scopes that are granted. If the value is not - // specified, IAM Identity Center authorizes all scopes configured for the - // application, including the following default scopes: openid , aws , - // sts:identity_context . - Scope []string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the subject of the exchange. The value of the subject token must be an - // access token issued by IAM Identity Center to a different client or application. - // The access token must have authorized scopes that indicate the requested - // application as a target audience. - SubjectToken *string - - // Used only when calling this API for the Token Exchange grant type. This value - // specifies the type of token that is passed as the subject of the exchange. The - // following value is supported: - // - // * Access Token - urn:ietf:params:oauth:token-type:access_token - SubjectTokenType *string - - noSmithyDocumentSerde -} - -type CreateTokenWithIAMOutput struct { - - // A bearer token to access Amazon Web Services accounts and applications assigned - // to a user. - AccessToken *string - - // Indicates the time in seconds when an access token will expire. - ExpiresIn int32 - - // A JSON Web Token (JWT) that identifies the user associated with the issued - // access token. - IdToken *string - - // Indicates the type of tokens that are issued by IAM Identity Center. The - // following values are supported: - // - // * Access Token - urn:ietf:params:oauth:token-type:access_token - // - // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token - IssuedTokenType *string - - // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. - // - // For more information about the features and limitations of the current IAM - // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the [IAM Identity Center OIDC API Reference]. - // - // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html - RefreshToken *string - - // The list of scopes for which authorization is granted. The access token that is - // issued is limited to the scopes that are granted. - Scope []string - - // Used to notify the requester that the returned token is an access token. The - // supported token type is Bearer . - TokenType *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateTokenWithIAM{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateTokenWithIAM{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTokenWithIAM"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opCreateTokenWithIAM(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "CreateTokenWithIAM", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go deleted file mode 100644 index 03a3594be..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ /dev/null @@ -1,201 +0,0 @@ -// Code generated by smithy-go-codegen DO 
NOT EDIT. - -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Registers a client with IAM Identity Center. This allows clients to initiate -// device authorization. The output should be persisted for reuse through many -// authentication requests. -func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { - if params == nil { - params = &RegisterClientInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*RegisterClientOutput) - out.ResultMetadata = metadata - return out, nil -} - -type RegisterClientInput struct { - - // The friendly name of the client. - // - // This member is required. - ClientName *string - - // The type of client. The service supports only public as a client type. Anything - // other than public will be rejected by the service. - // - // This member is required. - ClientType *string - - // This IAM Identity Center application ARN is used to define - // administrator-managed configuration for public client access to resources. At - // authorization, the scopes, grants, and redirect URI available to this client - // will be restricted by this application resource. - EntitledApplicationArn *string - - // The list of OAuth 2.0 grant types that are defined by the client. This list is - // used to restrict the token granting flows available to the client. - GrantTypes []string - - // The IAM Identity Center Issuer URL associated with an instance of IAM Identity - // Center. This value is needed for user access to resources through the client. - IssuerUrl *string - - // The list of redirect URI that are defined by the client. At completion of - // authorization, this list is used to restrict what locations the user agent can - // be redirected back to. - RedirectUris []string - - // The list of scopes that are defined by the client. Upon authorization, this - // list is used to restrict permissions when granting an access token. - Scopes []string - - noSmithyDocumentSerde -} - -type RegisterClientOutput struct { - - // An endpoint that the client can use to request authorization. - AuthorizationEndpoint *string - - // The unique identifier string for each client. This client uses this identifier - // to get authenticated by the service in subsequent calls. - ClientId *string - - // Indicates the time at which the clientId and clientSecret were issued. - ClientIdIssuedAt int64 - - // A secret string generated for the client. The client will use this string to - // get authenticated by the service in subsequent calls. - ClientSecret *string - - // Indicates the time at which the clientId and clientSecret will become invalid. - ClientSecretExpiresAt int64 - - // An endpoint that the client can use to create tokens. - TokenEndpoint *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterClient"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpRegisterClientValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "RegisterClient", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go deleted file mode 100644 index 203ca5e67..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ /dev/null @@ -1,191 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Initiates device authorization by requesting a pair of verification codes from -// the authorization service. -func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) { - if params == nil { - params = &StartDeviceAuthorizationInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*StartDeviceAuthorizationOutput) - out.ResultMetadata = metadata - return out, nil -} - -type StartDeviceAuthorizationInput struct { - - // The unique identifier string for the client that is registered with IAM - // Identity Center. This value should come from the persisted result of the RegisterClientAPI - // operation. - // - // This member is required. - ClientId *string - - // A secret string that is generated for the client. This value should come from - // the persisted result of the RegisterClientAPI operation. - // - // This member is required. - ClientSecret *string - - // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal] - // in the IAM Identity Center User Guide. - // - // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html - // - // This member is required. - StartUrl *string - - noSmithyDocumentSerde -} - -type StartDeviceAuthorizationOutput struct { - - // The short-lived code that is used by the device when polling for a session - // token. - DeviceCode *string - - // Indicates the number of seconds in which the verification code will become - // invalid. - ExpiresIn int32 - - // Indicates the number of seconds the client must wait between attempts when - // polling for a session. - Interval int32 - - // A one-time user verification code. This is needed to authorize an in-use device. - UserCode *string - - // The URI of the verification page that takes the userCode to authorize the - // device. - VerificationUri *string - - // An alternate URL that the client can use to automatically launch a browser. - // This process skips the manual step in which the user visits the verification - // page and enters their code. - VerificationUriComplete *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "StartDeviceAuthorization"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "StartDeviceAuthorization", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go deleted file mode 100644 index e4b87f5bc..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ /dev/null @@ -1,331 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ssooidc - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. - Region string -} - -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(ctx, params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. 
-type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "CreateToken": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "RegisterClient": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "StartDeviceAuthorization": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "sso-oauth") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") - defer span.End() - - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - - span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) - span.End() - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, 
scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") - defer span.End() - - rscheme := getResolvedAuthScheme(innerCtx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", - func() (smithyauth.Identity, error) { - return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) - }, - func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - - span.End() - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { - options Options -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "SignRequest") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { - return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) - }, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go deleted file mode 100644 index ae9f145e6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ /dev/null @@ -1,2188 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ssooidc - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" - "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" - smithy "github.com/aws/smithy-go" - smithyio "github.com/aws/smithy-go/io" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - smithytime "github.com/aws/smithy-go/time" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "strings" - "time" -) - -func deserializeS3Expires(v string) (*time.Time, error) { - t, err := smithytime.ParseHTTPDate(v) - if err != nil { - return nil, nil - } - return &t, nil -} - -type awsRestjson1_deserializeOpCreateToken struct { -} - -func (*awsRestjson1_deserializeOpCreateToken) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata) - } - output := &CreateTokenOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = 
&smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("AccessDeniedException", errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - - case strings.EqualFold("AuthorizationPendingException", errorCode): - return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) - - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidGrantException", errorCode): - return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - case strings.EqualFold("UnsupportedGrantTypeException", errorCode): - return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateTokenOutput - if *v == nil { - sv = &CreateTokenOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) - } - sv.AccessToken = ptr.String(jtv) - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "idToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) - } - sv.IdToken = ptr.String(jtv) - } - - case "refreshToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) - } - sv.RefreshToken = ptr.String(jtv) - } - - case "tokenType": - if value != 
nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) - } - sv.TokenType = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpCreateTokenWithIAM struct { -} - -func (*awsRestjson1_deserializeOpCreateTokenWithIAM) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response, &metadata) - } - output := &CreateTokenWithIAMOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("AccessDeniedException", 
errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - - case strings.EqualFold("AuthorizationPendingException", errorCode): - return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) - - case strings.EqualFold("ExpiredTokenException", errorCode): - return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) - - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidGrantException", errorCode): - return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidRequestRegionException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestRegionException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - case strings.EqualFold("UnsupportedGrantTypeException", errorCode): - return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenWithIAMOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *CreateTokenWithIAMOutput - if *v == nil { - sv = &CreateTokenWithIAMOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "accessToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) - } - sv.AccessToken = ptr.String(jtv) - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "idToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) - } - sv.IdToken = ptr.String(jtv) - } - - case "issuedTokenType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenTypeURI to be of type string, got %T instead", value) - } - sv.IssuedTokenType = ptr.String(jtv) - } - - case "refreshToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) - } - sv.RefreshToken = ptr.String(jtv) - } - - case "scope": - if 
err := awsRestjson1_deserializeDocumentScopes(&sv.Scope, value); err != nil { - return err - } - - case "tokenType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) - } - sv.TokenType = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpRegisterClient struct { -} - -func (*awsRestjson1_deserializeOpRegisterClient) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata) - } - output := &RegisterClientOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) 
!= 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientMetadataException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) - - case strings.EqualFold("InvalidRedirectUriException", errorCode): - return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("InvalidScopeException", errorCode): - return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) - - case strings.EqualFold("UnsupportedGrantTypeException", errorCode): - return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *RegisterClientOutput - if *v == nil { - sv = &RegisterClientOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "authorizationEndpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.AuthorizationEndpoint = ptr.String(jtv) - } - - case "clientId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ClientId to be of type string, got %T instead", value) - } - sv.ClientId = ptr.String(jtv) - } - - case "clientIdIssuedAt": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ClientIdIssuedAt = i64 - } - - case "clientSecret": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value) - } - sv.ClientSecret = ptr.String(jtv) - } - - case "clientSecretExpiresAt": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ClientSecretExpiresAt = i64 - } - - case "tokenEndpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.TokenEndpoint = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -type awsRestjson1_deserializeOpStartDeviceAuthorization struct { -} - -func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, 
metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata) - } - output := &StartDeviceAuthorizationOutput{} - out.Result = output - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - span.End() - return out, metadata, err -} - -func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("InvalidClientException", errorCode): - return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) - - case strings.EqualFold("InvalidRequestException", errorCode): - return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) - - case strings.EqualFold("SlowDownException", errorCode): - return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) - - case strings.EqualFold("UnauthorizedClientException", errorCode): - return 
awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *StartDeviceAuthorizationOutput - if *v == nil { - sv = &StartDeviceAuthorizationOutput{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "deviceCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value) - } - sv.DeviceCode = ptr.String(jtv) - } - - case "expiresIn": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.ExpiresIn = int32(i64) - } - - case "interval": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.Interval = int32(i64) - } - - case "userCode": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UserCode to be of type string, got %T instead", value) - } - sv.UserCode = ptr.String(jtv) - } - - case "verificationUri": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.VerificationUri = ptr.String(jtv) - } - - case "verificationUriComplete": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected URI to be of type string, got %T instead", value) - } - sv.VerificationUriComplete = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.AccessDeniedException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.AuthorizationPendingException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := 
json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ExpiredTokenException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InternalServerException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidClientException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, 
ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidClientMetadataException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidGrantException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRedirectUriException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body 
:= io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidRequestRegionException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRequestRegionException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidRequestRegionException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidScopeException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.SlowDownException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape) - - if err != nil { - 
var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnauthorizedClientException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.UnsupportedGrantTypeException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape) - - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - - return output -} - -func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AccessDeniedException - if *v == nil { - sv = &types.AccessDeniedException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) 
- } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.AuthorizationPendingException - if *v == nil { - sv = &types.AuthorizationPendingException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ExpiredTokenException - if *v == nil { - sv = &types.ExpiredTokenException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InternalServerException - if *v == nil { - sv = &types.InternalServerException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidClientException - if *v == nil { - sv = &types.InvalidClientException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", 
value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidClientMetadataException - if *v == nil { - sv = &types.InvalidClientMetadataException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidGrantException - if *v == nil { - sv = &types.InvalidGrantException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRedirectUriException - if *v == nil { - sv = &types.InvalidRedirectUriException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value 
interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestException - if *v == nil { - sv = &types.InvalidRequestException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidRequestRegionException(v **types.InvalidRequestRegionException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidRequestRegionException - if *v == nil { - sv = &types.InvalidRequestRegionException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "endpoint": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Location to be of type string, got %T instead", value) - } - sv.Endpoint = ptr.String(jtv) - } - - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - case "region": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Region to be of type string, got %T instead", value) - } - sv.Region = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.InvalidScopeException - if *v == nil { - sv = &types.InvalidScopeException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentScopes(v *[]string, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == 
nil { - return nil - } - - shape, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var cv []string - if *v == nil { - cv = []string{} - } else { - cv = *v - } - - for _, value := range shape { - var col string - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Scope to be of type string, got %T instead", value) - } - col = jtv - } - cv = append(cv, col) - - } - *v = cv - return nil -} - -func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.SlowDownException - if *v == nil { - sv = &types.SlowDownException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnauthorizedClientException - if *v == nil { - sv = &types.UnauthorizedClientException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.UnsupportedGrantTypeException - if *v == nil { - sv = &types.UnsupportedGrantTypeException{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "error": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected Error to be of type string, got %T instead", value) - } - sv.Error_ = ptr.String(jtv) - } - - case "error_description": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) - } - sv.Error_description = ptr.String(jtv) - } - - default: - _, _ = key, value - - } - } - *v = sv - return 
nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go deleted file mode 100644 index 1d258e567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ /dev/null @@ -1,46 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package ssooidc provides the API client, operations, and parameter types for -// AWS SSO OIDC. -// -// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a -// client (such as CLI or a native application) to register with IAM Identity -// Center. The service also enables the client to fetch the user’s access token -// upon successful authentication and authorization with IAM Identity Center. -// -// IAM Identity Center uses the sso and identitystore API namespaces. -// -// # Considerations for Using This Guide -// -// Before you begin using this guide, we recommend that you first review the -// following important information about how the IAM Identity Center OIDC service -// works. -// -// - The IAM Identity Center OIDC service currently implements only the portions -// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to -// enable single sign-on authentication with the CLI. -// -// - With older versions of the CLI, the service only emits OIDC access tokens, -// so to obtain a new token, users must explicitly re-authenticate. To access the -// OIDC flow that supports token refresh and doesn’t require re-authentication, -// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with -// support for OIDC token refresh and configurable IAM Identity Center session -// durations. For more information, see [Configure Amazon Web Services access portal session duration]. -// -// - The access tokens provided by this service grant access to all Amazon Web -// Services account entitlements assigned to an IAM Identity Center user, not just -// a particular application. -// -// - The documentation in this guide does not describe the mechanism to convert -// the access token into Amazon Web Services Auth (“sigv4”) credentials for use -// with IAM-protected Amazon Web Services service endpoints. For more information, -// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide. -// -// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity -// Center User Guide. -// -// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html -// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html -// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628 -// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html -package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go deleted file mode 100644 index 6feea0c9f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ /dev/null @@ -1,556 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT.
- -package ssooidc - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. 
-func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "sso-oauth" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
-// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string -} - -// ValidateRequired validates required parameters are set. 
-func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - return p -} - -type stringSlice []string - -func (s stringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws-us-gov" { - uriString := func() 
string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://oidc.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() - - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", - func() (smithyendpoints.Endpoint, error) { - return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - }) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json deleted file mode 100644 index b2a52633b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_CreateToken.go", - "api_op_CreateTokenWithIAM.go", - "api_op_RegisterClient.go", - 
"api_op_StartDeviceAuthorization.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go deleted file mode 100644 index 7eedb9793..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package ssooidc - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.28.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go deleted file mode 100644 index b4c61ebad..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ /dev/null @@ -1,566 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
- UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver SSO OIDC endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.af-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "af-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-northeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{ - Hostname: 
"oidc.ap-northeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-northeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-northeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-1", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-2", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-3", - }, - }, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{ - Hostname: "oidc.ap-southeast-4.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ap-southeast-4", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.ca-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "ca-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-central-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-central-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-north-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-south-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-south-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "eu-west-2", - }, - }, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{ - Hostname: "oidc.eu-west-3.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: 
"eu-west-3", - }, - }, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.il-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "il-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{ - Hostname: "oidc.me-central-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-central-1", - }, - }, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{ - Hostname: "oidc.me-south-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "me-south-1", - }, - }, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.sa-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "sa-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{ - Hostname: "oidc.cn-north-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-north-1", - }, - }, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{ - Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", - CredentialScope: endpoints.CredentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: 
"oidc-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "oidc.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "oidc-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "oidc-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "oidc.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{ - Hostname: "oidc.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go deleted file mode 100644 index 55dd80d0e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. 
These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. - // - // To migrate an EndpointResolver implementation that uses a custom endpoint, set - // the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The client meter provider. - MeterProvider metrics.MeterProvider - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. - // - // If specified in an operation call's functional options with a value that is - // different than the constructed client's Options, the Client's Retryer will be - // wrapped to use the operation's specific RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. - // - // When creating a new API Clients this member will only be used if the Retryer - // Options member is nil. This value will be ignored if Retryer is not nil. - // - // Currently does not support per operation call overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The client tracer provider. 
- TracerProvider tracing.TracerProvider - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. - // - // Currently does not support per operation call overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. 
The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go deleted file mode 100644 index 1ad103d1e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go +++ /dev/null @@ -1,512 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package ssooidc - -import ( - "bytes" - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - smithyjson "github.com/aws/smithy-go/encoding/json" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -type awsRestjson1_serializeOpCreateToken struct { -} - -func (*awsRestjson1_serializeOpCreateToken) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/token") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.ClientSecret != nil { - ok := object.Key("clientSecret") - ok.String(*v.ClientSecret) - } - - if v.Code != nil { - ok := object.Key("code") - ok.String(*v.Code) - } - - if v.CodeVerifier != nil { - ok := object.Key("codeVerifier") - ok.String(*v.CodeVerifier) - } - - if v.DeviceCode != nil { - ok := object.Key("deviceCode") - ok.String(*v.DeviceCode) - } - - if v.GrantType != nil { - ok := object.Key("grantType") - ok.String(*v.GrantType) - } - - if v.RedirectUri != nil { - ok := object.Key("redirectUri") - ok.String(*v.RedirectUri) - } - - if v.RefreshToken != nil { - ok := object.Key("refreshToken") - ok.String(*v.RefreshToken) - } - - if v.Scope != nil { - ok := object.Key("scope") - if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { - return err - } - } - - return nil -} - -type awsRestjson1_serializeOpCreateTokenWithIAM struct { -} - -func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*CreateTokenWithIAMInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/token?aws_iam=t") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(input, jsonEncoder.Value); err != nil { - return 
out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.Assertion != nil { - ok := object.Key("assertion") - ok.String(*v.Assertion) - } - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.Code != nil { - ok := object.Key("code") - ok.String(*v.Code) - } - - if v.CodeVerifier != nil { - ok := object.Key("codeVerifier") - ok.String(*v.CodeVerifier) - } - - if v.GrantType != nil { - ok := object.Key("grantType") - ok.String(*v.GrantType) - } - - if v.RedirectUri != nil { - ok := object.Key("redirectUri") - ok.String(*v.RedirectUri) - } - - if v.RefreshToken != nil { - ok := object.Key("refreshToken") - ok.String(*v.RefreshToken) - } - - if v.RequestedTokenType != nil { - ok := object.Key("requestedTokenType") - ok.String(*v.RequestedTokenType) - } - - if v.Scope != nil { - ok := object.Key("scope") - if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { - return err - } - } - - if v.SubjectToken != nil { - ok := object.Key("subjectToken") - ok.String(*v.SubjectToken) - } - - if v.SubjectTokenType != nil { - ok := object.Key("subjectTokenType") - ok.String(*v.SubjectTokenType) - } - - return nil -} - -type awsRestjson1_serializeOpRegisterClient struct { -} - -func (*awsRestjson1_serializeOpRegisterClient) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*RegisterClientInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/client/register") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - 
- if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientName != nil { - ok := object.Key("clientName") - ok.String(*v.ClientName) - } - - if v.ClientType != nil { - ok := object.Key("clientType") - ok.String(*v.ClientType) - } - - if v.EntitledApplicationArn != nil { - ok := object.Key("entitledApplicationArn") - ok.String(*v.EntitledApplicationArn) - } - - if v.GrantTypes != nil { - ok := object.Key("grantTypes") - if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil { - return err - } - } - - if v.IssuerUrl != nil { - ok := object.Key("issuerUrl") - ok.String(*v.IssuerUrl) - } - - if v.RedirectUris != nil { - ok := object.Key("redirectUris") - if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil { - return err - } - } - - if v.Scopes != nil { - ok := object.Key("scopes") - if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { - return err - } - } - - return nil -} - -type awsRestjson1_serializeOpStartDeviceAuthorization struct { -} - -func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { - return "OperationSerializer" -} - -func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*StartDeviceAuthorizationInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - opPath, opQuery := httpbinding.SplitURI("/device_authorization") - request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) - request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" - var restEncoder *httpbinding.Encoder - if request.URL.RawPath == "" { - restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - } else { - request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) - 
restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) - } - - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - restEncoder.SetHeader("Content-Type").String("application/json") - - jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = restEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - return nil -} - -func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error { - object := value.Object() - defer object.Close() - - if v.ClientId != nil { - ok := object.Key("clientId") - ok.String(*v.ClientId) - } - - if v.ClientSecret != nil { - ok := object.Key("clientSecret") - ok.String(*v.ClientSecret) - } - - if v.StartUrl != nil { - ok := object.Key("startUrl") - ok.String(*v.StartUrl) - } - - return nil -} - -func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { - array := value.Array() - defer array.Close() - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go deleted file mode 100644 index 2cfe7b48f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ /dev/null @@ -1,428 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// You do not have sufficient access to perform this action. 
-type AccessDeniedException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *AccessDeniedException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *AccessDeniedException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *AccessDeniedException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "AccessDeniedException" - } - return *e.ErrorCodeOverride -} -func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a request to authorize a client with an access user session -// token is pending. -type AuthorizationPendingException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *AuthorizationPendingException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *AuthorizationPendingException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *AuthorizationPendingException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "AuthorizationPendingException" - } - return *e.ErrorCodeOverride -} -func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the token issued by the service is expired and is no longer -// valid. -type ExpiredTokenException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *ExpiredTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ExpiredTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ExpiredTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ExpiredTokenException" - } - return *e.ErrorCodeOverride -} -func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that an error from the service occurred while trying to process a -// request. -type InternalServerException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InternalServerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InternalServerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InternalServerException" - } - return *e.ErrorCodeOverride -} -func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } - -// Indicates that the clientId or clientSecret in the request is invalid. For -// example, this can occur when a client sends an incorrect clientId or an expired -// clientSecret . 
-type InvalidClientException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidClientException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidClientException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidClientException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidClientException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client information sent in the request during registration -// is invalid. -type InvalidClientMetadataException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidClientMetadataException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidClientMetadataException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidClientMetadataException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidClientMetadataException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a request contains an invalid grant. This can occur if a client -// makes a CreateTokenrequest with an invalid grant type. -type InvalidGrantException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidGrantException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidGrantException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidGrantException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidGrantException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that one or more redirect URI in the request is not supported for -// this operation. -type InvalidRedirectUriException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidRedirectUriException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRedirectUriException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRedirectUriException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRedirectUriException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that something is wrong with the input to the request. For example, a -// required parameter might be missing or out of range. 
-type InvalidRequestException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that a token provided as input to the request was issued by and is -// only usable by calling IAM Identity Center endpoints in another region. -type InvalidRequestRegionException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - Endpoint *string - Region *string - - noSmithyDocumentSerde -} - -func (e *InvalidRequestRegionException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidRequestRegionException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidRequestRegionException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidRequestRegionException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidRequestRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the scope provided in the request is invalid. -type InvalidScopeException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *InvalidScopeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidScopeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidScopeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidScopeException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client is making the request too frequently and is more than -// the service can handle. -type SlowDownException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *SlowDownException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *SlowDownException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *SlowDownException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "SlowDownException" - } - return *e.ErrorCodeOverride -} -func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the client is not currently authorized to make the request. This -// can happen when a clientId is not issued for a public client. 
-type UnauthorizedClientException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *UnauthorizedClientException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnauthorizedClientException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnauthorizedClientException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnauthorizedClientException" - } - return *e.ErrorCodeOverride -} -func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Indicates that the grant type in the request is not supported by the service. -type UnsupportedGrantTypeException struct { - Message *string - - ErrorCodeOverride *string - - Error_ *string - Error_description *string - - noSmithyDocumentSerde -} - -func (e *UnsupportedGrantTypeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnsupportedGrantTypeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnsupportedGrantTypeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnsupportedGrantTypeException" - } - return *e.ErrorCodeOverride -} -func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go deleted file mode 100644 index 0ec0789f8..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go +++ /dev/null @@ -1,9 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" -) - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go deleted file mode 100644 index 9c17e4c8e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go +++ /dev/null @@ -1,184 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package ssooidc - -import ( - "context" - "fmt" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpCreateToken struct { -} - -func (*validateOpCreateToken) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateTokenInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateTokenInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpCreateTokenWithIAM struct { -} - -func (*validateOpCreateTokenWithIAM) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpCreateTokenWithIAM) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*CreateTokenWithIAMInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpCreateTokenWithIAMInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpRegisterClient struct { -} - -func (*validateOpRegisterClient) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*RegisterClientInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpRegisterClientInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpStartDeviceAuthorization struct { -} - -func (*validateOpStartDeviceAuthorization) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*StartDeviceAuthorizationInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpStartDeviceAuthorizationInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) -} - -func addOpCreateTokenWithIAMValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpCreateTokenWithIAM{}, middleware.After) -} - -func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) -} - -func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After) -} - -func validateOpCreateTokenInput(v *CreateTokenInput) error { - if v == nil { - return nil - } - 
invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.ClientSecret == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) - } - if v.GrantType == nil { - invalidParams.Add(smithy.NewErrParamRequired("GrantType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpCreateTokenWithIAMInput(v *CreateTokenWithIAMInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "CreateTokenWithIAMInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.GrantType == nil { - invalidParams.Add(smithy.NewErrParamRequired("GrantType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpRegisterClientInput(v *RegisterClientInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"} - if v.ClientName == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientName")) - } - if v.ClientType == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientType")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"} - if v.ClientId == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientId")) - } - if v.ClientSecret == nil { - invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) - } - if v.StartUrl == nil { - invalidParams.Add(smithy.NewErrParamRequired("StartUrl")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md deleted file mode 100644 index 04b85dc8f..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ /dev/null @@ -1,593 +0,0 @@ -# v1.33.8 (2025-01-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.7 (2025-01-14) - -* No change notes available for this release. - -# v1.33.6 (2025-01-10) - -* **Documentation**: Fixed typos in the descriptions. - -# v1.33.5 (2025-01-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.4 (2025-01-08) - -* No change notes available for this release. - -# v1.33.3 (2024-12-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.2 (2024-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.1 (2024-11-18) - -* **Dependency Update**: Update to smithy-go v1.22.1. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.33.0 (2024-11-14) - -* **Feature**: This release introduces the new API 'AssumeRoot', which returns short-term credentials that you can use to perform privileged tasks. 
- -# v1.32.4 (2024-11-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.3 (2024-10-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.2 (2024-10-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.1 (2024-10-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.32.0 (2024-10-04) - -* **Feature**: Add support for HTTP client metrics. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.31.4 (2024-10-03) - -* No change notes available for this release. - -# v1.31.3 (2024-09-27) - -* No change notes available for this release. - -# v1.31.2 (2024-09-25) - -* No change notes available for this release. - -# v1.31.1 (2024-09-23) - -* No change notes available for this release. - -# v1.31.0 (2024-09-20) - -* **Feature**: Add tracing and metrics support to service clients. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.8 (2024-09-17) - -* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. - -# v1.30.7 (2024-09-04) - -* No change notes available for this release. - -# v1.30.6 (2024-09-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.5 (2024-08-22) - -* No change notes available for this release. - -# v1.30.4 (2024-08-15) - -* **Dependency Update**: Bump minimum Go version to 1.21. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.3 (2024-07-10.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.2 (2024-07-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.1 (2024-06-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.30.0 (2024-06-26) - -* **Feature**: Support list-of-string endpoint parameter. - -# v1.29.1 (2024-06-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.29.0 (2024-06-18) - -* **Feature**: Track usage of various AWS SDK features in user-agent string. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.13 (2024-06-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.12 (2024-06-07) - -* **Bug Fix**: Add clock skew correction on all service clients -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.11 (2024-06-03) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.10 (2024-05-23) - -* No change notes available for this release. - -# v1.28.9 (2024-05-16) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.8 (2024-05-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.7 (2024-05-08) - -* **Bug Fix**: GoDoc improvement - -# v1.28.6 (2024-03-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.5 (2024-03-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.4 (2024-03-07) - -* **Bug Fix**: Remove dependency on go-cmp. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.3 (2024-03-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.2 (2024-03-04) - -* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.1 (2024-02-23) - -* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.28.0 (2024-02-22) - -* **Feature**: Add middleware stack snapshot tests. - -# v1.27.2 (2024-02-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.27.1 (2024-02-20) - -* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. - -# v1.27.0 (2024-02-13) - -* **Feature**: Bump minimum Go version to 1.20 per our language support policy. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.7 (2024-01-04) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.6 (2023-12-20) - -* No change notes available for this release. - -# v1.26.5 (2023-12-08) - -* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. - -# v1.26.4 (2023-12-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.3 (2023-12-06) - -* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. -* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication. - -# v1.26.2 (2023-12-01) - -* **Bug Fix**: Correct wrapping of errors in authentication workflow. -* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.1 (2023-11-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.26.0 (2023-11-29) - -* **Feature**: Expose Options() accessor on service clients. -* **Documentation**: Documentation updates for AWS Security Token Service. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.6 (2023-11-28.2) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.5 (2023-11-28) - -* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. - -# v1.25.4 (2023-11-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.3 (2023-11-17) - -* **Documentation**: API updates for the AWS Security Token Service - -# v1.25.2 (2023-11-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.1 (2023-11-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.25.0 (2023-11-01) - -* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.24.0 (2023-10-31) - -* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
-* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.2 (2023-10-12) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.1 (2023-10-06) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.23.0 (2023-10-02) - -* **Feature**: STS API updates for assumeRole - -# v1.22.0 (2023-09-18) - -* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. -* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. - -# v1.21.5 (2023-08-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.4 (2023-08-18) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.3 (2023-08-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.2 (2023-08-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.21.1 (2023-08-01) - -* No change notes available for this release. - -# v1.21.0 (2023-07-31) - -* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.1 (2023-07-28) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.20.0 (2023-07-25) - -* **Feature**: API updates for the AWS Security Token Service - -# v1.19.3 (2023-07-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.2 (2023-06-15) - -* No change notes available for this release. - -# v1.19.1 (2023-06-13) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.19.0 (2023-05-08) - -* **Feature**: Documentation updates for AWS Security Token Service. - -# v1.18.11 (2023-05-04) - -* No change notes available for this release. - -# v1.18.10 (2023-04-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.9 (2023-04-10) - -* No change notes available for this release. - -# v1.18.8 (2023-04-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.7 (2023-03-21) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.6 (2023-03-10) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.5 (2023-02-22) - -* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. - -# v1.18.4 (2023-02-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.18.3 (2023-02-03) - -* **Dependency Update**: Updated to the latest SDK module versions -* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. - -# v1.18.2 (2023-01-25) - -* **Documentation**: Doc only change to update wording in a key topic - -# v1.18.1 (2023-01-23) - -* No change notes available for this release. - -# v1.18.0 (2023-01-05) - -* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). 
- -# v1.17.7 (2022-12-15) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.6 (2022-12-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.5 (2022-11-22) - -* No change notes available for this release. - -# v1.17.4 (2022-11-17) - -* **Documentation**: Documentation updates for AWS Security Token Service. - -# v1.17.3 (2022-11-16) - -* No change notes available for this release. - -# v1.17.2 (2022-11-10) - -* No change notes available for this release. - -# v1.17.1 (2022-10-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.17.0 (2022-10-21) - -* **Feature**: Add presign functionality for sts:AssumeRole operation -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.19 (2022-09-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.18 (2022-09-14) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.17 (2022-09-02) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.16 (2022-08-31) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.15 (2022-08-30) - -* No change notes available for this release. - -# v1.16.14 (2022-08-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.13 (2022-08-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.12 (2022-08-09) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.11 (2022-08-08) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.10 (2022-08-01) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.9 (2022-07-05) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.8 (2022-06-29) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.7 (2022-06-07) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.6 (2022-05-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.5 (2022-05-16) - -* **Documentation**: Documentation updates for AWS Security Token Service. - -# v1.16.4 (2022-04-25) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.3 (2022-03-30) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.2 (2022-03-24) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.1 (2022-03-23) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.16.0 (2022-03-08) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Documentation**: Updated service client model to latest release. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.15.0 (2022-02-24) - -* **Feature**: API client updated -* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. 
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.14.0 (2022-01-14) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.13.0 (2022-01-07) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.12.0 (2021-12-21) - -* **Feature**: Updated to latest service endpoints - -# v1.11.1 (2021-12-02) - -* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.11.0 (2021-11-30) - -* **Feature**: API client updated - -# v1.10.1 (2021-11-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.10.0 (2021-11-12) - -* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. - -# v1.9.0 (2021-11-06) - -* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.8.0 (2021-10-21) - -* **Feature**: API client updated -* **Feature**: Updated to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.2 (2021-10-11) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.1 (2021-09-17) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.7.0 (2021-08-27) - -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.2 (2021-08-19) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.1 (2021-08-04) - -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.6.0 (2021-07-15) - -* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. -* **Documentation**: Updated service model to latest revision. -* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.5.0 (2021-06-25) - -* **Feature**: API client updated -* **Feature**: Updated `github.com/aws/smithy-go` to latest version -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.1 (2021-05-20) - -* **Dependency Update**: Updated to the latest SDK module versions - -# v1.4.0 (2021-05-14) - -* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. -* **Dependency Update**: Updated to the latest SDK module versions - diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go deleted file mode 100644 index 4e678ce2a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ /dev/null @@ -1,1064 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/defaults" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/protocol/query" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" - acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" - presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithydocument "github.com/aws/smithy-go/document" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net" - "net/http" - "sync/atomic" - "time" -) - -const ServiceID = "STS" -const ServiceAPIVersion = "2011-06-15" - -type operationMetrics struct { - Duration metrics.Float64Histogram - SerializeDuration metrics.Float64Histogram - ResolveIdentityDuration metrics.Float64Histogram - ResolveEndpointDuration metrics.Float64Histogram - SignRequestDuration metrics.Float64Histogram - DeserializeDuration metrics.Float64Histogram -} - -func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { - switch name { - case "client.call.duration": - return m.Duration - case "client.call.serialization_duration": - return m.SerializeDuration - case "client.call.resolve_identity_duration": - return m.ResolveIdentityDuration - case "client.call.resolve_endpoint_duration": - return m.ResolveEndpointDuration - case "client.call.signing_duration": - return m.SignRequestDuration - case "client.call.deserialization_duration": - return m.DeserializeDuration - default: - panic("unrecognized operation metric") - } -} - -func timeOperationMetric[T any]( - ctx context.Context, metric string, fn func() (T, error), - opts ...metrics.RecordMetricOption, -) (T, error) { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - start := time.Now() - v, err := fn() - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) - return v, err -} - -func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { - instr := getOperationMetrics(ctx).histogramFor(metric) - opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) - - var ended bool - start := time.Now() - return func() { - if ended { - return - } - ended = true - - end := time.Now() - - elapsed := end.Sub(start) - instr.Record(ctx, float64(elapsed)/1e9, opts...) 
- } -} - -func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { - return func(o *metrics.RecordMetricOptions) { - o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) - o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) - } -} - -type operationMetricsKey struct{} - -func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { - meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sts") - om := &operationMetrics{} - - var err error - - om.Duration, err = operationMetricTimer(meter, "client.call.duration", - "Overall call duration (including retries and time to send or receive request and response body)") - if err != nil { - return nil, err - } - om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", - "The time it takes to serialize a message body") - if err != nil { - return nil, err - } - om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", - "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") - if err != nil { - return nil, err - } - om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", - "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") - if err != nil { - return nil, err - } - om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", - "The time it takes to sign a request") - if err != nil { - return nil, err - } - om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", - "The time it takes to deserialize a message body") - if err != nil { - return nil, err - } - - return context.WithValue(parent, operationMetricsKey{}, om), nil -} - -func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { - return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = desc - }) -} - -func getOperationMetrics(ctx context.Context) *operationMetrics { - return ctx.Value(operationMetricsKey{}).(*operationMetrics) -} - -func operationTracer(p tracing.TracerProvider) tracing.Tracer { - return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sts") -} - -// Client provides the API client to make operations call for AWS Security Token -// Service. -type Client struct { - options Options - - // Difference between the time reported by the server and the client - timeOffset *atomic.Int64 -} - -// New returns an initialized Client based on the functional options. Provide -// additional functional options to further configure the behavior of the client, -// such as changing the client's endpoint or adding custom middleware behavior. 
-func New(options Options, optFns ...func(*Options)) *Client { - options = options.Copy() - - resolveDefaultLogger(&options) - - setResolvedDefaultsMode(&options) - - resolveRetryer(&options) - - resolveHTTPClient(&options) - - resolveHTTPSignerV4(&options) - - resolveEndpointResolverV2(&options) - - resolveTracerProvider(&options) - - resolveMeterProvider(&options) - - resolveAuthSchemeResolver(&options) - - for _, fn := range optFns { - fn(&options) - } - - finalizeRetryMaxAttempts(&options) - - ignoreAnonymousAuth(&options) - - wrapWithAnonymousAuth(&options) - - resolveAuthSchemes(&options) - - client := &Client{ - options: options, - } - - initializeTimeOffsetResolver(client) - - return client -} - -// Options returns a copy of the client configuration. -// -// Callers SHOULD NOT perform mutations on any inner structures within client -// config. Config overrides should instead be made on a per-operation basis through -// functional options. -func (c *Client) Options() Options { - return c.options.Copy() -} - -func (c *Client) invokeOperation( - ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, -) ( - result interface{}, metadata middleware.Metadata, err error, -) { - ctx = middleware.ClearStackValues(ctx) - ctx = middleware.WithServiceID(ctx, ServiceID) - ctx = middleware.WithOperationName(ctx, opID) - - stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) - options := c.options.Copy() - - for _, fn := range optFns { - fn(&options) - } - - finalizeOperationRetryMaxAttempts(&options, *c) - - finalizeClientEndpointResolverOptions(&options) - - for _, fn := range stackFns { - if err := fn(stack, options); err != nil { - return nil, metadata, err - } - } - - for _, fn := range options.APIOptions { - if err := fn(stack); err != nil { - return nil, metadata, err - } - } - - ctx, err = withOperationMetrics(ctx, options.MeterProvider) - if err != nil { - return nil, metadata, err - } - - tracer := operationTracer(options.TracerProvider) - spanName := fmt.Sprintf("%s.%s", ServiceID, opID) - - ctx = tracing.WithOperationTracer(ctx, tracer) - - ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { - o.Kind = tracing.SpanKindClient - o.Properties.Set("rpc.system", "aws-api") - o.Properties.Set("rpc.method", opID) - o.Properties.Set("rpc.service", ServiceID) - }) - endTimer := startMetricTimer(ctx, "client.call.duration") - defer endTimer() - defer span.End() - - handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { - o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") - }) - decorated := middleware.DecorateHandler(handler, stack) - result, metadata, err = decorated.Handle(ctx, params) - if err != nil { - span.SetProperty("exception.type", fmt.Sprintf("%T", err)) - span.SetProperty("exception.message", err.Error()) - - var aerr smithy.APIError - if errors.As(err, &aerr) { - span.SetProperty("api.error_code", aerr.ErrorCode()) - span.SetProperty("api.error_message", aerr.ErrorMessage()) - span.SetProperty("api.error_fault", aerr.ErrorFault().String()) - } - - err = &smithy.OperationError{ - ServiceID: ServiceID, - OperationName: opID, - Err: err, - } - } - - span.SetProperty("error", err != nil) - if err == nil { - span.SetStatus(tracing.SpanStatusOK) - } else { - span.SetStatus(tracing.SpanStatusError) - } - - return result, metadata, err -} - -type operationInputKey struct{} - -func setOperationInput(ctx 
context.Context, input interface{}) context.Context { - return middleware.WithStackValue(ctx, operationInputKey{}, input) -} - -func getOperationInput(ctx context.Context) interface{} { - return middleware.GetStackValue(ctx, operationInputKey{}) -} - -type setOperationInputMiddleware struct { -} - -func (*setOperationInputMiddleware) ID() string { - return "setOperationInput" -} - -func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - ctx = setOperationInput(ctx, in.Parameters) - return next.HandleSerialize(ctx, in) -} - -func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { - if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { - return fmt.Errorf("add ResolveAuthScheme: %w", err) - } - if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { - return fmt.Errorf("add GetIdentity: %v", err) - } - if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { - return fmt.Errorf("add ResolveEndpointV2: %v", err) - } - if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { - return fmt.Errorf("add Signing: %w", err) - } - return nil -} -func resolveAuthSchemeResolver(options *Options) { - if options.AuthSchemeResolver == nil { - options.AuthSchemeResolver = &defaultAuthSchemeResolver{} - } -} - -func resolveAuthSchemes(options *Options) { - if options.AuthSchemes == nil { - options.AuthSchemes = []smithyhttp.AuthScheme{ - internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ - Signer: options.HTTPSignerV4, - Logger: options.Logger, - LogSigning: options.ClientLogMode.IsSigning(), - }), - } - } -} - -type noSmithyDocumentSerde = smithydocument.NoSerde - -type legacyEndpointContextSetter struct { - LegacyResolver EndpointResolver -} - -func (*legacyEndpointContextSetter) ID() string { - return "legacyEndpointContextSetter" -} - -func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - if m.LegacyResolver != nil { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) - } - - return next.HandleInitialize(ctx, in) - -} -func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { - return stack.Initialize.Add(&legacyEndpointContextSetter{ - LegacyResolver: o.EndpointResolver, - }, middleware.Before) -} - -func resolveDefaultLogger(o *Options) { - if o.Logger != nil { - return - } - o.Logger = logging.Nop{} -} - -func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { - return middleware.AddSetLoggerMiddleware(stack, o.Logger) -} - -func setResolvedDefaultsMode(o *Options) { - if len(o.resolvedDefaultsMode) > 0 { - return - } - - var mode aws.DefaultsMode - mode.SetFromString(string(o.DefaultsMode)) - - if mode == aws.DefaultsModeAuto { - mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) - } - - o.resolvedDefaultsMode = mode -} - -// NewFromConfig returns a new client from the provided config. 
-func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { - opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, - } - resolveAWSRetryerProvider(cfg, &opts) - resolveAWSRetryMaxAttempts(cfg, &opts) - resolveAWSRetryMode(cfg, &opts) - resolveAWSEndpointResolver(cfg, &opts) - resolveUseDualStackEndpoint(cfg, &opts) - resolveUseFIPSEndpoint(cfg, &opts) - resolveBaseEndpoint(cfg, &opts) - return New(opts, optFns...) -} - -func resolveHTTPClient(o *Options) { - var buildable *awshttp.BuildableClient - - if o.HTTPClient != nil { - var ok bool - buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) - if !ok { - return - } - } else { - buildable = awshttp.NewBuildableClient() - } - - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { - if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { - dialer.Timeout = dialerTimeout - } - }) - - buildable = buildable.WithTransportOptions(func(transport *http.Transport) { - if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { - transport.TLSHandshakeTimeout = tlsHandshakeTimeout - } - }) - } - - o.HTTPClient = buildable -} - -func resolveRetryer(o *Options) { - if o.Retryer != nil { - return - } - - if len(o.RetryMode) == 0 { - modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) - if err == nil { - o.RetryMode = modeConfig.RetryMode - } - } - if len(o.RetryMode) == 0 { - o.RetryMode = aws.RetryModeStandard - } - - var standardOptions []func(*retry.StandardOptions) - if v := o.RetryMaxAttempts; v != 0 { - standardOptions = append(standardOptions, func(so *retry.StandardOptions) { - so.MaxAttempts = v - }) - } - - switch o.RetryMode { - case aws.RetryModeAdaptive: - var adaptiveOptions []func(*retry.AdaptiveModeOptions) - if len(standardOptions) != 0 { - adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { - ao.StandardOptions = append(ao.StandardOptions, standardOptions...) - }) - } - o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) - - default: - o.Retryer = retry.NewStandard(standardOptions...) 
- } -} - -func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { - if cfg.Retryer == nil { - return - } - o.Retryer = cfg.Retryer() -} - -func resolveAWSRetryMode(cfg aws.Config, o *Options) { - if len(cfg.RetryMode) == 0 { - return - } - o.RetryMode = cfg.RetryMode -} -func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { - if cfg.RetryMaxAttempts == 0 { - return - } - o.RetryMaxAttempts = cfg.RetryMaxAttempts -} - -func finalizeRetryMaxAttempts(o *Options) { - if o.RetryMaxAttempts == 0 { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func finalizeOperationRetryMaxAttempts(o *Options, client Client) { - if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { - return - } - - o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) -} - -func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { - if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { - return - } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) -} - -func addClientUserAgent(stack *middleware.Stack, options Options) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion) - if len(options.AppID) > 0 { - ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) - } - - return nil -} - -func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { - id := (*awsmiddleware.RequestUserAgent)(nil).ID() - mw, ok := stack.Build.Get(id) - if !ok { - mw = awsmiddleware.NewRequestUserAgent() - if err := stack.Build.Add(mw, middleware.After); err != nil { - return nil, err - } - } - - ua, ok := mw.(*awsmiddleware.RequestUserAgent) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) - } - - return ua, nil -} - -type HTTPSignerV4 interface { - SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error -} - -func resolveHTTPSignerV4(o *Options) { - if o.HTTPSignerV4 != nil { - return - } - o.HTTPSignerV4 = newDefaultV4Signer(*o) -} - -func newDefaultV4Signer(o Options) *v4.Signer { - return v4.NewSigner(func(so *v4.SignerOptions) { - so.Logger = o.Logger - so.LogSigning = o.ClientLogMode.IsSigning() - }) -} - -func addClientRequestID(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) -} - -func addComputeContentLength(stack *middleware.Stack) error { - return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) -} - -func addRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) -} - -func addRecordResponseTiming(stack *middleware.Stack) error { - return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) -} - -func addSpanRetryLoop(stack *middleware.Stack, options Options) error { - return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) -} - -type spanRetryLoop struct { - options Options -} - -func (*spanRetryLoop) ID() string { - return "spanRetryLoop" -} - -func (m *spanRetryLoop) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - middleware.FinalizeOutput, 
middleware.Metadata, error, -) { - tracer := operationTracer(m.options.TracerProvider) - ctx, span := tracer.StartSpan(ctx, "RetryLoop") - defer span.End() - - return next.HandleFinalize(ctx, in) -} -func addStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) -} - -func addUnsignedPayload(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) -} - -func addComputePayloadSHA256(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) -} - -func addContentSHA256Header(stack *middleware.Stack) error { - return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) -} - -func addIsWaiterUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) - return nil - }) -} - -func addIsPaginatorUserAgent(o *Options) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - ua, err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) - return nil - }) -} - -func addRetry(stack *middleware.Stack, o Options) error { - attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { - m.LogAttempts = o.ClientLogMode.IsRetries() - m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") - }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { - return err - } - if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { - return err - } - return nil -} - -// resolves dual-stack endpoint configuration -func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseDualStackEndpoint = value - } - return nil -} - -// resolves FIPS endpoint configuration -func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { - if len(cfg.ConfigSources) == 0 { - return nil - } - value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) - if err != nil { - return err - } - if found { - o.EndpointOptions.UseFIPSEndpoint = value - } - return nil -} - -func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { - if mode == aws.AccountIDEndpointModeDisabled { - return nil - } - - if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { - return aws.String(ca.Credentials.AccountID) - } - - return nil -} - -func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { - mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} - if err := stack.Build.Add(&mw, middleware.After); err != nil { - return err - } - return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) -} -func initializeTimeOffsetResolver(c *Client) { - c.timeOffset = new(atomic.Int64) -} - -func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { - ua, 
err := getOrAddRequestUserAgent(stack) - if err != nil { - return err - } - - switch options.Retryer.(type) { - case *retry.Standard: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) - case *retry.AdaptiveMode: - ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) - } - return nil -} - -func resolveTracerProvider(options *Options) { - if options.TracerProvider == nil { - options.TracerProvider = &tracing.NopTracerProvider{} - } -} - -func resolveMeterProvider(options *Options) { - if options.MeterProvider == nil { - options.MeterProvider = metrics.NopMeterProvider{} - } -} - -func addRecursionDetection(stack *middleware.Stack) error { - return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) -} - -func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) - -} - -func addResponseErrorMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) - -} - -// HTTPPresignerV4 represents presigner interface used by presign url client -type HTTPPresignerV4 interface { - PresignHTTP( - ctx context.Context, credentials aws.Credentials, r *http.Request, - payloadHash string, service string, region string, signingTime time.Time, - optFns ...func(*v4.SignerOptions), - ) (url string, signedHeader http.Header, err error) -} - -// PresignOptions represents the presign client options -type PresignOptions struct { - - // ClientOptions are list of functional options to mutate client options used by - // the presign client. - ClientOptions []func(*Options) - - // Presigner is the presigner used by the presign url client - Presigner HTTPPresignerV4 -} - -func (o PresignOptions) copy() PresignOptions { - clientOptions := make([]func(*Options), len(o.ClientOptions)) - copy(clientOptions, o.ClientOptions) - o.ClientOptions = clientOptions - return o -} - -// WithPresignClientFromClientOptions is a helper utility to retrieve a function -// that takes PresignOption as input -func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { - return withPresignClientFromClientOptions(optFns).options -} - -type withPresignClientFromClientOptions []func(*Options) - -func (w withPresignClientFromClientOptions) options(o *PresignOptions) { - o.ClientOptions = append(o.ClientOptions, w...) -} - -// PresignClient represents the presign url client -type PresignClient struct { - client *Client - options PresignOptions -} - -// NewPresignClient generates a presign client using provided API Client and -// presign options -func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { - var options PresignOptions - for _, fn := range optFns { - fn(&options) - } - if len(options.ClientOptions) != 0 { - c = New(c.options, options.ClientOptions...) 
- } - - if options.Presigner == nil { - options.Presigner = newDefaultV4Signer(c.options) - } - - return &PresignClient{ - client: c, - options: options, - } -} - -func withNopHTTPClientAPIOption(o *Options) { - o.HTTPClient = smithyhttp.NopClient{} -} - -type presignContextPolyfillMiddleware struct { -} - -func (*presignContextPolyfillMiddleware) ID() string { - return "presignContextPolyfill" -} - -func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - schemeID := rscheme.Scheme.SchemeID() - - if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { - if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr) - } - } else if schemeID == "aws.auth#sigv4a" { - if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningName(ctx, sn) - } - if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { - ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) - } - } - - return next.HandleFinalize(ctx, in) -} - -type presignConverter PresignOptions - -func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { - stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok { - stack.Finalize.Remove((*retry.Attempt)(nil).ID()) - } - if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok { - stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) - } - stack.Deserialize.Clear() - stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) - stack.Build.Remove("UserAgent") - if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { - return err - } - - pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: options.Credentials, - Presigner: c.Presigner, - LogSigning: options.ClientLogMode.IsSigning(), - }) - if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { - return err - } - if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { - return err - } - // convert request to a GET request - err = query.AddAsGetRequestMiddleware(stack) - if err != nil { - return err - } - err = presignedurlcust.AddAsIsPresigningMiddleware(stack) - if err != nil { - return err - } - return nil -} - -func addRequestResponseLogging(stack *middleware.Stack, o Options) error { - return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ - LogRequest: o.ClientLogMode.IsRequest(), - LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), - LogResponse: o.ClientLogMode.IsResponse(), - LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), - }, middleware.After) -} - -type disableHTTPSMiddleware struct { - DisableHTTPS bool -} - -func (*disableHTTPSMiddleware) ID() string { - return "disableHTTPS" -} - -func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in 
middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { - req.URL.Scheme = "http" - } - - return next.HandleFinalize(ctx, in) -} - -func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { - return stack.Finalize.Insert(&disableHTTPSMiddleware{ - DisableHTTPS: o.EndpointOptions.DisableHTTPS, - }, "ResolveEndpointV2", middleware.After) -} - -type spanInitializeStart struct { -} - -func (*spanInitializeStart) ID() string { - return "spanInitializeStart" -} - -func (m *spanInitializeStart) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "Initialize") - - return next.HandleInitialize(ctx, in) -} - -type spanInitializeEnd struct { -} - -func (*spanInitializeEnd) ID() string { - return "spanInitializeEnd" -} - -func (m *spanInitializeEnd) HandleInitialize( - ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, -) ( - middleware.InitializeOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleInitialize(ctx, in) -} - -type spanBuildRequestStart struct { -} - -func (*spanBuildRequestStart) ID() string { - return "spanBuildRequestStart" -} - -func (m *spanBuildRequestStart) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - middleware.SerializeOutput, middleware.Metadata, error, -) { - ctx, _ = tracing.StartSpan(ctx, "BuildRequest") - - return next.HandleSerialize(ctx, in) -} - -type spanBuildRequestEnd struct { -} - -func (*spanBuildRequestEnd) ID() string { - return "spanBuildRequestEnd" -} - -func (m *spanBuildRequestEnd) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - middleware.BuildOutput, middleware.Metadata, error, -) { - ctx, span := tracing.PopSpan(ctx) - span.End() - - return next.HandleBuild(ctx, in) -} - -func addSpanInitializeStart(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) -} - -func addSpanInitializeEnd(stack *middleware.Stack) error { - return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) -} - -func addSpanBuildRequestStart(stack *middleware.Stack) error { - return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) -} - -func addSpanBuildRequestEnd(stack *middleware.Stack) error { - return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go deleted file mode 100644 index d05632774..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ /dev/null @@ -1,547 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
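For orientation while reviewing this removal: the client code deleted above wires together option resolution, retries, signing, and tracing for the STS service. Assuming the package continues to be consumed from the module cache once its vendored copy is dropped, a minimal construction sketch looks like the following; config.LoadDefaultConfig and the aws helpers come from sibling SDK modules that are not part of this diff, and the retry settings are illustrative only, not values used by this project.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// Shared configuration supplies region, credentials, and default retry
	// behavior. config.LoadDefaultConfig is assumed from the SDK's config module.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// NewFromConfig copies fields from aws.Config into Options and then runs
	// the same resolvers (retryer, HTTP client, signer, endpoints) as the
	// deleted client code above; functional options tweak the result.
	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.RetryMaxAttempts = 5                // illustrative value
		o.RetryMode = aws.RetryModeAdaptive   // standard mode is the default
	})

	// Options returns a copy of the client configuration; per-operation
	// overrides are made through functional options on each call instead.
	_ = client.Options()
}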
- -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources. These temporary credentials consist of an access -// key ID, a secret access key, and a security token. Typically, you use AssumeRole -// within your account or for cross-account access. For a comparison of AssumeRole -// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the -// IAM User Guide. -// -// # Permissions -// -// The temporary security credentials created by AssumeRole can be used to make -// API calls to any Amazon Web Services service with the following exception: You -// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken -// API operations. -// -// (Optional) You can pass inline or managed session policies to this operation. -// You can pass a single JSON policy document to use as an inline session policy. -// You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use -// as managed session policies. The plaintext that you use for both inline and -// managed session policies can't exceed 2,048 characters. Passing policies to this -// operation returns new temporary credentials. The resulting session's permissions -// are the intersection of the role's identity-based policy and the session -// policies. You can use the role's temporary credentials in subsequent Amazon Web -// Services API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed by the -// identity-based policy of the role that is being assumed. For more information, -// see [Session Policies]in the IAM User Guide. -// -// When you create a role, you create two policies: a role trust policy that -// specifies who can assume the role, and a permissions policy that specifies what -// can be done with the role. You specify the trusted principal that is allowed to -// assume the role in the role trust policy. -// -// To assume a role from a different account, your Amazon Web Services account -// must be trusted by the role. The trust relationship is defined in the role's -// trust policy when the role is created. That trust policy states which accounts -// are allowed to delegate that access to users in the account. -// -// A user who wants to access a role in a different account must also have -// permissions that are delegated from the account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN of the -// role in the other account. -// -// To allow a user to assume a role in the same account, you can do either of the -// following: -// -// - Attach a policy to the user that allows the user to call AssumeRole (as long -// as the role's trust policy trusts the account). -// -// - Add the user as a principal directly in the role's trust policy. -// -// You can do either because the role’s trust policy acts as an IAM resource-based -// policy. When a resource-based policy grants access to a principal in the same -// account, no additional identity-based policy is required. 
For more information -// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide. -// -// # Tags -// -// (Optional) You can pass tag key-value pairs to your session. These tags are -// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM -// User Guide. -// -// An administrator must grant you the permissions necessary to pass session tags. -// The administrator can also create granular permissions to allow you to pass only -// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during role -// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. -// -// # Using MFA with AssumeRole -// -// (Optional) You can include multi-factor authentication (MFA) information when -// you call AssumeRole . This is useful for cross-account scenarios to ensure that -// the user that assumes the role has been authenticated with an Amazon Web -// Services MFA device. In that scenario, the trust policy of the role being -// assumed includes a condition that tests for MFA authentication. If the caller -// does not include valid MFA information, the request to assume the role is -// denied. The condition in a trust policy that tests for MFA authentication might -// look like the following example. -// -// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} -// -// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide. -// -// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode -// parameters. The SerialNumber value identifies the user's hardware or virtual -// MFA device. The TokenCode is the time-based one-time password (TOTP) that the -// MFA device produces. -// -// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html -// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining -// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html -func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { - if params == nil { - params = &AssumeRoleInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleInput struct { - - // The Amazon Resource Name (ARN) of the role to assume. - // - // This member is required. - RoleArn *string - - // An identifier for the assumed role session. 
- // - // Use the role session name to uniquely identify a session when the same role is - // assumed by different principals or for different reasons. In cross-account - // scenarios, the role session name is visible to, and can be logged by the account - // that owns the role. The role session name is also used in the ARN of the assumed - // role principal. This means that subsequent cross-account API requests that use - // the temporary security credentials will expose the role session name to the - // external account in their CloudTrail logs. - // - // For security purposes, administrators can view this field in [CloudTrail logs] to help identify - // who performed an action in Amazon Web Services. Your administrator might require - // that you specify your user name as the session name when you assume the role. - // For more information, see [sts:RoleSessionName]sts:RoleSessionName . - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds - // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname - // - // This member is required. - RoleSessionName *string - - // The duration, in seconds, of the role session. The value specified can range - // from 900 seconds (15 minutes) up to the maximum session duration set for the - // role. The maximum session duration setting can have a value from 1 hour to 12 - // hours. If you specify a value higher than this setting or the administrator - // setting (whichever is lower), the operation fails. For example, if you specify a - // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. - // - // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API - // role session to a maximum of one hour. When you use the AssumeRole API - // operation to assume a role, you can specify the duration of your role session - // with the DurationSeconds parameter. You can specify a parameter value of up to - // 43200 seconds (12 hours), depending on the maximum session duration setting for - // your role. However, if you assume a role using role chaining and provide a - // DurationSeconds parameter value greater than one hour, the operation fails. To - // learn how to view the maximum value for your role, see [Update the maximum session duration for a role]. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request to - // the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. 
- // - // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration - // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html - DurationSeconds *int32 - - // A unique identifier that might be required when you assume a role in another - // account. If the administrator of the account to which the role belongs provided - // you with an external ID, then provide that value in the ExternalId parameter. - // This value can be any string, such as a passphrase or account number. A - // cross-account role is usually set up to trust everyone in an account. Therefore, - // the administrator of the trusting account might send an external ID to the - // administrator of the trusted account. That way, only someone with the ID can - // assume the role, rather than everyone in the account. For more information about - // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@:/- - // - // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html - ExternalId *string - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM - // User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // For more information about role session permissions, see [Session policies]. 
- // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the - // Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's - // identity-based policy and the session policies. You can use the role's temporary - // credentials in subsequent Amazon Web Services API calls to access resources in - // the account that owns the role. You cannot use session policies to grant more - // permissions than those allowed by the identity-based policy of the role that is - // being assumed. For more information, see [Session Policies]in the IAM User Guide. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - PolicyArns []types.PolicyDescriptorType - - // A list of previously acquired trusted context assertions in the format of a - // JSON array. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. - // - // The following is an example of a ProvidedContext value that includes a single - // trusted context assertion and the ARN of the context provider from which the - // trusted context assertion was generated. - // - // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] - ProvidedContexts []types.ProvidedContext - - // The identification number of the MFA device that is associated with the user - // who is making the AssumeRole call. Specify this value if the trust policy of - // the role being assumed includes a condition that requires MFA authentication. - // The value is either the serial number for a hardware device (such as - // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user ). - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - SerialNumber *string - - // The source identity specified by the principal that is calling the AssumeRole - // operation. 
The source identity value persists across [chained role]sessions. - // - // You can require users to specify a source identity when they assume a role. You - // do this by using the [sts:SourceIdentity]sts:SourceIdentity condition key in a role trust policy. - // You can use source identity information in CloudTrail logs to determine who took - // actions with a role. You can use the aws:SourceIdentity condition key to - // further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the - // IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: +=,.@-. You cannot use a - // value that begins with the text aws: . This prefix is reserved for Amazon Web - // Services internal use. - // - // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#iam-term-role-chaining - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - // [sts:SourceIdentity]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceidentity - SourceIdentity *string - - // A list of session tags that you want to pass. Each session tag consists of a - // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions] - // in the IAM User Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plaintext - // session tag keys can’t exceed 128 characters, and the values can’t exceed 256 - // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the role. When you do, session tags override a role tag with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This means - // that you cannot have separate Department and department tag keys. Assume that - // the role has the Department = Marketing tag and you pass the department = - // engineering session tag. Department and department are not saved as separate - // tags, and the session tag passed in the request takes precedence over the role - // tag. - // - // Additionally, if you used temporary credentials to perform this operation, the - // new session inherits any transitive session tags from the calling session. If - // you pass a session tag with the same key as an inherited tag, the operation - // fails. To view the inherited tags for a session, see the CloudTrail logs. For - // more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide. 
- // - // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html - // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length - // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs - Tags []types.Tag - - // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA. (In other words, if the policy includes a condition that - // tests for MFA). If the role being assumed requires MFA and if the TokenCode - // value is missing or expired, the AssumeRole call returns an "access denied" - // error. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string - - // A list of keys for session tags that you want to set as transitive. If you set - // a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. - // - // This parameter is optional. The transitive status of a session tag does not - // impact its packed binary size. - // - // If you choose not to specify a transitive tag key, then no tags are passed from - // this session to any subsequent sessions. - // - // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining - TransitiveTagKeys []string - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRole request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type AssumeRoleOutput struct { - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. For - // example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole . - AssumedRoleUser *types.AssumedRoleUser - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // The source identity specified by the principal that is calling the AssumeRole - // operation. - // - // You can require users to specify a source identity when they assume a role. You - // do this by using the sts:SourceIdentity condition key in a role trust policy. - // You can use source identity information in CloudTrail logs to determine who took - // actions with a role. You can use the aws:SourceIdentity condition key to - // further control access to Amazon Web Services resources based on the value of - // source identity. 
For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the - // IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - SourceIdentity *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRole"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRole(region 
string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRole", - } -} - -// PresignAssumeRole is used to generate a presigned HTTP Request which contains -// presigned URL, signed headers and HTTP method used. -func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &AssumeRoleInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns, - c.client.addOperationAssumeRoleMiddlewares, - presignConverter(options).convertToPresignMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go deleted file mode 100644 index d0e117ac9..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ /dev/null @@ -1,455 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials for users who have been -// authenticated via a SAML authentication response. This operation provides a -// mechanism for tying an enterprise identity store or directory to role-based -// Amazon Web Services access without user-specific credentials or configuration. -// For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -// -// The temporary security credentials returned by this operation consist of an -// access key ID, a secret access key, and a security token. Applications can use -// these temporary security credentials to sign calls to Amazon Web Services -// services. -// -// # Session Duration -// -// By default, the temporary security credentials created by AssumeRoleWithSAML -// last for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. Your role session lasts for the -// duration that you specify, or until the time specified in the SAML -// authentication response's SessionNotOnOrAfter value, whichever is shorter. You -// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the -// maximum session duration setting for the role. This setting can have a value -// from 1 hour to 12 hours. To learn how to view the maximum value for your role, -// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console URL. -// For more information, see [Using IAM Roles]in the IAM User Guide. 
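As a usage sketch for the two entry points removed just above: Client.AssumeRole exchanges a role ARN and session name for temporary credentials, and PresignClient.PresignAssumeRole turns the same request into a presigned URL. The role ARN, session name, and the helper function below are placeholders; aws.String, aws.Int32, aws.ToString, and the fields of v4.PresignedHTTPRequest are assumed from the wider SDK rather than shown in this diff.

package stsexample

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// assumeAndPresign is a hypothetical helper: it assumes a role and then
// presigns the same AssumeRole request using the presign client shown above.
func assumeAndPresign(ctx context.Context, client *sts.Client) error {
	input := &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder ARN
		RoleSessionName: aws.String("example-session"),                              // placeholder name
		DurationSeconds: aws.Int32(3600),
	}

	out, err := client.AssumeRole(ctx, input)
	if err != nil {
		return err
	}
	log.Printf("assumed role, access key id %s", aws.ToString(out.Credentials.AccessKeyId))

	// NewPresignClient reuses the API client's options; the presign converter
	// above swaps the signing middleware for a presigner and issues a GET.
	presigner := sts.NewPresignClient(client)
	req, err := presigner.PresignAssumeRole(ctx, input)
	if err != nil {
		return err
	}
	log.Printf("presigned request: %s %s", req.Method, req.URL)
	return nil
}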
-// -// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one -// hour. When you use the AssumeRole API operation to assume a role, you can -// specify the duration of your role session with the DurationSeconds parameter. -// You can specify a parameter value of up to 43200 seconds (12 hours), depending -// on the maximum session duration setting for your role. However, if you assume a -// role using role chaining and provide a DurationSeconds parameter value greater -// than one hour, the operation fails. -// -// # Permissions -// -// The temporary security credentials created by AssumeRoleWithSAML can be used to -// make API calls to any Amazon Web Services service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. -// -// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a -// single JSON policy document to use as an inline session policy. You can also -// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed -// session policies. The plaintext that you use for both inline and managed session -// policies can't exceed 2,048 characters. Passing policies to this operation -// returns new temporary credentials. The resulting session's permissions are the -// intersection of the role's identity-based policy and the session policies. You -// can use the role's temporary credentials in subsequent Amazon Web Services API -// calls to access resources in the account that owns the role. You cannot use -// session policies to grant more permissions than those allowed by the -// identity-based policy of the role that is being assumed. For more information, -// see [Session Policies]in the IAM User Guide. -// -// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services -// security credentials. The identity of the caller is validated by using keys in -// the metadata document that is uploaded for the SAML provider entity for your -// identity provider. -// -// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The -// entry includes the value in the NameID element of the SAML assertion. We -// recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the -// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). -// -// # Tags -// -// (Optional) You can configure your IdP to pass attributes into your SAML -// assertion as session tags. Each session tag consists of a key name and an -// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User -// Guide. -// -// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed -// 128 characters and the values can’t exceed 256 characters. For these and -// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. -// -// An Amazon Web Services conversion compresses the passed inline session policy, -// managed policy ARNs, and session tags into a packed binary format that has a -// separate limit. Your request can fail for this limit even if your plaintext -// meets the other requirements. The PackedPolicySize response element indicates -// by percentage how close the policies and tags for your request are to the upper -// size limit. -// -// You can pass a session tag with the same key as a tag that is attached to the -// role. 
When you do, session tags override the role's tags with the same key. -// -// An administrator must grant you the permissions necessary to pass session tags. -// The administrator can also create granular permissions to allow you to pass only -// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during role -// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. -// -// # SAML Configuration -// -// Before your application can call AssumeRoleWithSAML , you must configure your -// SAML identity provider (IdP) to issue the claims required by Amazon Web -// Services. Additionally, you must use Identity and Access Management (IAM) to -// create a SAML provider entity in your Amazon Web Services account that -// represents your identity provider. You must also create an IAM role that -// specifies this SAML provider in its trust policy. -// -// For more information, see the following resources: -// -// [About SAML 2.0-based Federation] -// - in the IAM User Guide. -// -// [Creating SAML Identity Providers] -// - in the IAM User Guide. -// -// [Configuring a Relying Party and Claims] -// - in the IAM User Guide. -// -// [Creating a Role for SAML 2.0 Federation] -// - in the IAM User Guide. -// -// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session -// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html -// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length -// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html -// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html -// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html -// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining -// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html -// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html -// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining -func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { - if params == nil { - params = &AssumeRoleWithSAMLInput{} - } - - result, metadata, 
err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleWithSAMLOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleWithSAMLInput struct { - - // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the - // IdP. - // - // This member is required. - PrincipalArn *string - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // This member is required. - RoleArn *string - - // The base64 encoded SAML authentication response provided by the IdP. - // - // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide. - // - // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html - // - // This member is required. - SAMLAssertion *string - - // The duration, in seconds, of the role session. Your role session lasts for the - // duration that you specify for the DurationSeconds parameter, or until the time - // specified in the SAML authentication response's SessionNotOnOrAfter value, - // whichever is shorter. You can provide a DurationSeconds value from 900 seconds - // (15 minutes) up to the maximum session duration setting for the role. This - // setting can have a value from 1 hour to 12 hours. If you specify a value higher - // than this setting, the operation fails. For example, if you specify a session - // duration of 12 hours, but your administrator set the maximum session duration to - // 6 hours, your operation fails. To learn how to view the maximum value for your - // role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request to - // the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. For more - // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. - // - // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session - // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM - // User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. 
The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // For more information about role session permissions, see [Session policies]. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the - // Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // Passing policies to this operation returns new temporary credentials. The - // resulting session's permissions are the intersection of the role's - // identity-based policy and the session policies. You can use the role's temporary - // credentials in subsequent Amazon Web Services API calls to access resources in - // the account that owns the role. You cannot use session policies to grant more - // permissions than those allowed by the identity-based policy of the role that is - // being assumed. For more information, see [Session Policies]in the IAM User Guide. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - PolicyArns []types.PolicyDescriptorType - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type AssumeRoleWithSAMLOutput struct { - - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser *types.AssumedRoleUser - - // The value of the Recipient attribute of the SubjectConfirmationData element of - // the SAML assertion. 
- Audience *string - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // The value of the Issuer element of the SAML assertion. - Issuer *string - - // A hash value based on the concatenation of the following: - // - // - The Issuer response value. - // - // - The Amazon Web Services account ID. - // - // - The friendly name (the last part of the ARN) of the SAML provider in IAM. - // - // The combination of NameQualifier and Subject can be used to uniquely identify a - // user. - // - // The following pseudocode shows how the hash value is calculated: - // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) - NameQualifier *string - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // The value in the SourceIdentity attribute in the SAML assertion. The source - // identity value persists across [chained role]sessions. - // - // You can require users to set a source identity value when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. That way, actions that are taken with the role are associated with that - // user. After the source identity is set, the value cannot be changed. It is - // present in the request for all actions that are taken by the role and persists - // across [chained role]sessions. You can configure your SAML identity provider to use an - // attribute associated with your users, like user name or email, as the source - // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to - // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in - // the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - SourceIdentity *string - - // The value of the NameID element in the Subject element of the SAML assertion. - Subject *string - - // The format of the name ID, as defined by the Format attribute in the NameID - // element of the SAML assertion. Typical examples of the format are transient or - // persistent . - // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format , - // that prefix is removed. For example, - // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . - // If the format includes any other prefix, the format is returned with no - // modifications. - SubjectType *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithSAML"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRoleWithSAML", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go deleted file mode 100644 index 0ae4bc173..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ /dev/null @@ -1,475 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
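The AssumeRoleWithSAML wrapper deleted above is now consumed straight from the aws-sdk-go-v2 module that go.mod resolves, rather than from the vendor tree. A minimal sketch of calling it, assuming placeholder ARNs, a placeholder base64 SAML assertion, and the default config loader:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// Region comes from the environment; the call itself is authenticated by the
	// SAML assertion, not by long-term AWS credentials.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
		PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/MySAMLIdP"), // placeholder
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/MySAMLRole"),         // placeholder
		SAMLAssertion:   aws.String("<base64-encoded SAML response>"),                    // placeholder
		DurationSeconds: aws.Int32(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assumed:", aws.ToString(out.AssumedRoleUser.Arn))
}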
- -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials for users who have been -// authenticated in a mobile or web application with a web identity provider. -// Example providers include the OAuth 2.0 providers Login with Amazon and -// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities]. -// -// For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also -// supply the user with a consistent identity throughout the lifetime of an -// application. -// -// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide. -// -// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web -// Services security credentials. Therefore, you can distribute an application (for -// example, on mobile devices) that requests temporary security credentials without -// including long-term Amazon Web Services credentials in the application. You also -// don't need to deploy server-based proxy services that use long-term Amazon Web -// Services credentials. Instead, the identity of the caller is validated by using -// a token from the web identity provider. For a comparison of -// AssumeRoleWithWebIdentity with the other API operations that produce temporary -// credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -// -// The temporary security credentials returned by this API consist of an access -// key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to Amazon Web Services service API -// operations. -// -// # Session Duration -// -// By default, the temporary security credentials created by -// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional -// DurationSeconds parameter to specify the duration of your session. You can -// provide a value from 900 seconds (15 minutes) up to the maximum session duration -// setting for the role. This setting can have a value from 1 hour to 12 hours. To -// learn how to view the maximum value for your role, see [Update the maximum session duration for a role]in the IAM User Guide. -// The maximum session duration limit applies when you use the AssumeRole* API -// operations or the assume-role* CLI commands. However the limit does not apply -// when you use those operations to create a console URL. For more information, see -// [Using IAM Roles]in the IAM User Guide. -// -// # Permissions -// -// The temporary security credentials created by AssumeRoleWithWebIdentity can be -// used to make API calls to any Amazon Web Services service with the following -// exception: you cannot call the STS GetFederationToken or GetSessionToken API -// operations. -// -// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a -// single JSON policy document to use as an inline session policy. You can also -// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed -// session policies. 
The plaintext that you use for both inline and managed session -// policies can't exceed 2,048 characters. Passing policies to this operation -// returns new temporary credentials. The resulting session's permissions are the -// intersection of the role's identity-based policy and the session policies. You -// can use the role's temporary credentials in subsequent Amazon Web Services API -// calls to access resources in the account that owns the role. You cannot use -// session policies to grant more permissions than those allowed by the -// identity-based policy of the role that is being assumed. For more information, -// see [Session Policies]in the IAM User Guide. -// -// # Tags -// -// (Optional) You can configure your IdP to pass attributes into your web identity -// token as session tags. Each session tag consists of a key name and an associated -// value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User Guide. -// -// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed -// 128 characters and the values can’t exceed 256 characters. For these and -// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. -// -// An Amazon Web Services conversion compresses the passed inline session policy, -// managed policy ARNs, and session tags into a packed binary format that has a -// separate limit. Your request can fail for this limit even if your plaintext -// meets the other requirements. The PackedPolicySize response element indicates -// by percentage how close the policies and tags for your request are to the upper -// size limit. -// -// You can pass a session tag with the same key as a tag that is attached to the -// role. When you do, the session tag overrides the role tag with the same key. -// -// An administrator must grant you the permissions necessary to pass session tags. -// The administrator can also create granular permissions to allow you to pass only -// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. -// -// You can set the session tags as transitive. Transitive tags persist during role -// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. -// -// # Identities -// -// Before your application can call AssumeRoleWithWebIdentity , you must have an -// identity token from a supported identity provider and create a role that the -// application can assume. The role that your application assumes must trust the -// identity provider that is associated with the identity token. In other words, -// the identity provider must be specified in the role's trust policy. -// -// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail -// logs. The entry includes the [Subject]of the provided web identity token. We recommend -// that you avoid using any personally identifiable information (PII) in this -// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification]. -// -// For more information about how to use OIDC federation and the -// AssumeRoleWithWebIdentity API, see the following resources: -// -// [Using Web Identity Federation API Operations for Mobile Apps] -// - and [Federation Through a Web-based Identity Provider]. -// -// [Amazon Web Services SDK for iOS Developer Guide] -// - and [Amazon Web Services SDK for Android Developer Guide]. 
These toolkits contain sample apps that show how to invoke the -// identity providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. -// -// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ -// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ -// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length -// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims -// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html -// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html -// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity -// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html -// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining -// [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration -// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html -// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes -func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { - if params == nil { - params = &AssumeRoleWithWebIdentityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRoleWithWebIdentityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRoleWithWebIdentityInput struct { - - // The Amazon Resource Name (ARN) of the role that the caller is assuming. - // - // Additional considerations apply to Amazon Cognito identity pools that assume [cross-account IAM roles]. - // The trust policies of these roles must accept the cognito-identity.amazonaws.com - // service principal and must contain the cognito-identity.amazonaws.com:aud - // condition key to restrict role assumption to users from your intended identity - // pools. 
A policy that trusts Amazon Cognito identity pools without this condition - // creates a risk that a user from an unintended identity pool can assume the role. - // For more information, see [Trust policies for IAM roles in Basic (Classic) authentication]in the Amazon Cognito Developer Guide. - // - // [cross-account IAM roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies-cross-account-resource-access.html - // [Trust policies for IAM roles in Basic (Classic) authentication]: https://docs.aws.amazon.com/cognito/latest/developerguide/iam-roles.html#trust-policies - // - // This member is required. - RoleArn *string - - // An identifier for the assumed role session. Typically, you pass the name or - // identifier that is associated with the user who is using your application. That - // way, the temporary security credentials that your application will use are - // associated with that user. This session name is included as part of the ARN and - // assumed role ID in the AssumedRoleUser response element. - // - // For security purposes, administrators can view this field in [CloudTrail logs] to help identify - // who performed an action in Amazon Web Services. Your administrator might require - // that you specify your user name as the session name when you assume the role. - // For more information, see [sts:RoleSessionName]sts:RoleSessionName . - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds - // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname - // - // This member is required. - RoleSessionName *string - - // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the - // identity provider. Your application must get this token by authenticating the - // user who is using your application with a web identity provider before the - // application makes an AssumeRoleWithWebIdentity call. Timestamps in the token - // must be formatted as either an integer or a long integer. Tokens must be signed - // using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or - // ES512). - // - // This member is required. - WebIdentityToken *string - - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify a - // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. - // - // By default, the value is set to 3600 seconds. - // - // The DurationSeconds parameter is separate from the duration of a console - // session that you might request using the returned credentials. The request to - // the federation endpoint for a console sign-in token takes a SessionDuration - // parameter that specifies the maximum length of the console session. 
For more - // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. - // - // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session - // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // This parameter is optional. Passing policies to this operation returns new - // temporary credentials. The resulting session's permissions are the intersection - // of the role's identity-based policy and the session policies. You can use the - // role's temporary credentials in subsequent Amazon Web Services API calls to - // access resources in the account that owns the role. You cannot use session - // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM - // User Guide. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // For more information about role session permissions, see [Session policies]. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as managed session policies. The policies must exist in the same account as - // the role. - // - // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plaintext that you use for both inline and managed session policies - // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the - // Amazon Web Services General Reference. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // Passing policies to this operation returns new temporary credentials. 
The - // resulting session's permissions are the intersection of the role's - // identity-based policy and the session policies. You can use the role's temporary - // credentials in subsequent Amazon Web Services API calls to access resources in - // the account that owns the role. You cannot use session policies to grant more - // permissions than those allowed by the identity-based policy of the role that is - // being assumed. For more information, see [Session Policies]in the IAM User Guide. - // - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - PolicyArns []types.PolicyDescriptorType - - // The fully qualified host component of the domain name of the OAuth 2.0 identity - // provider. Do not specify this value for an OpenID Connect identity provider. - // - // Currently www.amazon.com and graph.facebook.com are the only supported identity - // providers for OAuth 2.0 access tokens. Do not include URL schemes and port - // numbers. - // - // Do not specify this value for OpenID Connect ID tokens. - ProviderId *string - - noSmithyDocumentSerde -} - -// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type AssumeRoleWithWebIdentityOutput struct { - - // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers - // that you can use to refer to the resulting temporary security credentials. For - // example, you can reference these credentials as a principal in a resource-based - // policy by using the ARN or assumed role ID. The ARN and ID include the - // RoleSessionName that you specified when you called AssumeRole . - AssumedRoleUser *types.AssumedRoleUser - - // The intended audience (also known as client ID) of the web identity token. This - // is traditionally the client identifier issued to the application that requested - // the web identity token. - Audience *string - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // The issuing authority of the web identity token presented. For OpenID Connect - // ID tokens, this contains the value of the iss field. For OAuth 2.0 access - // tokens, this contains the value of the ProviderId parameter that was passed in - // the AssumeRoleWithWebIdentity request. - Provider *string - - // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. - // - // You can require users to set a source identity value when they assume a role. - // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. That way, actions that are taken with the role are associated with that - // user. 
After the source identity is set, the value cannot be changed. It is - // present in the request for all actions that are taken by the role and persists - // across [chained role]sessions. You can configure your identity provider to use an attribute - // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON - // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon - // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles] - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html - SourceIdentity *string - - // The unique user identifier that is returned by the identity provider. This - // identifier is associated with the WebIdentityToken that was submitted with the - // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user - // and the application that acquired the WebIdentityToken (pairwise identifier). - // For OpenID Connect ID tokens, this field contains the value returned by the - // identity provider as the token's sub (Subject) claim. - SubjectFromWebIdentityToken *string - - // Metadata pertaining to the operation's result. 
- ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithWebIdentity"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRoleWithWebIdentity", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go deleted file mode 100644 index cd976e573..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go +++ /dev/null @@ -1,220 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
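Likewise for the AssumeRoleWithWebIdentity operation removed above; in this sketch the role ARN and OIDC token are caller-supplied placeholders and the session name is an arbitrary label:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// assumeWithOIDC exchanges an OIDC/OAuth web identity token for temporary
// credentials via AssumeRoleWithWebIdentity.
func assumeWithOIDC(ctx context.Context, client *sts.Client, roleARN, token string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
	return client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String(roleARN),
		RoleSessionName:  aws.String("example-session"), // arbitrary session label
		WebIdentityToken: aws.String(token),
		DurationSeconds:  aws.Int32(3600),
	})
}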
- -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of short term credentials you can use to perform privileged tasks -// on a member account in your organization. -// -// Before you can launch a privileged session, you must have centralized root -// access in your organization. For steps to enable this feature, see [Centralize root access for member accounts]in the IAM -// User Guide. -// -// The STS global endpoint is not supported for AssumeRoot. You must send this -// request to a Regional STS endpoint. For more information, see [Endpoints]. -// -// You can track AssumeRoot in CloudTrail logs to determine what actions were -// performed in a session. For more information, see [Track privileged tasks in CloudTrail]in the IAM User Guide. -// -// [Endpoints]: https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html#sts-endpoints -// [Track privileged tasks in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-track-privileged-tasks.html -// [Centralize root access for member accounts]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-enable-root-access.html -func (c *Client) AssumeRoot(ctx context.Context, params *AssumeRootInput, optFns ...func(*Options)) (*AssumeRootOutput, error) { - if params == nil { - params = &AssumeRootInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "AssumeRoot", params, optFns, c.addOperationAssumeRootMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*AssumeRootOutput) - out.ResultMetadata = metadata - return out, nil -} - -type AssumeRootInput struct { - - // The member account principal ARN or account ID. - // - // This member is required. - TargetPrincipal *string - - // The identity based policy that scopes the session to the privileged tasks that - // can be performed. You can use one of following Amazon Web Services managed - // policies to scope root session actions. - // - // [IAMAuditRootUserCredentials] - // - // [IAMCreateRootUserPassword] - // - // [IAMDeleteRootUserCredentials] - // - // [S3UnlockBucketPolicy] - // - // [SQSUnlockQueuePolicy] - // - // [IAMDeleteRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMDeleteRootUserCredentials - // [IAMCreateRootUserPassword]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMCreateRootUserPassword - // [IAMAuditRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMAuditRootUserCredentials - // [S3UnlockBucketPolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-S3UnlockBucketPolicy - // [SQSUnlockQueuePolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-SQSUnlockQueuePolicy - // - // This member is required. - TaskPolicyArn *types.PolicyDescriptorType - - // The duration, in seconds, of the privileged session. The value can range from 0 - // seconds up to the maximum session duration of 900 seconds (15 minutes). If you - // specify a value higher than this setting, the operation fails. - // - // By default, the value is set to 900 seconds. 
- DurationSeconds *int32 - - noSmithyDocumentSerde -} - -type AssumeRootOutput struct { - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // The source identity specified by the principal that is calling the AssumeRoot - // operation. - // - // You can use the aws:SourceIdentity condition key to control access based on the - // value of source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles] - // in the IAM User Guide. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html - SourceIdentity *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoot{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoot{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoot"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpAssumeRootValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoot(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != 
nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opAssumeRoot(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "AssumeRoot", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go deleted file mode 100644 index a56840e1b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ /dev/null @@ -1,192 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Decodes additional information about the authorization status of a request from -// an encoded message returned in response to an Amazon Web Services request. -// -// For example, if a user is not authorized to perform an operation that he or she -// has requested, the request returns a Client.UnauthorizedOperation response (an -// HTTP 403 response). Some Amazon Web Services operations additionally return an -// encoded message that can provide details about this authorization failure. -// -// Only certain Amazon Web Services operations return an encoded authorization -// message. The documentation for an individual operation indicates whether that -// operation returns an encoded message in addition to returning an HTTP code. -// -// The message is encoded because the details of the authorization status can -// contain privileged information that the user who requested the operation should -// not see. To decode an authorization status message, a user must be granted -// permissions through an IAM [policy]to request the DecodeAuthorizationMessage ( -// sts:DecodeAuthorizationMessage ) action. -// -// The decoded message includes the following type of information: -// -// - Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User -// Guide. -// -// - The principal who made the request. -// -// - The requested action. -// -// - The requested resource. -// -// - The values of condition keys in the context of the user's request. 
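The AssumeRoot operation removed above can be exercised the same way once the dependency is resolved from go.mod; the member account ID below is a placeholder and the task-policy ARN path is an assumption based on the policy names listed in the deleted documentation:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// assumeRootForTask requests a short-lived privileged session in a member
// account, scoped to one of the documented task policies.
func assumeRootForTask(ctx context.Context, client *sts.Client) (*sts.AssumeRootOutput, error) {
	return client.AssumeRoot(ctx, &sts.AssumeRootInput{
		TargetPrincipal: aws.String("111122223333"), // placeholder member account ID
		TaskPolicyArn: &types.PolicyDescriptorType{
			// ARN path assumed; the deleted docs list only the policy names.
			Arn: aws.String("arn:aws:iam::aws:policy/root-task/IAMAuditRootUserCredentials"),
		},
		DurationSeconds: aws.Int32(900),
	})
}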
-// -// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow -// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html -func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { - if params == nil { - params = &DecodeAuthorizationMessageInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*DecodeAuthorizationMessageOutput) - out.ResultMetadata = metadata - return out, nil -} - -type DecodeAuthorizationMessageInput struct { - - // The encoded message that was returned with the response. - // - // This member is required. - EncodedMessage *string - - noSmithyDocumentSerde -} - -// A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an Amazon -// Web Services request. -type DecodeAuthorizationMessageOutput struct { - - // The API returns a response with the decoded message. - DecodedMessage *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "DecodeAuthorizationMessage"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { - return err - } - if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "DecodeAuthorizationMessage", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go deleted file mode 100644 index c80b0550b..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ /dev/null @@ -1,183 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns the account identifier for the specified access key ID. -// -// Access keys consist of two parts: an access key ID (for example, -// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example, -// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access -// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide. -// -// When you pass an access key ID to this operation, it returns the ID of the -// Amazon Web Services account to which the keys belong. Access key IDs beginning -// with AKIA are long-term credentials for an IAM user or the Amazon Web Services -// account root user. Access key IDs beginning with ASIA are temporary credentials -// that are created using STS operations. If the account in the response belongs to -// you, you can sign in as the root user and review your root user access keys. -// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who -// requested the temporary credentials for an ASIA access key, view the STS events -// in your [CloudTrail logs]in the IAM User Guide. -// -// This operation does not indicate the state of the access key. The key might be -// active, inactive, or deleted. Active keys might not have permissions to perform -// an operation. Providing a deleted access key might return an error that the key -// doesn't exist. 
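For the DecodeAuthorizationMessage operation whose deleted wrapper ends just above, a small sketch that expands an encoded Client.UnauthorizedOperation message; encoded is assumed to come from the failing API response:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// decodeDenied expands the encoded authorization-failure detail returned by
// some AWS APIs into a readable message.
func decodeDenied(ctx context.Context, client *sts.Client, encoded string) error {
	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.ToString(out.DecodedMessage))
	return nil
}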
-// -// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html -// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html -// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html -func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { - if params == nil { - params = &GetAccessKeyInfoInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetAccessKeyInfoOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetAccessKeyInfoInput struct { - - // The identifier of an access key. - // - // This parameter allows (through its regex pattern) a string of characters that - // can consist of any upper- or lowercase letter or digit. - // - // This member is required. - AccessKeyId *string - - noSmithyDocumentSerde -} - -type GetAccessKeyInfoOutput struct { - - // The number used to identify the Amazon Web Services account. - Account *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetAccessKeyInfo"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { - return err - } - if err = 
addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetAccessKeyInfo", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go deleted file mode 100644 index 49304bdaf..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ /dev/null @@ -1,195 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns details about the IAM user or role whose credentials are used to call -// the operation. -// -// No permissions are required to perform this operation. If an administrator -// attaches a policy to your identity that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when access is denied. -// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide. -// -// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa -func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { - if params == nil { - params = &GetCallerIdentityInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetCallerIdentityOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetCallerIdentityInput struct { - noSmithyDocumentSerde -} - -// Contains the response to a successful GetCallerIdentity request, including information about the -// entity making the request. -type GetCallerIdentityOutput struct { - - // The Amazon Web Services account ID number of the account that owns or contains - // the calling entity. - Account *string - - // The Amazon Web Services ARN associated with the calling entity. - Arn *string - - // The unique identifier of the calling entity. The exact value depends on the - // type of entity that is making the call. The values returned are those listed in - // the aws:userid column in the [Principal table]found on the Policy Variables reference page in - // the IAM User Guide. 
- // - // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable - UserId *string - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetCallerIdentity"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetCallerIdentity", - } -} - -// PresignGetCallerIdentity is used to generate a presigned HTTP Request which -// contains presigned URL, signed headers and HTTP method used. 
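As a hedged sketch of how the GetCallerIdentity operation and the presigned variant documented just below are typically combined as a "who am I" probe (the helper name is illustrative, and sts.NewPresignClient is assumed to be the SDK's standard presign constructor, which is not shown in this hunk):

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// whoAmI prints the identity behind cfg's credentials and a presigned
// GetCallerIdentity request that a remote party could use to verify it.
func whoAmI(ctx context.Context, cfg aws.Config) error {
	client := sts.NewFromConfig(cfg)

	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		return err
	}
	fmt.Printf("account=%s arn=%s userid=%s\n",
		aws.ToString(out.Account), aws.ToString(out.Arn), aws.ToString(out.UserId))

	// Presign the same call; the result carries the URL, HTTP method and signed headers.
	presigned, err := sts.NewPresignClient(client).PresignGetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		return err
	}
	fmt.Println("presigned:", presigned.Method, presigned.URL)
	return nil
}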
-func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { - if params == nil { - params = &GetCallerIdentityInput{} - } - options := c.options.copy() - for _, fn := range optFns { - fn(&options) - } - clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) - - result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns, - c.client.addOperationGetCallerIdentityMiddlewares, - presignConverter(options).convertToPresignMiddleware, - ) - if err != nil { - return nil, err - } - - out := result.(*v4.PresignedHTTPRequest) - return out, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go deleted file mode 100644 index e2ecc792a..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ /dev/null @@ -1,396 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary security credentials (consisting of an access key -// ID, a secret access key, and a security token) for a user. A typical use is in a -// proxy application that gets temporary security credentials on behalf of -// distributed applications inside a corporate network. -// -// You must call the GetFederationToken operation using the long-term security -// credentials of an IAM user. As a result, this call is appropriate in contexts -// where those credentials can be safeguarded, usually in a server-based -// application. For a comparison of GetFederationToken with the other API -// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -// -// Although it is possible to call GetFederationToken using the security -// credentials of an Amazon Web Services account root user rather than an IAM user -// that you create for the purpose of a proxy application, we do not recommend it. -// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide. -// -// You can create a mobile-based or browser-based app that can authenticate users -// using a web identity provider like Login with Amazon, Facebook, Google, or an -// OpenID Connect-compatible identity provider. In this case, we recommend that you -// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User -// Guide. -// -// # Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by -// using the root user credentials have a maximum duration of 3,600 seconds (1 -// hour). -// -// # Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service with the following exceptions: -// -// - You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. 
This limitation does not apply to console sessions. -// -// - You cannot call any STS operations except GetCallerIdentity . -// -// You can use temporary credentials for single sign-on (SSO) to the console. -// -// You must pass an inline or managed [session policy] to this operation. You can pass a single -// JSON policy document to use as an inline session policy. You can also specify up -// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session -// policies. The plaintext that you use for both inline and managed session -// policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. When you pass -// session policies, the session permissions are the intersection of the IAM user -// policies and the session policies that you pass. This gives you a way to further -// restrict the permissions for a federated user. You cannot use session policies -// to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For -// information about using GetFederationToken to create temporary security -// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker]. -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session in the -// Principal element of the policy, the session has the permissions allowed by the -// policy. These permissions are granted in addition to the permissions granted by -// the session policies. -// -// # Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User -// Guide. -// -// You can create a mobile-based or browser-based app that can authenticate users -// using a web identity provider like Login with Amazon, Facebook, Google, or an -// OpenID Connect-compatible identity provider. In this case, we recommend that you -// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User -// Guide. -// -// An administrator must grant you the permissions necessary to pass session tags. -// The administrator can also create granular permissions to allow you to pass only -// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. -// -// Tag key–value pairs are not case sensitive, but case is preserved. This means -// that you cannot have separate Department and department tag keys. Assume that -// the user that you are federating has the Department = Marketing tag and you -// pass the department = engineering session tag. Department and department are -// not saved as separate tags, and the session tag passed in the request takes -// precedence over the user tag. 
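Before the parameter reference that follows, a hedged sketch of the proxy-application pattern this doc comment describes: one GetFederationToken call with a federated user name, an inline session policy, and a session tag (the helper name, policy JSON, and duration are illustrative assumptions; "Bob" and the Department tag come from the doc comment):

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// federatedCreds returns temporary credentials for a named federated user,
// scoped by an inline session policy and tagged for attribute-based access control.
func federatedCreds(ctx context.Context, client *sts.Client) (*types.Credentials, error) {
	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
		Name:            aws.String("Bob"),
		DurationSeconds: aws.Int32(3600), // 1 hour; the documented default is 43,200 seconds (12 hours)
		// The session's effective permissions are the intersection of this inline
		// policy and the calling IAM user's identity-based policies.
		Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`),
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Marketing")},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}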
-// -// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity -// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Amazon Cognito]: http://aws.amazon.com/cognito/ -// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken -// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html -func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { - if params == nil { - params = &GetFederationTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetFederationTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetFederationTokenInput struct { - - // The name of the federated user. The name is used as an identifier for the - // temporary security credentials (such as Bob ). For example, you can reference - // the federated user name in a resource-based policy, such as in an Amazon S3 - // bucket policy. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@- - // - // This member is required. - Name *string - - // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds - // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using root user credentials are restricted to a maximum of 3,600 seconds (one - // hour). If the specified duration is longer than one hour, the session obtained - // by using root user credentials defaults to one hour. - DurationSeconds *int32 - - // An IAM policy in JSON format that you want to use as an inline session policy. - // - // You must pass an inline or managed [session policy] to this operation. You can pass a single - // JSON policy document to use as an inline session policy. You can also specify up - // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session - // policies. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. 
- // - // When you pass session policies, the session permissions are the intersection of - // the IAM user policies and the session policies that you pass. This gives you a - // way to further restrict the permissions for a federated user. You cannot use - // session policies to grant more permissions than those that are defined in the - // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User - // Guide. - // - // The resulting credentials can be used to access a resource that has a - // resource-based policy. If that policy specifically references the federated user - // session in the Principal element of the policy, the session has the permissions - // allowed by the policy. These permissions are granted in addition to the - // permissions that are granted by the session policies. - // - // The plaintext that you use for both inline and managed session policies can't - // exceed 2,048 characters. The JSON policy characters can be any ASCII character - // from the space character to the end of the valid character list (\u0020 through - // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as a managed session policy. The policies must exist in the same account as - // the IAM user that is requesting federated access. - // - // You must pass an inline or managed [session policy] to this operation. You can pass a single - // JSON policy document to use as an inline session policy. You can also specify up - // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session - // policies. The plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. You can provide up to 10 managed policy - // ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General - // Reference. - // - // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. - // - // When you pass session policies, the session permissions are the intersection of - // the IAM user policies and the session policies that you pass. This gives you a - // way to further restrict the permissions for a federated user. You cannot use - // session policies to grant more permissions than those that are defined in the - // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User - // Guide. - // - // The resulting credentials can be used to access a resource that has a - // resource-based policy. 
If that policy specifically references the federated user - // session in the Principal element of the policy, the session has the permissions - // allowed by the policy. These permissions are granted in addition to the - // permissions that are granted by the session policies. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - PolicyArns []types.PolicyDescriptorType - - // A list of session tags. Each session tag consists of a key name and an - // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User - // Guide. - // - // This parameter is optional. You can pass up to 50 session tags. The plaintext - // session tag keys can’t exceed 128 characters and the values can’t exceed 256 - // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. - // - // An Amazon Web Services conversion compresses the passed inline session policy, - // managed policy ARNs, and session tags into a packed binary format that has a - // separate limit. Your request can fail for this limit even if your plaintext - // meets the other requirements. The PackedPolicySize response element indicates - // by percentage how close the policies and tags for your request are to the upper - // size limit. - // - // You can pass a session tag with the same key as a tag that is already attached - // to the user you are federating. When you do, session tags override a user tag - // with the same key. - // - // Tag key–value pairs are not case sensitive, but case is preserved. This means - // that you cannot have separate Department and department tag keys. Assume that - // the role has the Department = Marketing tag and you pass the department = - // engineering session tag. Department and department are not saved as separate - // tags, and the session tag passed in the request takes precedence over the role - // tag. - // - // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html - // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length - Tags []types.Tag - - noSmithyDocumentSerde -} - -// Contains the response to a successful GetFederationToken request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type GetFederationTokenOutput struct { - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. 
- Credentials *types.Credentials - - // Identifiers for the federated user associated with the credentials (such as - // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use - // the federated user's ARN in your resource-based policies, such as an Amazon S3 - // bucket policy. - FederatedUser *types.FederatedUser - - // A percentage value that indicates the packed size of the session policies and - // session tags combined passed in the request. The request fails if the packed - // size is greater than 100 percent, which means the policies and tags exceeded the - // allowed space. - PackedPolicySize *int32 - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetFederationToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = 
addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetFederationToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go deleted file mode 100644 index fdc451117..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ /dev/null @@ -1,242 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Returns a set of temporary credentials for an Amazon Web Services account or -// IAM user. The credentials consist of an access key ID, a secret access key, and -// a security token. Typically, you use GetSessionToken if you want to use MFA to -// protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances . -// -// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is -// associated with their MFA device. Using the temporary security credentials that -// the call returns, IAM users can then make programmatic calls to API operations -// that require MFA authentication. An incorrect MFA code causes the API to return -// an access denied error. For a comparison of GetSessionToken with the other API -// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. -// -// No permissions are required for users to perform this operation. The purpose of -// the sts:GetSessionToken operation is to authenticate the user using MFA. You -// cannot use policies to control authentication operations. For more information, -// see [Permissions for GetSessionToken]in the IAM User Guide. -// -// # Session Duration -// -// The GetSessionToken operation must be called by using the long-term Amazon Web -// Services security credentials of an IAM user. Credentials that are created by -// IAM users are valid for the duration that you specify. This duration can range -// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours), -// with a default of 43,200 seconds (12 hours). Credentials based on account -// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1 -// hour), with a default of 1 hour. -// -// # Permissions -// -// The temporary security credentials created by GetSessionToken can be used to -// make API calls to any Amazon Web Services service with the following exceptions: -// -// - You cannot call any IAM API operations unless MFA authentication -// information is included in the request. -// -// - You cannot call any STS API except AssumeRole or GetCallerIdentity . -// -// The credentials that GetSessionToken returns are based on permissions -// associated with the IAM user whose credentials were used to call the operation. -// The temporary credentials have the same permissions as the IAM user. 
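A hedged sketch of the MFA flow this doc comment describes: an IAM user's long-term credentials plus a one-time device code exchanged for temporary session credentials (the helper name and parameter values are illustrative assumptions; the serial-number ARN format and six-digit code are from the parameter docs below):

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// mfaSessionCredentials exchanges an IAM user's long-term credentials plus a
// one-time MFA code for temporary session credentials.
func mfaSessionCredentials(ctx context.Context, client *sts.Client, mfaSerial, tokenCode string) (*types.Credentials, error) {
	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(43200),      // 12 hours, the documented default
		SerialNumber:    aws.String(mfaSerial), // e.g. arn:aws:iam::123456789012:mfa/user
		TokenCode:       aws.String(tokenCode), // the six-digit code from the MFA device
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}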
-// -// Although it is possible to call GetSessionToken using the security credentials -// of an Amazon Web Services account root user rather than an IAM user, we do not -// recommend it. If GetSessionToken is called using root user credentials, the -// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in -// the IAM User Guide -// -// For more information about using GetSessionToken to create temporary -// credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide. -// -// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html -// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken -// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials -// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html -// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html -func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { - if params == nil { - params = &GetSessionTokenInput{} - } - - result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares) - if err != nil { - return nil, err - } - - out := result.(*GetSessionTokenOutput) - out.ResultMetadata = metadata - return out, nil -} - -type GetSessionTokenInput struct { - - // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 - // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for - // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds - // (one hour). If the duration is longer than one hour, the session for Amazon Web - // Services account owners defaults to one hour. - DurationSeconds *int32 - - // The identification number of the MFA device that is associated with the IAM - // user who is making the GetSessionToken call. Specify this value if the IAM user - // has a policy that requires MFA authentication. The value is either the serial - // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name - // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You - // can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. - // - // The regex used to validate this parameter is a string of characters consisting - // of upper- and lower-case alphanumeric characters with no spaces. You can also - // include underscores or any of the following characters: =,.@:/- - SerialNumber *string - - // The value provided by the MFA device, if MFA is required. If any policy - // requires the IAM user to submit an MFA code, specify this value. If MFA - // authentication is required, the user must provide a code when requesting a set - // of temporary security credentials. 
A user who fails to provide the code receives - // an "access denied" response when requesting resources that require MFA - // authentication. - // - // The format for this parameter, as described by its regex pattern, is a sequence - // of six numeric digits. - TokenCode *string - - noSmithyDocumentSerde -} - -// Contains the response to a successful GetSessionToken request, including temporary Amazon Web -// Services credentials that can be used to make Amazon Web Services requests. -type GetSessionTokenOutput struct { - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // The size of the security token that STS API operations return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. - Credentials *types.Credentials - - // Metadata pertaining to the operation's result. - ResultMetadata middleware.Metadata - - noSmithyDocumentSerde -} - -func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { - if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { - return err - } - err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) - if err != nil { - return err - } - err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After) - if err != nil { - return err - } - if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionToken"); err != nil { - return fmt.Errorf("add protocol finalizers: %v", err) - } - - if err = addlegacyEndpointContextSetter(stack, options); err != nil { - return err - } - if err = addSetLoggerMiddleware(stack, options); err != nil { - return err - } - if err = addClientRequestID(stack); err != nil { - return err - } - if err = addComputeContentLength(stack); err != nil { - return err - } - if err = addResolveEndpointMiddleware(stack, options); err != nil { - return err - } - if err = addComputePayloadSHA256(stack); err != nil { - return err - } - if err = addRetry(stack, options); err != nil { - return err - } - if err = addRawResponseToMetadata(stack); err != nil { - return err - } - if err = addRecordResponseTiming(stack); err != nil { - return err - } - if err = addSpanRetryLoop(stack, options); err != nil { - return err - } - if err = addClientUserAgent(stack, options); err != nil { - return err - } - if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { - return err - } - if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { - return err - } - if err = addTimeOffsetBuild(stack, c); err != nil { - return err - } - if err = addUserAgentRetryMode(stack, options); err != nil { - return err - } - if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { - return err - } - if err = addRecursionDetection(stack); err != nil { - return err - } - if err = addRequestIDRetrieverMiddleware(stack); err != nil { - return err - } - if err = addResponseErrorMiddleware(stack); err != nil { - return err - } - if err = addRequestResponseLogging(stack, options); err != nil { - return err - } - if err = addDisableHTTPSMiddleware(stack, options); err != nil { - return err - } - if err = addSpanInitializeStart(stack); err != nil { - return err - } - if err = addSpanInitializeEnd(stack); err != nil { - 
return err - } - if err = addSpanBuildRequestStart(stack); err != nil { - return err - } - if err = addSpanBuildRequestEnd(stack); err != nil { - return err - } - return nil -} - -func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata { - return &awsmiddleware.RegisterServiceMetadata{ - Region: region, - ServiceID: ServiceID, - OperationName: "GetSessionToken", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go deleted file mode 100644 index a90b2b736..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ /dev/null @@ -1,325 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { - params.Region = options.Region -} - -type setLegacyContextSigningOptionsMiddleware struct { -} - -func (*setLegacyContextSigningOptionsMiddleware) ID() string { - return "setLegacyContextSigningOptions" -} - -func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - rscheme := getResolvedAuthScheme(ctx) - schemeID := rscheme.Scheme.SchemeID() - - if sn := awsmiddleware.GetSigningName(ctx); sn != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) - } - } - - if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { - if schemeID == "aws.auth#sigv4" { - smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) - } else if schemeID == "aws.auth#sigv4a" { - smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) - } - } - - return next.HandleFinalize(ctx, in) -} - -func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) -} - -type withAnonymous struct { - resolver AuthSchemeResolver -} - -var _ AuthSchemeResolver = (*withAnonymous)(nil) - -func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - opts, err := v.resolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return nil, err - } - - opts = append(opts, &smithyauth.Option{ - SchemeID: smithyauth.SchemeIDAnonymous, - }) - return opts, nil -} - -func wrapWithAnonymousAuth(options *Options) { - if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { - return - } - - options.AuthSchemeResolver = &withAnonymous{ - resolver: options.AuthSchemeResolver, - } -} - -// AuthResolverParameters contains the set of inputs necessary for auth scheme -// resolution. -type AuthResolverParameters struct { - // The name of the operation being invoked. - Operation string - - // The region in which the operation is being invoked. 
- Region string -} - -func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { - params := &AuthResolverParameters{ - Operation: operation, - } - - bindAuthParamsRegion(ctx, params, input, options) - - return params -} - -// AuthSchemeResolver returns a set of possible authentication options for an -// operation. -type AuthSchemeResolver interface { - ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) -} - -type defaultAuthSchemeResolver struct{} - -var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) - -func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { - if overrides, ok := operationAuthOptions[params.Operation]; ok { - return overrides(params), nil - } - return serviceAuthOptions(params), nil -} - -var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ - "AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, - - "AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - {SchemeID: smithyauth.SchemeIDAnonymous}, - } - }, -} - -func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { - return []*smithyauth.Option{ - { - SchemeID: smithyauth.SchemeIDSigV4, - SignerProperties: func() smithy.Properties { - var props smithy.Properties - smithyhttp.SetSigV4SigningName(&props, "sts") - smithyhttp.SetSigV4SigningRegion(&props, params.Region) - return props - }(), - }, - } -} - -type resolveAuthSchemeMiddleware struct { - operation string - options Options -} - -func (*resolveAuthSchemeMiddleware) ID() string { - return "ResolveAuthScheme" -} - -func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") - defer span.End() - - params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) - options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) - if err != nil { - return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) - } - - scheme, ok := m.selectScheme(options) - if !ok { - return out, metadata, fmt.Errorf("could not select an auth scheme") - } - - ctx = setResolvedAuthScheme(ctx, scheme) - - span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) - span.End() - return next.HandleFinalize(ctx, in) -} - -func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { - for _, option := range options { - if option.SchemeID == smithyauth.SchemeIDAnonymous { - return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true - } - - for _, scheme := range m.options.AuthSchemes { - if scheme.SchemeID() != option.SchemeID { - continue - } - - if scheme.IdentityResolver(m.options) != nil { - return newResolvedAuthScheme(scheme, option), true - } - } - } - - return nil, false -} - -type resolvedAuthSchemeKey struct{} - -type resolvedAuthScheme struct { - Scheme smithyhttp.AuthScheme - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) 
*resolvedAuthScheme { - return &resolvedAuthScheme{ - Scheme: scheme, - IdentityProperties: option.IdentityProperties, - SignerProperties: option.SignerProperties, - } -} - -func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { - return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) -} - -func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { - v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) - return v -} - -type getIdentityMiddleware struct { - options Options -} - -func (*getIdentityMiddleware) ID() string { - return "GetIdentity" -} - -func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") - defer span.End() - - rscheme := getResolvedAuthScheme(innerCtx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - resolver := rscheme.Scheme.IdentityResolver(m.options) - if resolver == nil { - return out, metadata, fmt.Errorf("no identity resolver") - } - - identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", - func() (smithyauth.Identity, error) { - return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) - }, - func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("get identity: %w", err) - } - - ctx = setIdentity(ctx, identity) - - span.End() - return next.HandleFinalize(ctx, in) -} - -type identityKey struct{} - -func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { - return middleware.WithStackValue(ctx, identityKey{}, identity) -} - -func getIdentity(ctx context.Context) smithyauth.Identity { - v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) - return v -} - -type signRequestMiddleware struct { - options Options -} - -func (*signRequestMiddleware) ID() string { - return "Signing" -} - -func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "SignRequest") - defer span.End() - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - identity := getIdentity(ctx) - if identity == nil { - return out, metadata, fmt.Errorf("no identity") - } - - signer := rscheme.Scheme.Signer() - if signer == nil { - return out, metadata, fmt.Errorf("no signer") - } - - _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { - return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) - }, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) - }) - if err != nil { - return out, metadata, fmt.Errorf("sign request: %w", err) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go deleted file mode 100644 index 
59349890f..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
+++ /dev/null
@@ -1,2719 +0,0 @@
-// Code generated by smithy-go-codegen DO NOT EDIT.
[The remaining 2,718 deleted lines of this generated, vendored file are omitted: the awsquery operation deserializers for AssumeRole, AssumeRoleWithSAML, AssumeRoleWithWebIdentity, AssumeRoot, DecodeAuthorizationMessage, GetAccessKeyInfo, GetCallerIdentity, GetFederationToken, and GetSessionToken, along with their error-response and output-document deserializer helpers.]
t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go deleted file mode 100644 index cbb19c7f6..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -// Package sts provides the API client, operations, and parameter types for AWS -// Security Token Service. -// -// # Security Token Service -// -// Security Token Service (STS) enables you to request temporary, -// limited-privilege credentials for users. This guide provides descriptions of the -// STS API. For more information about using this service, see [Temporary Security Credentials]. -// -// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html -package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go deleted file mode 100644 index dca2ce359..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ /dev/null @@ -1,1136 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "errors" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" - "github.com/aws/aws-sdk-go-v2/internal/endpoints" - "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" - internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" - smithy "github.com/aws/smithy-go" - smithyauth "github.com/aws/smithy-go/auth" - smithyendpoints "github.com/aws/smithy-go/endpoints" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/ptr" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" - "net/url" - "os" - "strings" -) - -// EndpointResolverOptions is the service endpoint resolver options -type EndpointResolverOptions = internalendpoints.Options - -// EndpointResolver interface for resolving service endpoints. -type EndpointResolver interface { - ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) -} - -var _ EndpointResolver = &internalendpoints.Resolver{} - -// NewDefaultEndpointResolver constructs a new service endpoint resolver -func NewDefaultEndpointResolver() *internalendpoints.Resolver { - return internalendpoints.New() -} - -// EndpointResolverFunc is a helper utility that wraps a function so it satisfies -// the EndpointResolver interface. This is useful when you want to add additional -// endpoint resolving logic, or stub out specific endpoints with custom values. -type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) - -func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return fn(region, options) -} - -// EndpointResolverFromURL returns an EndpointResolver configured using the -// provided endpoint url. 
By default, the resolved endpoint resolver uses the -// client region as signing region, and the endpoint source is set to -// EndpointSourceCustom.You can provide functional options to configure endpoint -// values for the resolved endpoint. -func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { - e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} - for _, fn := range optFns { - fn(&e) - } - - return EndpointResolverFunc( - func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { - if len(e.SigningRegion) == 0 { - e.SigningRegion = region - } - return e, nil - }, - ) -} - -type ResolveEndpoint struct { - Resolver EndpointResolver - Options EndpointResolverOptions -} - -func (*ResolveEndpoint) ID() string { - return "ResolveEndpoint" -} - -func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleSerialize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.Resolver == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - eo := m.Options - eo.Logger = middleware.GetLogger(ctx) - - var endpoint aws.Endpoint - endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) - if err != nil { - nf := (&aws.EndpointNotFoundError{}) - if errors.As(err, &nf) { - ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) - return next.HandleSerialize(ctx, in) - } - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - req.URL, err = url.Parse(endpoint.URL) - if err != nil { - return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) - } - - if len(awsmiddleware.GetSigningName(ctx)) == 0 { - signingName := endpoint.SigningName - if len(signingName) == 0 { - signingName = "sts" - } - ctx = awsmiddleware.SetSigningName(ctx, signingName) - } - ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) - ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) - ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) - ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) - return next.HandleSerialize(ctx, in) -} -func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { - return stack.Serialize.Insert(&ResolveEndpoint{ - Resolver: o.EndpointResolver, - Options: o.EndpointOptions, - }, "OperationSerializer", middleware.Before) -} - -func removeResolveEndpointMiddleware(stack *middleware.Stack) error { - _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) - return err -} - -type wrappedEndpointResolver struct { - awsResolver aws.EndpointResolverWithOptions -} - -func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - return w.awsResolver.ResolveEndpoint(ServiceID, region, options) -} - -type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) - -func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { - return a(service, region) -} - -var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) - -// withEndpointResolver returns an aws.EndpointResolverWithOptions that 
first delegates endpoint resolution to the awsResolver. -// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, -// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked -// via its middleware. -// -// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { - var resolver aws.EndpointResolverWithOptions - - if awsResolverWithOptions != nil { - resolver = awsResolverWithOptions - } else if awsResolver != nil { - resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) - } - - return &wrappedEndpointResolver{ - awsResolver: resolver, - } -} - -func finalizeClientEndpointResolverOptions(options *Options) { - options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() - - if len(options.EndpointOptions.ResolvedRegion) == 0 { - const fipsInfix = "-fips-" - const fipsPrefix = "fips-" - const fipsSuffix = "-fips" - - if strings.Contains(options.Region, fipsInfix) || - strings.Contains(options.Region, fipsPrefix) || - strings.Contains(options.Region, fipsSuffix) { - options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( - options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") - options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled - } - } - -} - -func resolveEndpointResolverV2(options *Options) { - if options.EndpointResolverV2 == nil { - options.EndpointResolverV2 = NewDefaultEndpointResolverV2() - } -} - -func resolveBaseEndpoint(cfg aws.Config, o *Options) { - if cfg.BaseEndpoint != nil { - o.BaseEndpoint = cfg.BaseEndpoint - } - - _, g := os.LookupEnv("AWS_ENDPOINT_URL") - _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS") - - if g && !s { - return - } - - value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources) - if found && err == nil { - o.BaseEndpoint = &value - } -} - -func bindRegion(region string) *string { - if region == "" { - return nil - } - return aws.String(endpoints.MapFIPSRegion(region)) -} - -// EndpointParameters provides the parameters that influence how endpoints are -// resolved. -type EndpointParameters struct { - // The AWS region used to dispatch the request. - // - // Parameter is - // required. - // - // AWS::Region - Region *string - - // When true, use the dual-stack endpoint. If the configured endpoint does not - // support dual-stack, dispatching the request MAY return an error. - // - // Defaults to - // false if no value is provided. - // - // AWS::UseDualStack - UseDualStack *bool - - // When true, send this request to the FIPS-compliant regional endpoint. If the - // configured endpoint does not have a FIPS compliant endpoint, dispatching the - // request will return an error. - // - // Defaults to false if no value is - // provided. - // - // AWS::UseFIPS - UseFIPS *bool - - // Override the endpoint used to send this request - // - // Parameter is - // required. - // - // SDK::Endpoint - Endpoint *string - - // Whether the global endpoint should be used, rather then the regional endpoint - // for us-east-1. - // - // Defaults to false if no value is - // provided. - // - // AWS::STS::UseGlobalEndpoint - UseGlobalEndpoint *bool -} - -// ValidateRequired validates required parameters are set. 
-func (p EndpointParameters) ValidateRequired() error { - if p.UseDualStack == nil { - return fmt.Errorf("parameter UseDualStack is required") - } - - if p.UseFIPS == nil { - return fmt.Errorf("parameter UseFIPS is required") - } - - if p.UseGlobalEndpoint == nil { - return fmt.Errorf("parameter UseGlobalEndpoint is required") - } - - return nil -} - -// WithDefaults returns a shallow copy of EndpointParameterswith default values -// applied to members where applicable. -func (p EndpointParameters) WithDefaults() EndpointParameters { - if p.UseDualStack == nil { - p.UseDualStack = ptr.Bool(false) - } - - if p.UseFIPS == nil { - p.UseFIPS = ptr.Bool(false) - } - - if p.UseGlobalEndpoint == nil { - p.UseGlobalEndpoint = ptr.Bool(false) - } - return p -} - -type stringSlice []string - -func (s stringSlice) Get(i int) *string { - if i < 0 || i >= len(s) { - return nil - } - - v := s[i] - return &v -} - -// EndpointResolverV2 provides the interface for resolving service endpoints. -type EndpointResolverV2 interface { - // ResolveEndpoint attempts to resolve the endpoint with the provided options, - // returning the endpoint if found. Otherwise an error is returned. - ResolveEndpoint(ctx context.Context, params EndpointParameters) ( - smithyendpoints.Endpoint, error, - ) -} - -// resolver provides the implementation for resolving endpoints. -type resolver struct{} - -func NewDefaultEndpointResolverV2() EndpointResolverV2 { - return &resolver{} -} - -// ResolveEndpoint attempts to resolve the endpoint with the provided options, -// returning the endpoint if found. Otherwise an error is returned. -func (r *resolver) ResolveEndpoint( - ctx context.Context, params EndpointParameters, -) ( - endpoint smithyendpoints.Endpoint, err error, -) { - params = params.WithDefaults() - if err = params.ValidateRequired(); err != nil { - return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) - } - _UseDualStack := *params.UseDualStack - _UseFIPS := *params.UseFIPS - _UseGlobalEndpoint := *params.UseGlobalEndpoint - - if _UseGlobalEndpoint == true { - if !(params.Endpoint != nil) { - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == false { - if _UseDualStack == false { - if _Region == "ap-northeast-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-south-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: 
func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-southeast-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ap-southeast-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "aws-global" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "ca-central-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-central-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - 
SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-north-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "eu-west-3" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "sa-east-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - 
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-east-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-east-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-west-1" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - if _Region == "us-west-2" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse 
uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - } - } - } - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if _UseFIPS == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") - } - if _UseDualStack == true { - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") - } - uriString := _Endpoint - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if exprVal := params.Region; exprVal != nil { - _Region := *exprVal - _ = _Region - if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { - _PartitionResult := *exprVal - _ = _PartitionResult - if _UseFIPS == true { - if _UseDualStack == true { - if true == _PartitionResult.SupportsFIPS { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") - } - } - if _UseFIPS == true { - if _PartitionResult.SupportsFIPS == true { - if _PartitionResult.Name == "aws-us-gov" { - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts-fips.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") - } - if _UseDualStack == true { - if true == _PartitionResult.SupportsDualStack { - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DualStackDnsSuffix) - return out.String() - }() - - uri, err := 
url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") - } - if _Region == "aws-global" { - uriString := "https://sts.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetSigV4SigningName(&sp, "sts") - smithyhttp.SetSigV4ASigningName(&sp, "sts") - - smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://sts.") - out.WriteString(_Region) - out.WriteString(".") - out.WriteString(_PartitionResult.DnsSuffix) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") -} - -type endpointParamsBinder interface { - bindEndpointParams(*EndpointParameters) -} - -func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { - params := &EndpointParameters{} - - params.Region = bindRegion(options.Region) - params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) - params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) - params.Endpoint = options.BaseEndpoint - - if b, ok := input.(endpointParamsBinder); ok { - b.bindEndpointParams(params) - } - - return params -} - -type resolveEndpointV2Middleware struct { - options Options -} - -func (*resolveEndpointV2Middleware) ID() string { - return "ResolveEndpointV2" -} - -func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "ResolveEndpoint") - defer span.End() - - if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { - return next.HandleFinalize(ctx, in) - } - - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - if m.options.EndpointResolverV2 == nil { - return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") - } - - params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) - endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", - func() (smithyendpoints.Endpoint, error) { - return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) - }) - if err != nil { - return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) - } - - 
span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) - - if endpt.URI.RawPath == "" && req.URL.RawPath != "" { - endpt.URI.RawPath = endpt.URI.Path - } - req.URL.Scheme = endpt.URI.Scheme - req.URL.Host = endpt.URI.Host - req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) - req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) - for k := range endpt.Headers { - req.Header.Set(k, endpt.Headers.Get(k)) - } - - rscheme := getResolvedAuthScheme(ctx) - if rscheme == nil { - return out, metadata, fmt.Errorf("no resolved auth scheme") - } - - opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) - for _, o := range opts { - rscheme.SignerProperties.SetAll(&o.SignerProperties) - } - - span.End() - return next.HandleFinalize(ctx, in) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json deleted file mode 100644 index 70a88452e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "dependencies": { - "github.com/aws/aws-sdk-go-v2": "v1.4.0", - "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", - "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", - "github.com/aws/smithy-go": "v1.4.0" - }, - "files": [ - "api_client.go", - "api_client_test.go", - "api_op_AssumeRole.go", - "api_op_AssumeRoleWithSAML.go", - "api_op_AssumeRoleWithWebIdentity.go", - "api_op_AssumeRoot.go", - "api_op_DecodeAuthorizationMessage.go", - "api_op_GetAccessKeyInfo.go", - "api_op_GetCallerIdentity.go", - "api_op_GetFederationToken.go", - "api_op_GetSessionToken.go", - "auth.go", - "deserializers.go", - "doc.go", - "endpoints.go", - "endpoints_config_test.go", - "endpoints_test.go", - "generated.json", - "internal/endpoints/endpoints.go", - "internal/endpoints/endpoints_test.go", - "options.go", - "protocol_test.go", - "serializers.go", - "snapshot_test.go", - "types/errors.go", - "types/types.go", - "validators.go" - ], - "go": "1.15", - "module": "github.com/aws/aws-sdk-go-v2/service/sts", - "unstable": false -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go deleted file mode 100644 index b7c1dfdfa..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package sts - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.33.8" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go deleted file mode 100644 index 4e07994d0..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ /dev/null @@ -1,521 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. 
- -package endpoints - -import ( - "github.com/aws/aws-sdk-go-v2/aws" - endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" - "github.com/aws/smithy-go/logging" - "regexp" -) - -// Options is the endpoint resolver configuration options -type Options struct { - // Logger is a logging implementation that log events should be sent to. - Logger logging.Logger - - // LogDeprecated indicates that deprecated endpoints should be logged to the - // provided logger. - LogDeprecated bool - - // ResolvedRegion is used to override the region to be resolved, rather then the - // using the value passed to the ResolveEndpoint method. This value is used by the - // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative - // name. You must not set this value directly in your application. - ResolvedRegion string - - // DisableHTTPS informs the resolver to return an endpoint that does not use the - // HTTPS scheme. - DisableHTTPS bool - - // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. - UseDualStackEndpoint aws.DualStackEndpointState - - // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. - UseFIPSEndpoint aws.FIPSEndpointState -} - -func (o Options) GetResolvedRegion() string { - return o.ResolvedRegion -} - -func (o Options) GetDisableHTTPS() bool { - return o.DisableHTTPS -} - -func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { - return o.UseDualStackEndpoint -} - -func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { - return o.UseFIPSEndpoint -} - -func transformToSharedOptions(options Options) endpoints.Options { - return endpoints.Options{ - Logger: options.Logger, - LogDeprecated: options.LogDeprecated, - ResolvedRegion: options.ResolvedRegion, - DisableHTTPS: options.DisableHTTPS, - UseDualStackEndpoint: options.UseDualStackEndpoint, - UseFIPSEndpoint: options.UseFIPSEndpoint, - } -} - -// Resolver STS endpoint resolver -type Resolver struct { - partitions endpoints.Partitions -} - -// ResolveEndpoint resolves the service endpoint for the given region and options -func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { - if len(region) == 0 { - return endpoint, &aws.MissingRegionError{} - } - - opt := transformToSharedOptions(options) - return r.partitions.ResolveEndpoint(region, opt) -} - -// New returns a new Resolver -func New() *Resolver { - return &Resolver{ - partitions: defaultPartitions, - } -} - -var partitionRegexp = struct { - Aws *regexp.Regexp - AwsCn *regexp.Regexp - AwsIso *regexp.Regexp - AwsIsoB *regexp.Regexp - AwsIsoE *regexp.Regexp - AwsIsoF *regexp.Regexp - AwsUsGov *regexp.Regexp -}{ - - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), - AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), - AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), - AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), - AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), - AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), - AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), -} - -var defaultPartitions = endpoints.Partitions{ - { - ID: "aws", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.amazonaws.com", - Protocols: []string{"https"}, - 
SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.Aws, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "af-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-northeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-4", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-5", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ap-southeast-7", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "aws-global", - }: endpoints.Endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - }, - endpoints.EndpointKey{ - Region: "ca-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "ca-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-central-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-south-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "eu-west-3", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "il-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "me-south-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "mx-central-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "sa-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-east-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-east-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-east-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-east-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-east-2-fips", - }: 
endpoints.Endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-east-2", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-west-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-west-2", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-west-2", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.us-west-2.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-west-2-fips", - }: endpoints.Endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-west-2", - }, - Deprecated: aws.TrueTernary, - }, - }, - }, - { - ID: "aws-cn", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsCn, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "cn-north-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "cn-northwest-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIso, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-iso-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-iso-west-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso-b", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoB, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-isob-east-1", - }: endpoints.Endpoint{}, - }, - }, - { - ID: "aws-iso-e", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.cloud.adc-e.uk", - 
Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoE, - IsRegionalized: true, - }, - { - ID: "aws-iso-f", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsIsoF, - IsRegionalized: true, - }, - { - ID: "aws-us-gov", - Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ - { - Variant: endpoints.DualStackVariant, - }: { - Hostname: "sts.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, - }: { - Hostname: "sts-fips.{region}.api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - { - Variant: 0, - }: { - Hostname: "sts.{region}.amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - RegionRegex: partitionRegexp.AwsUsGov, - IsRegionalized: true, - Endpoints: endpoints.Endpoints{ - endpoints.EndpointKey{ - Region: "us-gov-east-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-gov-east-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.us-gov-east-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-gov-east-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: aws.TrueTernary, - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - }: endpoints.Endpoint{}, - endpoints.EndpointKey{ - Region: "us-gov-west-1", - Variant: endpoints.FIPSVariant, - }: { - Hostname: "sts.us-gov-west-1.amazonaws.com", - }, - endpoints.EndpointKey{ - Region: "us-gov-west-1-fips", - }: endpoints.Endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - CredentialScope: endpoints.CredentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: aws.TrueTernary, - }, - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go deleted file mode 100644 index e1398f3bb..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/aws" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" - smithyauth "github.com/aws/smithy-go/auth" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "net/http" -) - -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) -} - -type Options struct { - // Set of options to modify how an operation is invoked. 
These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // The optional application specific identifier appended to the User-Agent header. - AppID string - - // This endpoint will be given as input to an EndpointResolverV2. It is used for - // providing a custom base endpoint that is subject to modifications by the - // processing EndpointResolverV2. - BaseEndpoint *string - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The credentials object to use when signing requests. - Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - // - // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a - // value for this field will likely prevent you from using any endpoint-related - // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. - // - // To migrate an EndpointResolver implementation that uses a custom endpoint, set - // the client option BaseEndpoint instead. - EndpointResolver EndpointResolver - - // Resolves the endpoint used for a particular service operation. This should be - // used over the deprecated EndpointResolver. - EndpointResolverV2 EndpointResolverV2 - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The client meter provider. - MeterProvider metrics.MeterProvider - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. - // - // If specified in an operation call's functional options with a value that is - // different than the constructed client's Options, the Client's Retryer will be - // wrapped to use the operation's specific RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. - // - // When creating a new API Clients this member will only be used if the Retryer - // Options member is nil. This value will be ignored if Retryer is not nil. - // - // Currently does not support per operation call overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer - - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment - - // The client tracer provider. 
- TracerProvider tracing.TracerProvider - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. - // - // Currently does not support per operation call overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient - - // The auth scheme resolver which determines how to authenticate for each - // operation. - AuthSchemeResolver AuthSchemeResolver - - // The list of auth schemes supported by the client. - AuthSchemes []smithyhttp.AuthScheme -} - -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to -} - -func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { - if schemeID == "aws.auth#sigv4" { - return getSigV4IdentityResolver(o) - } - if schemeID == "smithy.api#noAuth" { - return &smithyauth.AnonymousIdentityResolver{} - } - return nil -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) - } -} - -// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for -// this field will likely prevent you from using any endpoint-related service -// features released after the introduction of EndpointResolverV2 and BaseEndpoint. -// -// To migrate an EndpointResolver implementation that uses a custom endpoint, set -// the client option BaseEndpoint instead. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} - -// WithEndpointResolverV2 returns a functional option for setting the Client's -// EndpointResolverV2 option. -func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { - return func(o *Options) { - o.EndpointResolverV2 = v - } -} - -func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { - if o.Credentials != nil { - return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} - } - return nil -} - -// WithSigV4SigningName applies an override to the authentication workflow to -// use the given signing name for SigV4-authenticated operations. -// -// This is an advanced setting. The value here is FINAL, taking precedence over -// the resolved signing name from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningName(name string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), - middleware.Before, - ) - }) - } -} - -// WithSigV4SigningRegion applies an override to the authentication workflow to -// use the given signing region for SigV4-authenticated operations. -// -// This is an advanced setting. 
The value here is FINAL, taking precedence over -// the resolved signing region from both auth scheme resolution and endpoint -// resolution. -func WithSigV4SigningRegion(region string) func(*Options) { - fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, - ) { - return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) - } - return func(o *Options) { - o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { - return s.Initialize.Add( - middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), - middleware.Before, - ) - }) - } -} - -func ignoreAnonymousAuth(options *Options) { - if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { - options.Credentials = nil - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go deleted file mode 100644 index 96b222136..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ /dev/null @@ -1,1005 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/aws/protocol/query" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/encoding/httpbinding" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" - smithyhttp "github.com/aws/smithy-go/transport/http" - "path" -) - -type awsAwsquery_serializeOpAssumeRole struct { -} - -func (*awsAwsquery_serializeOpAssumeRole) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRole") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = 
bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpAssumeRoleWithSAML struct { -} - -func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRoleWithSAML") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { -} - -func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - 
defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRoleWithWebIdentity") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpAssumeRoot struct { -} - -func (*awsAwsquery_serializeOpAssumeRoot) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpAssumeRoot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*AssumeRootInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter 
:= bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("AssumeRoot") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentAssumeRootInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { -} - -func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("DecodeAuthorizationMessage") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetAccessKeyInfo struct { -} - -func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { - return 
"OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetAccessKeyInfoInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetAccessKeyInfo") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetCallerIdentity struct { -} - -func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetCallerIdentityInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetCallerIdentity") - body.Key("Version").String("2011-06-15") - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetFederationToken struct { -} - -func (*awsAwsquery_serializeOpGetFederationToken) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetFederationTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetFederationToken") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - 
endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} - -type awsAwsquery_serializeOpGetSessionToken struct { -} - -func (*awsAwsquery_serializeOpGetSessionToken) ID() string { - return "OperationSerializer" -} - -func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - _, span := tracing.StartSpan(ctx, "OperationSerializer") - endTimer := startMetricTimer(ctx, "client.call.serialization_duration") - defer endTimer() - defer span.End() - request, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} - } - - input, ok := in.Parameters.(*GetSessionTokenInput) - _ = input - if !ok { - return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} - } - - operationPath := "/" - if len(request.Request.URL.Path) == 0 { - request.Request.URL.Path = operationPath - } else { - request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) - if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { - request.Request.URL.Path += "/" - } - } - request.Request.Method = "POST" - httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") - - bodyWriter := bytes.NewBuffer(nil) - bodyEncoder := query.NewEncoder(bodyWriter) - body := bodyEncoder.Object() - body.Key("Action").String("GetSessionToken") - body.Key("Version").String("2011-06-15") - - if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - err = bodyEncoder.Encode() - if err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - - if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { - return out, metadata, &smithy.SerializationError{Err: err} - } - in.Request = request - - endTimer() - span.End() - return next.HandleSerialize(ctx, in) -} -func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { - object := value.Object() - _ = object - - if v.Arn != nil { - objectKey := object.Key("arn") - objectKey.String(*v.Arn) - } - - return nil -} - -func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error { - object := value.Object() - _ = object - - if v.ContextAssertion != nil { - objectKey := object.Key("ContextAssertion") - objectKey.String(*v.ContextAssertion) - } - - if v.ProviderArn != nil { - objectKey := object.Key("ProviderArn") - objectKey.String(*v.ProviderArn) - } - - return nil -} - -func 
awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { - object := value.Object() - _ = object - - if v.Key != nil { - objectKey := object.Key("Key") - objectKey.String(*v.Key) - } - - if v.Value != nil { - objectKey := object.Key("Value") - objectKey.String(*v.Value) - } - - return nil -} - -func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - av.String(v[i]) - } - return nil -} - -func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { - array := value.Array("member") - - for i := range v { - av := array.Value() - if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { - return err - } - } - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.ExternalId != nil { - objectKey := object.Key("ExternalId") - objectKey.String(*v.ExternalId) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.ProvidedContexts != nil { - objectKey := object.Key("ProvidedContexts") - if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil { - return err - } - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.RoleSessionName != nil { - objectKey := object.Key("RoleSessionName") - objectKey.String(*v.RoleSessionName) - } - - if v.SerialNumber != nil { - objectKey := object.Key("SerialNumber") - objectKey.String(*v.SerialNumber) - } - - if v.SourceIdentity != nil { - objectKey := object.Key("SourceIdentity") - objectKey.String(*v.SourceIdentity) - } - - if v.Tags != nil { - objectKey := object.Key("Tags") - if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { - return err - } - } - - if v.TokenCode != nil { - objectKey := object.Key("TokenCode") - objectKey.String(*v.TokenCode) - } - - if v.TransitiveTagKeys != nil { - objectKey := object.Key("TransitiveTagKeys") - if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { - return err - } - } - - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.PrincipalArn != nil { - objectKey := 
object.Key("PrincipalArn") - objectKey.String(*v.PrincipalArn) - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.SAMLAssertion != nil { - objectKey := object.Key("SAMLAssertion") - objectKey.String(*v.SAMLAssertion) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.ProviderId != nil { - objectKey := object.Key("ProviderId") - objectKey.String(*v.ProviderId) - } - - if v.RoleArn != nil { - objectKey := object.Key("RoleArn") - objectKey.String(*v.RoleArn) - } - - if v.RoleSessionName != nil { - objectKey := object.Key("RoleSessionName") - objectKey.String(*v.RoleSessionName) - } - - if v.WebIdentityToken != nil { - objectKey := object.Key("WebIdentityToken") - objectKey.String(*v.WebIdentityToken) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.TargetPrincipal != nil { - objectKey := object.Key("TargetPrincipal") - objectKey.String(*v.TargetPrincipal) - } - - if v.TaskPolicyArn != nil { - objectKey := object.Key("TaskPolicyArn") - if err := awsAwsquery_serializeDocumentPolicyDescriptorType(v.TaskPolicyArn, objectKey); err != nil { - return err - } - } - - return nil -} - -func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { - object := value.Object() - _ = object - - if v.EncodedMessage != nil { - objectKey := object.Key("EncodedMessage") - objectKey.String(*v.EncodedMessage) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { - object := value.Object() - _ = object - - if v.AccessKeyId != nil { - objectKey := object.Key("AccessKeyId") - objectKey.String(*v.AccessKeyId) - } - - return nil -} - -func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { - object := value.Object() - _ = object - - return nil -} - -func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.Name != nil { - objectKey := object.Key("Name") - objectKey.String(*v.Name) - } - - if v.Policy != nil { - objectKey := object.Key("Policy") - objectKey.String(*v.Policy) - } - - if v.PolicyArns != nil { - objectKey := object.Key("PolicyArns") - if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { - return err - } - } - - if v.Tags != nil { - objectKey := object.Key("Tags") - if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { - return err - } - } - - 
return nil -} - -func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { - object := value.Object() - _ = object - - if v.DurationSeconds != nil { - objectKey := object.Key("DurationSeconds") - objectKey.Integer(*v.DurationSeconds) - } - - if v.SerialNumber != nil { - objectKey := object.Key("SerialNumber") - objectKey.String(*v.SerialNumber) - } - - if v.TokenCode != nil { - objectKey := object.Key("TokenCode") - objectKey.String(*v.TokenCode) - } - - return nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go deleted file mode 100644 index 041629bba..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ /dev/null @@ -1,248 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// The web identity token that was passed is expired or is not valid. Get a new -// identity token from the identity provider and then retry the request. -type ExpiredTokenException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ExpiredTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ExpiredTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ExpiredTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ExpiredTokenException" - } - return *e.ErrorCodeOverride -} -func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request could not be fulfilled because the identity provider (IDP) that was -// asked to verify the incoming identity token could not be reached. This is often -// a transient error caused by network conditions. Retry the request a limited -// number of times so that you don't exceed the request rate. If the error -// persists, the identity provider might be down or not responding. -type IDPCommunicationErrorException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *IDPCommunicationErrorException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *IDPCommunicationErrorException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *IDPCommunicationErrorException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "IDPCommunicationError" - } - return *e.ErrorCodeOverride -} -func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The identity provider (IdP) reported that authentication failed. This might be -// because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it can -// also mean that the claim has expired or has been explicitly revoked. 
-type IDPRejectedClaimException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *IDPRejectedClaimException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *IDPRejectedClaimException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *IDPRejectedClaimException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "IDPRejectedClaim" - } - return *e.ErrorCodeOverride -} -func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as line -// breaks, or if the message has expired. -type InvalidAuthorizationMessageException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidAuthorizationMessageException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidAuthorizationMessageException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidAuthorizationMessageException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidAuthorizationMessageException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry the -// request. -type InvalidIdentityTokenException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidIdentityTokenException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidIdentityTokenException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidIdentityTokenException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidIdentityToken" - } - return *e.ErrorCodeOverride -} -func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -type MalformedPolicyDocumentException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *MalformedPolicyDocumentException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *MalformedPolicyDocumentException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *MalformedPolicyDocumentException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "MalformedPolicyDocument" - } - return *e.ErrorCodeOverride -} -func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session tags -// into a packed binary format that has a separate limit. 
The error message -// indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide. -// -// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length -type PackedPolicyTooLargeException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *PackedPolicyTooLargeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *PackedPolicyTooLargeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *PackedPolicyTooLargeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "PackedPolicyTooLarge" - } - return *e.ErrorCodeOverride -} -func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see [Activating and Deactivating STS in an Amazon Web Services Region]in the IAM -// User Guide. -// -// [Activating and Deactivating STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html -type RegionDisabledException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RegionDisabledException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RegionDisabledException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RegionDisabledException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RegionDisabledException" - } - return *e.ErrorCodeOverride -} -func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go deleted file mode 100644 index dff7a3c2e..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ /dev/null @@ -1,144 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" - "time" -) - -// The identifiers for the temporary security credentials that the operation -// returns. -type AssumedRoleUser struct { - - // The ARN of the temporary security credentials that are returned from the AssumeRole - // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in - // the IAM User Guide. - // - // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html - // - // This member is required. - Arn *string - - // A unique identifier that contains the role ID and the role session name of the - // role that is being assumed. 
The role ID is generated by Amazon Web Services when - // the role is created. - // - // This member is required. - AssumedRoleId *string - - noSmithyDocumentSerde -} - -// Amazon Web Services credentials for API authentication. -type Credentials struct { - - // The access key ID that identifies the temporary security credentials. - // - // This member is required. - AccessKeyId *string - - // The date on which the current credentials expire. - // - // This member is required. - Expiration *time.Time - - // The secret access key that can be used to sign requests. - // - // This member is required. - SecretAccessKey *string - - // The token that users must pass to the service API to use the temporary - // credentials. - // - // This member is required. - SessionToken *string - - noSmithyDocumentSerde -} - -// Identifiers for the federated user that is associated with the credentials. -type FederatedUser struct { - - // The ARN that specifies the federated user that is associated with the - // credentials. For more information about ARNs and how to use them in policies, - // see [IAM Identifiers]in the IAM User Guide. - // - // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html - // - // This member is required. - Arn *string - - // The string that identifies the federated user associated with the credentials, - // similar to the unique ID of an IAM user. - // - // This member is required. - FederatedUserId *string - - noSmithyDocumentSerde -} - -// A reference to the IAM managed policy that is passed as a session policy for a -// role session or a federated user session. -type PolicyDescriptorType struct { - - // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web - // Services General Reference. - // - // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - Arn *string - - noSmithyDocumentSerde -} - -// Contains information about the provided context. This includes the signed and -// encrypted trusted context assertion and the context provider ARN from which the -// trusted context assertion was generated. -type ProvidedContext struct { - - // The signed and encrypted trusted context assertion generated by the context - // provider. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. - ContextAssertion *string - - // The context provider ARN from which the trusted context assertion was generated. - ProviderArn *string - - noSmithyDocumentSerde -} - -// You can pass custom key-value pair attributes when you assume a role or -// federate a user. These are called session tags. You can then use the session -// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User -// Guide. -// -// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html -type Tag struct { - - // The key for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag keys can’t - // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User - // Guide. 
- // - // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length - // - // This member is required. - Key *string - - // The value for a session tag. - // - // You can pass up to 50 session tags. The plain text session tag values can’t - // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User - // Guide. - // - // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length - // - // This member is required. - Value *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go deleted file mode 100644 index 1026e2211..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go +++ /dev/null @@ -1,347 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package sts - -import ( - "context" - "fmt" - "github.com/aws/aws-sdk-go-v2/service/sts/types" - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/middleware" -) - -type validateOpAssumeRole struct { -} - -func (*validateOpAssumeRole) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpAssumeRoleWithSAML struct { -} - -func (*validateOpAssumeRoleWithSAML) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpAssumeRoleWithWebIdentity struct { -} - -func (*validateOpAssumeRoleWithWebIdentity) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpAssumeRoot struct { -} - -func (*validateOpAssumeRoot) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpAssumeRoot) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out 
middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*AssumeRootInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpAssumeRootInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpDecodeAuthorizationMessage struct { -} - -func (*validateOpDecodeAuthorizationMessage) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetAccessKeyInfo struct { -} - -func (*validateOpGetAccessKeyInfo) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetAccessKeyInfoInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetAccessKeyInfoInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -type validateOpGetFederationToken struct { -} - -func (*validateOpGetFederationToken) ID() string { - return "OperationInputValidation" -} - -func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - input, ok := in.Parameters.(*GetFederationTokenInput) - if !ok { - return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) - } - if err := validateOpGetFederationTokenInput(input); err != nil { - return out, metadata, err - } - return next.HandleInitialize(ctx, in) -} - -func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) -} - -func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) -} - -func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) -} - -func addOpAssumeRootValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpAssumeRoot{}, middleware.After) -} - -func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) -} - -func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) -} - -func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { - return stack.Initialize.Add(&validateOpGetFederationToken{}, 
middleware.After) -} - -func validateTag(v *types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "Tag"} - if v.Key == nil { - invalidParams.Add(smithy.NewErrParamRequired("Key")) - } - if v.Value == nil { - invalidParams.Add(smithy.NewErrParamRequired("Value")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateTagListType(v []types.Tag) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "TagListType"} - for i := range v { - if err := validateTag(&v[i]); err != nil { - invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleInput(v *AssumeRoleInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.RoleSessionName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) - } - if v.Tags != nil { - if err := validateTagListType(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.PrincipalArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) - } - if v.SAMLAssertion == nil { - invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} - if v.RoleArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) - } - if v.RoleSessionName == nil { - invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) - } - if v.WebIdentityToken == nil { - invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpAssumeRootInput(v *AssumeRootInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "AssumeRootInput"} - if v.TargetPrincipal == nil { - invalidParams.Add(smithy.NewErrParamRequired("TargetPrincipal")) - } - if v.TaskPolicyArn == nil { - invalidParams.Add(smithy.NewErrParamRequired("TaskPolicyArn")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} - if v.EncodedMessage == nil { - invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { - if v == nil { - return nil - } - invalidParams := 
smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} - if v.AccessKeyId == nil { - invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} - -func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { - if v == nil { - return nil - } - invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} - if v.Name == nil { - invalidParams.Add(smithy.NewErrParamRequired("Name")) - } - if v.Tags != nil { - if err := validateTagListType(v.Tags); err != nil { - invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) - } - } - if invalidParams.Len() > 0 { - return invalidParams - } else { - return nil - } -} diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore deleted file mode 100644 index 2518b3491..000000000 --- a/vendor/github.com/aws/smithy-go/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -# Eclipse -.classpath -.project -.settings/ - -# Intellij -.idea/ -*.iml -*.iws - -# Mac -.DS_Store - -# Maven -target/ -**/dependency-reduced-pom.xml - -# Gradle -/.gradle -build/ -*/out/ -*/*/out/ - -# VS Code -bin/ -.vscode/ - -# make -c.out diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml deleted file mode 100644 index f8d1035cc..000000000 --- a/vendor/github.com/aws/smithy-go/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go -sudo: true -dist: bionic - -branches: - only: - - main - -os: - - linux - - osx - # Travis doesn't work with windows and Go tip - #- windows - -go: - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi - - (cd /tmp/; go get golang.org/x/lint/golint) - -script: - - make go test -v ./...; - diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md deleted file mode 100644 index 56b19e3a1..000000000 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ /dev/null @@ -1,282 +0,0 @@ -# Release (2024-11-15) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.22.1 - * **Bug Fix**: Fix failure to replace URI path segments when their names overlap. - -# Release (2024-10-03) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.22.0 - * **Feature**: Add HTTP client metrics. - -# Release (2024-09-25) - -## Module Highlights -* `github.com/aws/smithy-go/aws-http-auth`: [v1.0.0](aws-http-auth/CHANGELOG.md#v100-2024-09-25) - * **Release**: Initial release of module aws-http-auth, which implements generically consumable SigV4 and SigV4a request signing. - -# Release (2024-09-19) - -## General Highlights -* **Dependency Update**: Updated to the latest SDK module versions - -## Module Highlights -* `github.com/aws/smithy-go`: v1.21.0 - * **Feature**: Add tracing and metrics APIs, and builtin instrumentation for both, in generated clients. -* `github.com/aws/smithy-go/metrics/smithyotelmetrics`: [v1.0.0](metrics/smithyotelmetrics/CHANGELOG.md#v100-2024-09-19) - * **Release**: Initial release of `smithyotelmetrics` module, which is used to adapt an OpenTelemetry SDK meter provider to be used with Smithy clients. 
-* `github.com/aws/smithy-go/tracing/smithyoteltracing`: [v1.0.0](tracing/smithyoteltracing/CHANGELOG.md#v100-2024-09-19) - * **Release**: Initial release of `smithyoteltracing` module, which is used to adapt an OpenTelemetry SDK tracer provider to be used with Smithy clients. - -# Release (2024-08-14) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.4 - * **Dependency Update**: Bump minimum Go version to 1.21. - -# Release (2024-06-27) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.3 - * **Bug Fix**: Fix encoding/cbor test overflow on x86. - -# Release (2024-03-29) - -* No change notes available for this release. - -# Release (2024-02-21) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.1 - * **Bug Fix**: Remove runtime dependency on go-cmp. - -# Release (2024-02-13) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.20.0 - * **Feature**: Add codegen definition for sigv4a trait. - * **Feature**: Bump minimum Go version to 1.20 per our language support policy. - -# Release (2023-12-07) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.19.0 - * **Feature**: Support modeled request compression. - -# Release (2023-11-30) - -* No change notes available for this release. - -# Release (2023-11-29) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.18.0 - * **Feature**: Expose Options() method on generated service clients. - -# Release (2023-11-15) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.17.0 - * **Feature**: Support identity/auth components of client reference architecture. - -# Release (2023-10-31) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.16.0 - * **Feature**: **LANG**: Bump minimum go version to 1.19. - -# Release (2023-10-06) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.15.0 - * **Feature**: Add `http.WithHeaderComment` middleware. - -# Release (2023-08-18) - -* No change notes available for this release. - -# Release (2023-08-07) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.14.1 - * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation. - -# Release (2023-07-31) - -## General Highlights -* **Feature**: Adds support for smithy-modeled endpoint resolution. - -# Release (2022-12-02) - -* No change notes available for this release. - -# Release (2022-10-24) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.13.4 - * **Bug Fix**: fixed document type checking for encoding nested types - -# Release (2022-09-14) - -* No change notes available for this release. - -# Release (v1.13.2) - -* No change notes available for this release. - -# Release (v1.13.1) - -* No change notes available for this release. - -# Release (v1.13.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.13.0 - * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation. - -# Release (v1.12.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.12.1 - * **Bug Fix**: Fixes a bug where JSON object keys were not escaped. - -# Release (v1.12.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.12.0 - * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value. 
- -# Release (v1.11.3) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.3 - * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8. - -# Release (v1.11.2) - -* No change notes available for this release. - -# Release (v1.11.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.1 - * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583) - -# Release (v1.11.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.0 - * **Feature**: Updates deserialization of header list to supported quoted strings - -# Release (v1.10.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.10.0 - * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type. - -# Release (v1.9.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.9.1 - * **Documentation**: Fixes various typos in Go package documentation. - -# Release (v1.9.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.9.0 - * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred. - * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing. - -# Release v1.8.1 - -### Smithy Go Module -* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set. - * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418) - -# Release v1.8.0 - -### Smithy Go Module - -* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324)) - * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character, nor UTC offset. - * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387) - -# Release v1.7.0 - -### Smithy Go Module -* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314)) - * Handle error for defer close call -* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318)) - * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata. -* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310)) - -### Codegen -* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310)) - * Adds support for Smithy Document shapes and supporting types for protocols to implement support - -# Release v1.6.0 (2021-07-15) - -### Smithy Go Module -* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316)) - -### Codegen -* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316)) -* Adds support protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. 
([#315](https://github.com/aws/smithy-go/pull/315)) - -# Release v1.5.0 (2021-06-25) - -### Smithy Go module -* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307)) - * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost. - -### Codegen -* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301)) -* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296)) -* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283)) -* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298)) -* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304)) -* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303)) - -# Release v1.4.0 (2021-05-06) - -### Smithy Go module -* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267)) - -### Codegen -* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289)) -* Add support for httpQueryParams location -* Add support for model renaming conflict resolution with service closure - -# Release v1.3.1 (2021-04-08) - -### Smithy Go module -* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279)) -* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282)) - -# Release v1.3.0 (2021-04-01) - -### Smithy Go module -* `transport/http`: Add utility to safely join string to url path, and url raw query. - -### Codegen -* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. - -# Release v1.2.0 (2021-03-12) - -### Smithy Go module -* Fix support for parsing shortened year format in HTTP Date header. -* Fix GitHub APIDiff action workflow to get gorelease tool correctly. -* Fix codegen artifact unit test for Go 1.16 - -### Codegen -* Fix generating paginator nil parameter handling before usage. -* Fix Serialize unboxed members decorated as required. -* Add ability to define resolvers at both client construction and operation invocation. -* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md deleted file mode 100644 index 5b627cfa6..000000000 --- a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md deleted file mode 100644 index c4b6a1c50..000000000 --- a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md +++ /dev/null @@ -1,59 +0,0 @@ -# Contributing Guidelines - -Thank you for your interest in contributing to our project. 
Whether it's a bug report, new feature, correction, or additional -documentation, we greatly value feedback and contributions from our community. - -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary -information to effectively respond to your bug report or contribution. - - -## Reporting Bugs/Feature Requests - -We welcome you to use the GitHub issue tracker to report bugs or suggest features. - -When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already -reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: - -* A reproducible test case or series of steps -* The version of our code being used -* Any modifications you've made relevant to the bug -* Anything unusual about your environment or deployment - - -## Contributing via Pull Requests -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: - -1. You are working against the latest source on the *main* branch. -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. -3. You open an issue to discuss any significant work - we would hate for your time to be wasted. - -To send us a pull request, please: - -1. Fork the repository. -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. -3. Ensure local tests pass. -4. Commit to your fork using clear commit messages. -5. Send us a pull request, answering any default questions in the pull request interface. -6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. - -GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). - - -## Finding contributions to work on -Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. - - -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. - - -## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. - - -## Licensing - -See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE deleted file mode 100644 index 67db85882..000000000 --- a/vendor/github.com/aws/smithy-go/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile deleted file mode 100644 index e66fa8cac..000000000 --- a/vendor/github.com/aws/smithy-go/Makefile +++ /dev/null @@ -1,102 +0,0 @@ -PRE_RELEASE_VERSION ?= - -RELEASE_MANIFEST_FILE ?= -RELEASE_CHGLOG_DESC_FILE ?= - -REPOTOOLS_VERSION ?= latest -REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools -REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= -REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} - -UNIT_TEST_TAGS= -BUILD_TAGS= - -ifneq ($(PRE_RELEASE_VERSION),) - REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} -endif - -smithy-publish-local: - cd codegen && ./gradlew publishToMavenLocal - -smithy-build: - cd codegen && ./gradlew build - -smithy-clean: - cd codegen && ./gradlew clean - -################## -# Linting/Verify # -################## -.PHONY: verify vet cover - -verify: vet - -vet: - go vet ${BUILD_TAGS} --all ./... - -cover: - go test ${BUILD_TAGS} -coverprofile c.out ./... - @cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \ - echo "total (statements): $$cover%"; - -################ -# Unit Testing # -################ -.PHONY: unit unit-race unit-test unit-race-test - -unit: verify - go vet ${BUILD_TAGS} --all ./... && \ - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} ./... - -unit-race: verify - go vet ${BUILD_TAGS} --all ./... && \ - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... - -unit-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} ./... - -unit-race-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... - -##################### -# Release Process # -##################### -.PHONY: preview-release pre-release-validation release - -preview-release: - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} - -pre-release-validation: - @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ - echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ - fi - @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ - echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ - fi - -release: pre-release-validation - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} - go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} - go run ${REPOTOOLS_CMD_CHANGELOG} rm -all - go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} - -module-version: - @go run ${REPOTOOLS_CMD_MODULE_VERSION} . 
- -############## -# Repo Tools # -############## -.PHONY: install-changelog - -install-changelog: - go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE deleted file mode 100644 index 616fc5889..000000000 --- a/vendor/github.com/aws/smithy-go/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md deleted file mode 100644 index 08df74589..000000000 --- a/vendor/github.com/aws/smithy-go/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# Smithy Go - -[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) - -[Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. - -The smithy-go runtime requires a minimum version of Go 1.20. - -**WARNING: All interfaces are subject to change.** - -## Can I use the code generators? - -In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), -such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), -in order to generate transport mechanisms and serialization/deserialization -code ("serde") accordingly. - -The code generator does not currently support any protocols out of the box other than the new `smithy.protocols#rpcv2Cbor`, -therefore the useability of this project on its own is currently limited. -Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) -exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are -tracking the movement of those out of the SDK into smithy-go in -[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no -timeline for doing so. - -## Plugins - -This repository implements the following Smithy build plugins: - -| ID | GAV prefix | Description | -|----|------------|-------------| -| `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. | -| `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. | - -**NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.** - -## `go-codegen` - -### Configuration - -[`GoSettings`](codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/GoSettings.java) -contains all of the settings enabled from `smithy-build.json` and helper -methods and types. The up-to-date list of top-level properties enabled for -`go-client-codegen` can be found in `GoSettings::from()`. - -| Setting | Type | Required | Description | -|-----------------|---------|----------|-----------------------------------------------------------------------------------------------------------------------------| -| `service` | string | yes | The Shape ID of the service for which to generate the client. 
| -| `module` | string | yes | Name of the module in `generated.json` (and `go.mod` if `generateGoMod` is enabled) and `doc.go`. | -| `generateGoMod` | boolean | | Whether to generate a default `go.mod` file. The default value is `false`. | -| `goDirective` | string | | [Go directive](https://go.dev/ref/mod#go-mod-file-go) of the module. The default value is the minimum supported Go version. | - -### Supported protocols - -| Protocol | Notes | -|----------|-------| -| [`smithy.protocols#rpcv2Cbor`](https://smithy.io/2.0/additional-specs/protocols/smithy-rpc-v2.html) | Event streaming not yet implemented. | - -### Example - -This example applies the `go-codegen` build plugin to the Smithy quickstart -example created from `smithy init`: - -```json -{ - "version": "1.0", - "sources": [ - "models" - ], - "maven": { - "dependencies": [ - "software.amazon.smithy.go:smithy-go-codegen:0.1.0" - ] - }, - "plugins": { - "go-codegen": { - "service": "example.weather#Weather", - "module": "github.com/example/weather", - "generateGoMod": true, - "goDirective": "1.20" - } - } -} -``` - -## `go-server-codegen` - -This plugin is a work-in-progress and is currently undocumented. - -## License - -This project is licensed under the Apache-2.0 License. - diff --git a/vendor/github.com/aws/smithy-go/auth/auth.go b/vendor/github.com/aws/smithy-go/auth/auth.go deleted file mode 100644 index 5bdb70c9a..000000000 --- a/vendor/github.com/aws/smithy-go/auth/auth.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package auth defines protocol-agnostic authentication types for smithy -// clients. -package auth diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go deleted file mode 100644 index 1c9b9715c..000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package bearer provides middleware and utilities for authenticating API -// operation calls with a Bearer Token. -package bearer diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go deleted file mode 100644 index 8c7d72099..000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go +++ /dev/null @@ -1,104 +0,0 @@ -package bearer - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// Message is the middleware stack's request transport message value. -type Message interface{} - -// Signer provides an interface for implementations to decorate a request -// message with a bearer token. The signer is responsible for validating the -// message type is compatible with the signer. -type Signer interface { - SignWithBearerToken(context.Context, Token, Message) (Message, error) -} - -// AuthenticationMiddleware provides the Finalize middleware step for signing -// an request message with a bearer token. -type AuthenticationMiddleware struct { - signer Signer - tokenProvider TokenProvider -} - -// AddAuthenticationMiddleware helper adds the AuthenticationMiddleware to the -// middleware Stack in the Finalize step with the options provided. -func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error { - return s.Finalize.Add( - NewAuthenticationMiddleware(signer, tokenProvider), - middleware.After, - ) -} - -// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware. 
-func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware { - return &AuthenticationMiddleware{ - signer: signer, - tokenProvider: tokenProvider, - } -} - -const authenticationMiddlewareID = "BearerTokenAuthentication" - -// ID returns the resolver identifier -func (m *AuthenticationMiddleware) ID() string { - return authenticationMiddlewareID -} - -// HandleFinalize implements the FinalizeMiddleware interface in order to -// update the request with bearer token authentication. -func (m *AuthenticationMiddleware) HandleFinalize( - ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, -) ( - out middleware.FinalizeOutput, metadata middleware.Metadata, err error, -) { - token, err := m.tokenProvider.RetrieveBearerToken(ctx) - if err != nil { - return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err) - } - - signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request) - if err != nil { - return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err) - } - - in.Request = signedMessage - return next.HandleFinalize(ctx, in) -} - -// SignHTTPSMessage provides a bearer token authentication implementation that -// will sign the message with the provided bearer token. -// -// Will fail if the message is not a smithy-go HTTP request or the request is -// not HTTPS. -type SignHTTPSMessage struct{} - -// NewSignHTTPSMessage returns an initialized signer for HTTP messages. -func NewSignHTTPSMessage() *SignHTTPSMessage { - return &SignHTTPSMessage{} -} - -// SignWithBearerToken returns a copy of the HTTP request with the bearer token -// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750. -// -// Returns an error if the request's URL scheme is not HTTPS, or the request -// message is not an smithy-go HTTP Request pointer type. -func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) { - req, ok := message.(*smithyhttp.Request) - if !ok { - return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message) - } - - if !req.IsHTTPS() { - return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS") - } - - reqClone := req.Clone() - reqClone.Header.Set("Authorization", "Bearer "+token.Value) - - return reqClone, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token.go b/vendor/github.com/aws/smithy-go/auth/bearer/token.go deleted file mode 100644 index be260d4c7..000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/token.go +++ /dev/null @@ -1,50 +0,0 @@ -package bearer - -import ( - "context" - "time" -) - -// Token provides a type wrapping a bearer token and expiration metadata. -type Token struct { - Value string - - CanExpire bool - Expires time.Time -} - -// Expired returns if the token's Expires time is before or equal to the time -// provided. If CanExpires is false, Expired will always return false. -func (t Token) Expired(now time.Time) bool { - if !t.CanExpire { - return false - } - now = now.Round(0) - return now.Equal(t.Expires) || now.After(t.Expires) -} - -// TokenProvider provides interface for retrieving bearer tokens. -type TokenProvider interface { - RetrieveBearerToken(context.Context) (Token, error) -} - -// TokenProviderFunc provides a helper utility to wrap a function as a type -// that implements the TokenProvider interface. 
-type TokenProviderFunc func(context.Context) (Token, error) - -// RetrieveBearerToken calls the wrapped function, returning the Token or -// error. -func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) { - return fn(ctx) -} - -// StaticTokenProvider provides a utility for wrapping a static bearer token -// value within an implementation of a token provider. -type StaticTokenProvider struct { - Token Token -} - -// RetrieveBearerToken returns the static token specified. -func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) { - return s.Token, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go deleted file mode 100644 index 223ddf52b..000000000 --- a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go +++ /dev/null @@ -1,208 +0,0 @@ -package bearer - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - smithycontext "github.com/aws/smithy-go/context" - "github.com/aws/smithy-go/internal/sync/singleflight" -) - -// package variable that can be override in unit tests. -var timeNow = time.Now - -// TokenCacheOptions provides a set of optional configuration options for the -// TokenCache TokenProvider. -type TokenCacheOptions struct { - // The duration before the token will expire when the credentials will be - // refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls - // will be blocking. - // - // Asynchronous refreshes are deduplicated, and only one will be in-flight - // at a time. If the token expires while an asynchronous refresh is in - // flight, the next call to RetrieveBearerToken will block on that refresh - // to return. - RefreshBeforeExpires time.Duration - - // The timeout the underlying TokenProvider's RetrieveBearerToken call must - // return within, or will be canceled. Defaults to 0, no timeout. - // - // If 0 timeout, its possible for the underlying tokenProvider's - // RetrieveBearerToken call to block forever. Preventing subsequent - // TokenCache attempts to refresh the token. - // - // If this timeout is reached all pending deduplicated calls to - // TokenCache RetrieveBearerToken will fail with an error. - RetrieveBearerTokenTimeout time.Duration - - // The minimum duration between asynchronous refresh attempts. If the next - // asynchronous recent refresh attempt was within the minimum delay - // duration, the call to retrieve will return the current cached token, if - // not expired. - // - // The asynchronous retrieve is deduplicated across multiple calls when - // RetrieveBearerToken is called. The asynchronous retrieve is not a - // periodic task. It is only performed when the token has not yet expired, - // and the current item is within the RefreshBeforeExpires window, and the - // TokenCache's RetrieveBearerToken method is called. - // - // If 0, (default) there will be no minimum delay between asynchronous - // refresh attempts. - // - // If DisableAsyncRefresh is true, this option is ignored. - AsyncRefreshMinimumDelay time.Duration - - // Sets if the TokenCache will attempt to refresh the token in the - // background asynchronously instead of blocking for credentials to be - // refreshed. If disabled token refresh will be blocking. - // - // The first call to RetrieveBearerToken will always be blocking, because - // there is no cached token. - DisableAsyncRefresh bool -} - -// TokenCache provides an utility to cache Bearer Authentication tokens from a -// wrapped TokenProvider. 
The TokenCache can be has options to configure the -// cache's early and asynchronous refresh of the token. -type TokenCache struct { - options TokenCacheOptions - provider TokenProvider - - cachedToken atomic.Value - lastRefreshAttemptTime atomic.Value - sfGroup singleflight.Group -} - -// NewTokenCache returns a initialized TokenCache that implements the -// TokenProvider interface. Wrapping the provider passed in. Also taking a set -// of optional functional option parameters to configure the token cache. -func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache { - var options TokenCacheOptions - for _, fn := range optFns { - fn(&options) - } - - return &TokenCache{ - options: options, - provider: provider, - } -} - -// RetrieveBearerToken returns the token if it could be obtained, or error if a -// valid token could not be retrieved. -// -// The passed in Context's cancel/deadline/timeout will impacting only this -// individual retrieve call and not any other already queued up calls. This -// means underlying provider's RetrieveBearerToken calls could block for ever, -// and not be canceled with the Context. Set RetrieveBearerTokenTimeout to -// provide a timeout, preventing the underlying TokenProvider blocking forever. -// -// By default, if the passed in Context is canceled, all of its values will be -// considered expired. The wrapped TokenProvider will not be able to lookup the -// values from the Context once it is expired. This is done to protect against -// expired values no longer being valid. To disable this behavior, use -// smithy-go's context.WithPreserveExpiredValues to add a value to the Context -// before calling RetrieveBearerToken to enable support for expired values. -// -// Without RetrieveBearerTokenTimeout there is the potential for a underlying -// Provider's RetrieveBearerToken call to sit forever. Blocking in subsequent -// attempts at refreshing the token. -func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) { - cachedToken, ok := p.getCachedToken() - if !ok || cachedToken.Expired(timeNow()) { - return p.refreshBearerToken(ctx) - } - - // Check if the token should be refreshed before it expires. - refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires)) - if !refreshToken { - return cachedToken, nil - } - - if p.options.DisableAsyncRefresh { - return p.refreshBearerToken(ctx) - } - - p.tryAsyncRefresh(ctx) - - return cachedToken, nil -} - -// tryAsyncRefresh attempts to asynchronously refresh the token returning the -// already cached token. If it AsyncRefreshMinimumDelay option is not zero, and -// the duration since the last refresh is less than that value, nothing will be -// done. -func (p *TokenCache) tryAsyncRefresh(ctx context.Context) { - if p.options.AsyncRefreshMinimumDelay != 0 { - var lastRefreshAttempt time.Time - if v := p.lastRefreshAttemptTime.Load(); v != nil { - lastRefreshAttempt = v.(time.Time) - } - - if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) { - return - } - } - - // Ignore the returned channel so this won't be blocking, and limit the - // number of additional goroutines created. 
- p.sfGroup.DoChan("async-refresh", func() (interface{}, error) { - res, err := p.refreshBearerToken(ctx) - if p.options.AsyncRefreshMinimumDelay != 0 { - var refreshAttempt time.Time - if err != nil { - refreshAttempt = timeNow() - } - p.lastRefreshAttemptTime.Store(refreshAttempt) - } - - return res, err - }) -} - -func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) { - resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) { - ctx := smithycontext.WithSuppressCancel(ctx) - if v := p.options.RetrieveBearerTokenTimeout; v != 0 { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, v) - defer cancel() - } - return p.singleRetrieve(ctx) - }) - - select { - case res := <-resCh: - return res.Val.(Token), res.Err - case <-ctx.Done(): - return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err()) - } -} - -func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) { - token, err := p.provider.RetrieveBearerToken(ctx) - if err != nil { - return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err) - } - - p.cachedToken.Store(&token) - return token, nil -} - -// getCachedToken returns the currently cached token and true if found. Returns -// false if no token is cached. -func (p *TokenCache) getCachedToken() (Token, bool) { - v := p.cachedToken.Load() - if v == nil { - return Token{}, false - } - - t := v.(*Token) - if t == nil || t.Value == "" { - return Token{}, false - } - - return *t, true -} diff --git a/vendor/github.com/aws/smithy-go/auth/identity.go b/vendor/github.com/aws/smithy-go/auth/identity.go deleted file mode 100644 index ba8cf70d4..000000000 --- a/vendor/github.com/aws/smithy-go/auth/identity.go +++ /dev/null @@ -1,47 +0,0 @@ -package auth - -import ( - "context" - "time" - - "github.com/aws/smithy-go" -) - -// Identity contains information that identifies who the user making the -// request is. -type Identity interface { - Expiration() time.Time -} - -// IdentityResolver defines the interface through which an Identity is -// retrieved. -type IdentityResolver interface { - GetIdentity(context.Context, smithy.Properties) (Identity, error) -} - -// IdentityResolverOptions defines the interface through which an entity can be -// queried to retrieve an IdentityResolver for a given auth scheme. -type IdentityResolverOptions interface { - GetIdentityResolver(schemeID string) IdentityResolver -} - -// AnonymousIdentity is a sentinel to indicate no identity. -type AnonymousIdentity struct{} - -var _ Identity = (*AnonymousIdentity)(nil) - -// Expiration returns the zero value for time, as anonymous identity never -// expires. -func (*AnonymousIdentity) Expiration() time.Time { - return time.Time{} -} - -// AnonymousIdentityResolver returns AnonymousIdentity. -type AnonymousIdentityResolver struct{} - -var _ IdentityResolver = (*AnonymousIdentityResolver)(nil) - -// GetIdentity returns AnonymousIdentity. -func (*AnonymousIdentityResolver) GetIdentity(_ context.Context, _ smithy.Properties) (Identity, error) { - return &AnonymousIdentity{}, nil -} diff --git a/vendor/github.com/aws/smithy-go/auth/option.go b/vendor/github.com/aws/smithy-go/auth/option.go deleted file mode 100644 index d5dabff04..000000000 --- a/vendor/github.com/aws/smithy-go/auth/option.go +++ /dev/null @@ -1,25 +0,0 @@ -package auth - -import "github.com/aws/smithy-go" - -type ( - authOptionsKey struct{} -) - -// Option represents a possible authentication method for an operation. 
-type Option struct { - SchemeID string - IdentityProperties smithy.Properties - SignerProperties smithy.Properties -} - -// GetAuthOptions gets auth Options from Properties. -func GetAuthOptions(p *smithy.Properties) ([]*Option, bool) { - v, ok := p.Get(authOptionsKey{}).([]*Option) - return v, ok -} - -// SetAuthOptions sets auth Options on Properties. -func SetAuthOptions(p *smithy.Properties, options []*Option) { - p.Set(authOptionsKey{}, options) -} diff --git a/vendor/github.com/aws/smithy-go/auth/scheme_id.go b/vendor/github.com/aws/smithy-go/auth/scheme_id.go deleted file mode 100644 index fb6a57c64..000000000 --- a/vendor/github.com/aws/smithy-go/auth/scheme_id.go +++ /dev/null @@ -1,20 +0,0 @@ -package auth - -// Anonymous -const ( - SchemeIDAnonymous = "smithy.api#noAuth" -) - -// HTTP auth schemes -const ( - SchemeIDHTTPBasic = "smithy.api#httpBasicAuth" - SchemeIDHTTPDigest = "smithy.api#httpDigestAuth" - SchemeIDHTTPBearer = "smithy.api#httpBearerAuth" - SchemeIDHTTPAPIKey = "smithy.api#httpApiKeyAuth" -) - -// AWS auth schemes -const ( - SchemeIDSigV4 = "aws.auth#sigv4" - SchemeIDSigV4A = "aws.auth#sigv4a" -) diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/cache.go b/vendor/github.com/aws/smithy-go/container/private/cache/cache.go deleted file mode 100644 index 69af87751..000000000 --- a/vendor/github.com/aws/smithy-go/container/private/cache/cache.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package cache defines the interface for a key-based data store. -// -// This package is designated as private and is intended for use only by the -// smithy client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package cache - -// Cache defines the interface for an opaquely-typed, key-based data store. -// -// The thread-safety of this interface is undefined and is dictated by -// implementations. -type Cache interface { - // Retrieve the value associated with the given key. The returned boolean - // indicates whether the cache held a value for the given key. - Get(k interface{}) (interface{}, bool) - - // Store a value under the given key. - Put(k interface{}, v interface{}) -} diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go b/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go deleted file mode 100644 index 02ecb0a32..000000000 --- a/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package lru implements [cache.Cache] with an LRU eviction policy. -// -// This implementation is NOT thread-safe. -// -// This package is designated as private and is intended for use only by the -// smithy client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package lru - -import ( - "container/list" - - "github.com/aws/smithy-go/container/private/cache" -) - -// New creates a new LRU cache with the given capacity. 
-func New(cap int) cache.Cache { - return &lru{ - entries: make(map[interface{}]*list.Element, cap), - cap: cap, - mru: list.New(), - } -} - -type lru struct { - entries map[interface{}]*list.Element - cap int - - mru *list.List // least-recently used is at the back -} - -type element struct { - key interface{} - value interface{} -} - -func (l *lru) Get(k interface{}) (interface{}, bool) { - e, ok := l.entries[k] - if !ok { - return nil, false - } - - l.mru.MoveToFront(e) - return e.Value.(*element).value, true -} - -func (l *lru) Put(k interface{}, v interface{}) { - if len(l.entries) == l.cap { - l.evict() - } - - ev := &element{ - key: k, - value: v, - } - e := l.mru.PushFront(ev) - l.entries[k] = e -} - -func (l *lru) evict() { - e := l.mru.Remove(l.mru.Back()) - delete(l.entries, e.(*element).key) -} diff --git a/vendor/github.com/aws/smithy-go/context/suppress_expired.go b/vendor/github.com/aws/smithy-go/context/suppress_expired.go deleted file mode 100644 index a39b84a27..000000000 --- a/vendor/github.com/aws/smithy-go/context/suppress_expired.go +++ /dev/null @@ -1,81 +0,0 @@ -package context - -import "context" - -// valueOnlyContext provides a utility to preserve only the values of a -// Context. Suppressing any cancellation or deadline on that context being -// propagated downstream of this value. -// -// If preserveExpiredValues is false (default), and the valueCtx is canceled, -// calls to lookup values with the Values method, will always return nil. Setting -// preserveExpiredValues to true, will allow the valueOnlyContext to lookup -// values in valueCtx even if valueCtx is canceled. -// -// Based on the Go standard libraries net/lookup.go onlyValuesCtx utility. -// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go -type valueOnlyContext struct { - context.Context - - preserveExpiredValues bool - valuesCtx context.Context -} - -var _ context.Context = (*valueOnlyContext)(nil) - -// Value looks up the key, returning its value. If configured to not preserve -// values of expired context, and the wrapping context is canceled, nil will be -// returned. -func (v *valueOnlyContext) Value(key interface{}) interface{} { - if !v.preserveExpiredValues { - select { - case <-v.valuesCtx.Done(): - return nil - default: - } - } - - return v.valuesCtx.Value(key) -} - -// WithSuppressCancel wraps the Context value, suppressing its deadline and -// cancellation events being propagated downstream to consumer of the returned -// context. -// -// By default the wrapped Context's Values are available downstream until the -// wrapped Context is canceled. Once the wrapped Context is canceled, Values -// method called on the context return will no longer lookup any key. As they -// are now considered expired. -// -// To override this behavior, use WithPreserveExpiredValues on the Context -// before it is wrapped by WithSuppressCancel. This will make the Context -// returned by WithSuppressCancel allow lookup of expired values. -func WithSuppressCancel(ctx context.Context) context.Context { - return &valueOnlyContext{ - Context: context.Background(), - valuesCtx: ctx, - - preserveExpiredValues: GetPreserveExpiredValues(ctx), - } -} - -type preserveExpiredValuesKey struct{} - -// WithPreserveExpiredValues adds a Value to the Context if expired values -// should be preserved, and looked up by a Context wrapped by -// WithSuppressCancel. 
-// -// WithPreserveExpiredValues must be added as a value to a Context, before that -// Context is wrapped by WithSuppressCancel -func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context { - return context.WithValue(ctx, preserveExpiredValuesKey{}, enable) -} - -// GetPreserveExpiredValues looks up, and returns the PreserveExpressValues -// value in the context. Returning true if enabled, false otherwise. -func GetPreserveExpiredValues(ctx context.Context) bool { - v := ctx.Value(preserveExpiredValuesKey{}) - if v != nil { - return v.(bool) - } - return false -} diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go deleted file mode 100644 index 87b0c74b7..000000000 --- a/vendor/github.com/aws/smithy-go/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package smithy provides the core components for a Smithy SDK. -package smithy diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go deleted file mode 100644 index dec498c57..000000000 --- a/vendor/github.com/aws/smithy-go/document.go +++ /dev/null @@ -1,10 +0,0 @@ -package smithy - -// Document provides access to loosely structured data in a document-like -// format. -// -// Deprecated: See the github.com/aws/smithy-go/document package. -type Document interface { - UnmarshalDocument(interface{}) error - GetValue() (interface{}, error) -} diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go deleted file mode 100644 index 03055b7a1..000000000 --- a/vendor/github.com/aws/smithy-go/document/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package document provides interface definitions and error types for document types. -// -// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send -// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8 -// strings to these values. -// -// API Clients expose document constructors in their respective client document packages which must be used to -// Marshal and Unmarshal Go types to and from their respective protocol representations. -// -// See the Marshaler and Unmarshaler type documentation for more details on how to Go types can be converted to and from -// document types. -package document diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go deleted file mode 100644 index 8f852d95c..000000000 --- a/vendor/github.com/aws/smithy-go/document/document.go +++ /dev/null @@ -1,153 +0,0 @@ -package document - -import ( - "fmt" - "math/big" - "strconv" -) - -// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and -// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling. -// -// Marshal supports basic scalars (int,uint,float,bool,string), big.Int, and big.Float, maps, slices, and structs. -// Anonymous nested types are flattened based on Go anonymous type visibility. -// -// When defining struct types. the `document` struct tag can be used to control how the value will be -// marshaled into the resulting protocol document. -// -// // Field is ignored -// Field int `document:"-"` -// -// // Field object of key "myName" -// Field int `document:"myName"` -// -// // Field object key of key "myName", and -// // Field is omitted if the field is a zero value for the type. 
-// Field int `document:"myName,omitempty"` -// -// // Field object key of "Field", and -// // Field is omitted if the field is a zero value for the type. -// Field int `document:",omitempty"` -// -// All struct fields, including anonymous fields, are marshaled unless the -// any of the following conditions are meet. -// -// - the field is not exported -// - document field tag is "-" -// - document field tag specifies "omitempty", and is a zero value. -// -// Pointer and interface values are encoded as the value pointed to or -// contained in the interface. A nil value encodes as a null -// value unless `omitempty` struct tag is provided. -// -// Channel, complex, and function values are not encoded and will be skipped -// when walking the value to be marshaled. -// -// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented -// by your application as a string or numerical representation. -// -// Errors that occur when marshaling will stop the marshaler, and return the error. -// -// Marshal cannot represent cyclic data structures and will not handle them. -// Passing cyclic structures to Marshal will result in an infinite recursion. -type Marshaler interface { - MarshalSmithyDocument() ([]byte, error) -} - -// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and -// stores the result into the value pointed by v. If v is nil or not a pointer then InvalidUnmarshalError will be -// returned. -// -// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document` -// struct field tag for controlling how struct fields are unmarshaled. -// -// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document -// into an empty interface the Unmarshaler will store one of these values: -// bool, for boolean values -// document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float) -// string, for string values -// []interface{}, for array values -// map[string]interface{}, for objects -// nil, for null values -// -// When unmarshaling, any error that occurs will halt the unmarshal and return the error. -type Unmarshaler interface { - UnmarshalSmithyDocument(v interface{}) error -} - -type noSerde interface { - noSmithyDocumentSerde() -} - -// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled -// into a protocol document. -type NoSerde struct{} - -func (n NoSerde) noSmithyDocumentSerde() {} - -var _ noSerde = (*NoSerde)(nil) - -// IsNoSerde returns whether the given type implements the no smithy document serde interface. -func IsNoSerde(x interface{}) bool { - _, ok := x.(noSerde) - return ok -} - -// Number is an arbitrary precision numerical value -type Number string - -// Int64 returns the number as a string. -func (n Number) String() string { - return string(n) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return n.intOfBitSize(64) -} - -func (n Number) intOfBitSize(bitSize int) (int64, error) { - return strconv.ParseInt(string(n), 10, bitSize) -} - -// Uint64 returns the number as a uint64. -func (n Number) Uint64() (uint64, error) { - return n.uintOfBitSize(64) -} - -func (n Number) uintOfBitSize(bitSize int) (uint64, error) { - return strconv.ParseUint(string(n), 10, bitSize) -} - -// Float32 returns the number parsed as a 32-bit float, returns a float64. 
-func (n Number) Float32() (float64, error) { - return n.floatOfBitSize(32) -} - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return n.floatOfBitSize(64) -} - -// Float64 returns the number as a float64. -func (n Number) floatOfBitSize(bitSize int) (float64, error) { - return strconv.ParseFloat(string(n), bitSize) -} - -// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails. -func (n Number) BigFloat() (*big.Float, error) { - f, ok := (&big.Float{}).SetString(string(n)) - if !ok { - return nil, fmt.Errorf("failed to convert to big.Float") - } - return f, nil -} - -// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails. -func (n Number) BigInt() (*big.Int, error) { - f, ok := (&big.Int{}).SetString(string(n), 10) - if !ok { - return nil, fmt.Errorf("failed to convert to big.Float") - } - return f, nil -} diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go deleted file mode 100644 index 046a7a765..000000000 --- a/vendor/github.com/aws/smithy-go/document/errors.go +++ /dev/null @@ -1,75 +0,0 @@ -package document - -import ( - "fmt" - "reflect" -) - -// UnmarshalTypeError is an error type representing an error -// unmarshaling a Smithy document to a Go value type. This is different -// from UnmarshalError in that it does not wrap an underlying error type. -type UnmarshalTypeError struct { - Value string - Type reflect.Type -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *UnmarshalTypeError) Error() string { - return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s", - e.Value, e.Type.String()) -} - -// An InvalidUnmarshalError is an error type representing an invalid type -// encountered while unmarshaling a Smithy document to a Go value type. -type InvalidUnmarshalError struct { - Type reflect.Type -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *InvalidUnmarshalError) Error() string { - var msg string - if e.Type == nil { - msg = "cannot unmarshal to nil value" - } else if e.Type.Kind() != reflect.Ptr { - msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String()) - } else { - msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String()) - } - - return fmt.Sprintf("unmarshal failed, %s", msg) -} - -// An UnmarshalError wraps an error that occurred while unmarshaling a -// Smithy document into a Go type. This is different from -// UnmarshalTypeError in that it wraps the underlying error that occurred. -type UnmarshalError struct { - Err error - Value string - Type reflect.Type -} - -// Unwrap returns the underlying unmarshaling error -func (e *UnmarshalError) Unwrap() error { - return e.Err -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *UnmarshalError) Error() string { - return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v", - e.Value, e.Type.String(), e.Err) -} - -// An InvalidMarshalError is an error type representing an error -// occurring when marshaling a Go value type. -type InvalidMarshalError struct { - Message string -} - -// Error returns the string representation of the error. -// Satisfying the error interface. 
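As an aside, a small sketch of the Number accessors removed above (illustrative only; assumes the vendored github.com/aws/smithy-go/document import path):

package main

import (
	"fmt"

	"github.com/aws/smithy-go/document"
)

func main() {
	n := document.Number("12345")

	i, _ := n.Int64()   // 12345 via strconv.ParseInt
	f, _ := n.Float64() // 12345 via strconv.ParseFloat
	b, _ := n.BigInt()  // arbitrary-precision *big.Int

	fmt.Println(i, f, b)

	// Malformed or out-of-range input surfaces the underlying strconv error.
	if _, err := document.Number("not-a-number").Int64(); err != nil {
		fmt.Println("parse error:", err)
	}
}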
-func (e *InvalidMarshalError) Error() string { - return fmt.Sprintf("marshal failed, %s", e.Message) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go deleted file mode 100644 index 792fdfa08..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package encoding provides utilities for encoding values for specific -// document encodings. - -package encoding diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go deleted file mode 100644 index 2fdfb5225..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/encoding.go +++ /dev/null @@ -1,40 +0,0 @@ -package encoding - -import ( - "fmt" - "math" - "strconv" -) - -// EncodeFloat encodes a float value as per the stdlib encoder for json and xml protocol -// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers -// -// Based on encoding/json floatEncoder from the Go Standard Library -// https://golang.org/src/encoding/json/encode.go -func EncodeFloat(dst []byte, v float64, bits int) []byte { - if math.IsInf(v, 0) || math.IsNaN(v) { - panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits))) - } - - abs := math.Abs(v) - fmt := byte('f') - - if abs != 0 { - if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { - fmt = 'e' - } - } - - dst = strconv.AppendFloat(dst, v, fmt, -1, bits) - - if fmt == 'e' { - // clean up e-09 to e-9 - n := len(dst) - if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' { - dst[n-2] = dst[n-1] - dst = dst[:n-1] - } - } - - return dst -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go deleted file mode 100644 index 543e7cf03..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go +++ /dev/null @@ -1,123 +0,0 @@ -package httpbinding - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -const ( - contentLengthHeader = "Content-Length" - floatNaN = "NaN" - floatInfinity = "Infinity" - floatNegInfinity = "-Infinity" -) - -// An Encoder provides encoding of REST URI path, query, and header components -// of an HTTP request. Can also encode a stream as the payload. -// -// Does not support SetFields. -type Encoder struct { - path, rawPath, pathBuffer []byte - - query url.Values - header http.Header -} - -// NewEncoder creates a new encoder from the passed in request. It assumes that -// raw path contains no valuable information at this point, so it passes in path -// as path and raw path for subsequent trans -func NewEncoder(path, query string, headers http.Header) (*Encoder, error) { - return NewEncoderWithRawPath(path, path, query, headers) -} - -// NewHTTPBindingEncoder creates a new encoder from the passed in request. All query and -// header values will be added on top of the request's existing values. Overwriting -// duplicate values. -func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) { - parseQuery, err := url.ParseQuery(query) - if err != nil { - return nil, fmt.Errorf("failed to parse query string: %w", err) - } - - e := &Encoder{ - path: []byte(path), - rawPath: []byte(rawPath), - query: parseQuery, - header: headers.Clone(), - } - - return e, nil -} - -// Encode returns a REST protocol encoder for encoding HTTP bindings. 
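A brief sketch of the float formatting performed by the EncodeFloat helper above (illustrative only; assumes the vendored github.com/aws/smithy-go/encoding import path):

package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding"
)

func main() {
	fmt.Println(string(encoding.EncodeFloat(nil, 3.14, 64))) // "3.14", plain form
	fmt.Println(string(encoding.EncodeFloat(nil, 1e21, 64))) // "1e+21", exponent form for large magnitudes
	fmt.Println(string(encoding.EncodeFloat(nil, 1e-7, 64))) // "1e-7", the "e-07" exponent is trimmed to "e-7"
	// NaN and +/-Inf panic here; the HTTP binding encoders below map them to the
	// "NaN"/"Infinity"/"-Infinity" sentinels before this function is reached.
}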
-// -// Due net/http requiring `Content-Length` to be specified on the http.Request#ContentLength directly. Encode -// will look for whether the header is present, and if so will remove it and set the respective value on http.Request. -// -// Returns any error occurring during encoding. -func (e *Encoder) Encode(req *http.Request) (*http.Request, error) { - req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath) - req.URL.RawQuery = e.query.Encode() - - // net/http ignores Content-Length header and requires it to be set on http.Request - if v := e.header.Get(contentLengthHeader); len(v) > 0 { - iv, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, err - } - req.ContentLength = iv - e.header.Del(contentLengthHeader) - } - - req.Header = e.header - - return req, nil -} - -// AddHeader returns a HeaderValue for appending to the given header name -func (e *Encoder) AddHeader(key string) HeaderValue { - return newHeaderValue(e.header, key, true) -} - -// SetHeader returns a HeaderValue for setting the given header name -func (e *Encoder) SetHeader(key string) HeaderValue { - return newHeaderValue(e.header, key, false) -} - -// Headers returns a Header used for encoding headers with the given prefix -func (e *Encoder) Headers(prefix string) Headers { - return Headers{ - header: e.header, - prefix: strings.TrimSpace(prefix), - } -} - -// HasHeader returns if a header with the key specified exists with one or -// more value. -func (e Encoder) HasHeader(key string) bool { - return len(e.header[key]) != 0 -} - -// SetURI returns a URIValue used for setting the given path key -func (e *Encoder) SetURI(key string) URIValue { - return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key) -} - -// SetQuery returns a QueryValue used for setting the given query key -func (e *Encoder) SetQuery(key string) QueryValue { - return NewQueryValue(e.query, key, false) -} - -// AddQuery returns a QueryValue used for appending the given query key -func (e *Encoder) AddQuery(key string) QueryValue { - return NewQueryValue(e.query, key, true) -} - -// HasQuery returns if a query with the key specified exists with one or -// more values. 
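To show how the Encoder removed above is typically driven, an illustrative sketch (not part of the vendored sources; assumes the vendored github.com/aws/smithy-go/encoding/httpbinding import path and hypothetical path, query, and header names):

package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	enc, err := httpbinding.NewEncoder("/items/{id}", "", http.Header{})
	if err != nil {
		panic(err)
	}

	_ = enc.SetURI("id").String("abc 123") // fills the {id} path label, escaping the raw path
	enc.SetQuery("limit").Integer(10)      // ?limit=10
	enc.SetHeader("X-Trace").String("on")

	req := &http.Request{URL: &url.URL{Scheme: "https", Host: "example.com"}, Header: http.Header{}}
	req, err = enc.Encode(req)
	if err != nil {
		panic(err)
	}

	fmt.Println(req.URL.RawPath)           // /items/abc%20123
	fmt.Println(req.URL.RawQuery)          // limit=10
	fmt.Println(req.Header.Get("X-Trace")) // on
}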
-func (e *Encoder) HasQuery(key string) bool { - return len(e.query.Get(key)) != 0 -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go deleted file mode 100644 index f9256e175..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go +++ /dev/null @@ -1,122 +0,0 @@ -package httpbinding - -import ( - "encoding/base64" - "math" - "math/big" - "net/http" - "strconv" - "strings" -) - -// Headers is used to encode header keys using a provided prefix -type Headers struct { - header http.Header - prefix string -} - -// AddHeader returns a HeaderValue used to append values to prefix+key -func (h Headers) AddHeader(key string) HeaderValue { - return h.newHeaderValue(key, true) -} - -// SetHeader returns a HeaderValue used to set the value of prefix+key -func (h Headers) SetHeader(key string) HeaderValue { - return h.newHeaderValue(key, false) -} - -func (h Headers) newHeaderValue(key string, append bool) HeaderValue { - return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append) -} - -// HeaderValue is used to encode values to an HTTP header -type HeaderValue struct { - header http.Header - key string - append bool -} - -func newHeaderValue(header http.Header, key string, append bool) HeaderValue { - return HeaderValue{header: header, key: strings.TrimSpace(key), append: append} -} - -func (h HeaderValue) modifyHeader(value string) { - if h.append { - h.header[h.key] = append(h.header[h.key], value) - } else { - h.header[h.key] = append(h.header[h.key][:0], value) - } -} - -// String encodes the value v as the header string value -func (h HeaderValue) String(v string) { - h.modifyHeader(v) -} - -// Byte encodes the value v as a query string value -func (h HeaderValue) Byte(v int8) { - h.Long(int64(v)) -} - -// Short encodes the value v as a query string value -func (h HeaderValue) Short(v int16) { - h.Long(int64(v)) -} - -// Integer encodes the value v as the header string value -func (h HeaderValue) Integer(v int32) { - h.Long(int64(v)) -} - -// Long encodes the value v as the header string value -func (h HeaderValue) Long(v int64) { - h.modifyHeader(strconv.FormatInt(v, 10)) -} - -// Boolean encodes the value v as a query string value -func (h HeaderValue) Boolean(v bool) { - h.modifyHeader(strconv.FormatBool(v)) -} - -// Float encodes the value v as a query string value -func (h HeaderValue) Float(v float32) { - h.float(float64(v), 32) -} - -// Double encodes the value v as a query string value -func (h HeaderValue) Double(v float64) { - h.float(v, 64) -} - -func (h HeaderValue) float(v float64, bitSize int) { - switch { - case math.IsNaN(v): - h.String(floatNaN) - case math.IsInf(v, 1): - h.String(floatInfinity) - case math.IsInf(v, -1): - h.String(floatNegInfinity) - default: - h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes the value v as a query string value -func (h HeaderValue) BigInteger(v *big.Int) { - h.modifyHeader(v.String()) -} - -// BigDecimal encodes the value v as a query string value -func (h HeaderValue) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - h.Long(i) - return - } - h.modifyHeader(v.Text('e', -1)) -} - -// Blob encodes the value v as a base64 header string value -func (h HeaderValue) Blob(v []byte) { - encodeToString := base64.StdEncoding.EncodeToString(v) - h.modifyHeader(encodeToString) -} diff --git 
a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go deleted file mode 100644 index 9ae308540..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go +++ /dev/null @@ -1,108 +0,0 @@ -package httpbinding - -import ( - "bytes" - "fmt" -) - -const ( - uriTokenStart = '{' - uriTokenStop = '}' - uriTokenSkip = '+' -) - -func bufCap(b []byte, n int) []byte { - if cap(b) < n { - return make([]byte, 0, n) - } - - return b[0:0] -} - -// replacePathElement replaces a single element in the path []byte. -// Escape is used to control whether the value will be escaped using Amazon path escape style. -func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { - // search for "{}". If not found, search for the greedy version "{+}". If none are found, return error - fieldBuf = bufCap(fieldBuf, len(key)+2) // { } - fieldBuf = append(fieldBuf, uriTokenStart) - fieldBuf = append(fieldBuf, key...) - fieldBuf = append(fieldBuf, uriTokenStop) - - start := bytes.Index(path, fieldBuf) - encodeSep := true - if start < 0 { - fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } - fieldBuf = append(fieldBuf, uriTokenStart) - fieldBuf = append(fieldBuf, key...) - fieldBuf = append(fieldBuf, uriTokenSkip) - fieldBuf = append(fieldBuf, uriTokenStop) - - start = bytes.Index(path, fieldBuf) - if start < 0 { - return path, fieldBuf, fmt.Errorf("invalid path index, start=%d. %s", start, path) - } - encodeSep = false - } - end := start + len(fieldBuf) - - if escape { - val = EscapePath(val, encodeSep) - } - - fieldBuf = bufCap(fieldBuf, len(val)) - fieldBuf = append(fieldBuf, val...) - - keyLen := end - start - valLen := len(fieldBuf) - - if keyLen == valLen { - copy(path[start:], fieldBuf) - return path, fieldBuf, nil - } - - newLen := len(path) + (valLen - keyLen) - if len(path) < newLen { - path = path[:cap(path)] - } - if cap(path) < newLen { - newURI := make([]byte, newLen) - copy(newURI, path) - path = newURI - } - - // shift - copy(path[start+valLen:], path[end:]) - path = path[:newLen] - copy(path[start:], fieldBuf) - - return path, fieldBuf, nil -} - -// EscapePath escapes part of a URL path in Amazon style. -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -var noEscape [256]bool - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' || - i == '_' || - i == '~' - } -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go deleted file mode 100644 index c2e7d0a20..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go +++ /dev/null @@ -1,107 +0,0 @@ -package httpbinding - -import ( - "encoding/base64" - "math" - "math/big" - "net/url" - "strconv" -) - -// QueryValue is used to encode query key values -type QueryValue struct { - query url.Values - key string - append bool -} - -// NewQueryValue creates a new QueryValue which enables encoding -// a query value into the given url.Values. 
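A short sketch of the Amazon-style path escaping shown above (illustrative only): encodeSep=false, used for greedy {label+} bindings, keeps '/' as a segment separator, while encodeSep=true escapes it as %2F.

package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	fmt.Println(httpbinding.EscapePath("a b/c", false)) // a%20b/c
	fmt.Println(httpbinding.EscapePath("a b/c", true))  // a%20b%2Fc
}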
-func NewQueryValue(query url.Values, key string, append bool) QueryValue { - return QueryValue{ - query: query, - key: key, - append: append, - } -} - -func (qv QueryValue) updateKey(value string) { - if qv.append { - qv.query.Add(qv.key, value) - } else { - qv.query.Set(qv.key, value) - } -} - -// Blob encodes v as a base64 query string value -func (qv QueryValue) Blob(v []byte) { - encodeToString := base64.StdEncoding.EncodeToString(v) - qv.updateKey(encodeToString) -} - -// Boolean encodes v as a query string value -func (qv QueryValue) Boolean(v bool) { - qv.updateKey(strconv.FormatBool(v)) -} - -// String encodes v as a query string value -func (qv QueryValue) String(v string) { - qv.updateKey(v) -} - -// Byte encodes v as a query string value -func (qv QueryValue) Byte(v int8) { - qv.Long(int64(v)) -} - -// Short encodes v as a query string value -func (qv QueryValue) Short(v int16) { - qv.Long(int64(v)) -} - -// Integer encodes v as a query string value -func (qv QueryValue) Integer(v int32) { - qv.Long(int64(v)) -} - -// Long encodes v as a query string value -func (qv QueryValue) Long(v int64) { - qv.updateKey(strconv.FormatInt(v, 10)) -} - -// Float encodes v as a query string value -func (qv QueryValue) Float(v float32) { - qv.float(float64(v), 32) -} - -// Double encodes v as a query string value -func (qv QueryValue) Double(v float64) { - qv.float(v, 64) -} - -func (qv QueryValue) float(v float64, bitSize int) { - switch { - case math.IsNaN(v): - qv.String(floatNaN) - case math.IsInf(v, 1): - qv.String(floatInfinity) - case math.IsInf(v, -1): - qv.String(floatNegInfinity) - default: - qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes v as a query string value -func (qv QueryValue) BigInteger(v *big.Int) { - qv.updateKey(v.String()) -} - -// BigDecimal encodes v as a query string value -func (qv QueryValue) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - qv.Long(i) - return - } - qv.updateKey(v.Text('e', -1)) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go deleted file mode 100644 index f04e11984..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go +++ /dev/null @@ -1,111 +0,0 @@ -package httpbinding - -import ( - "math" - "math/big" - "strconv" - "strings" -) - -// URIValue is used to encode named URI parameters -type URIValue struct { - path, rawPath, buffer *[]byte - - key string -} - -func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue { - return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key} -} - -func (u URIValue) modifyURI(value string) (err error) { - *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false) - if err != nil { - return err - } - *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true) - return err -} - -// Boolean encodes v as a URI string value -func (u URIValue) Boolean(v bool) error { - return u.modifyURI(strconv.FormatBool(v)) -} - -// String encodes v as a URI string value -func (u URIValue) String(v string) error { - return u.modifyURI(v) -} - -// Byte encodes v as a URI string value -func (u URIValue) Byte(v int8) error { - return u.Long(int64(v)) -} - -// Short encodes v as a URI string value -func (u URIValue) Short(v int16) error { - return u.Long(int64(v)) -} - -// Integer encodes v as a URI string value -func (u URIValue) Integer(v int32) error { 
- return u.Long(int64(v)) -} - -// Long encodes v as a URI string value -func (u URIValue) Long(v int64) error { - return u.modifyURI(strconv.FormatInt(v, 10)) -} - -// Float encodes v as a query string value -func (u URIValue) Float(v float32) error { - return u.float(float64(v), 32) -} - -// Double encodes v as a query string value -func (u URIValue) Double(v float64) error { - return u.float(v, 64) -} - -func (u URIValue) float(v float64, bitSize int) error { - switch { - case math.IsNaN(v): - return u.String(floatNaN) - case math.IsInf(v, 1): - return u.String(floatInfinity) - case math.IsInf(v, -1): - return u.String(floatNegInfinity) - default: - return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize)) - } -} - -// BigInteger encodes v as a query string value -func (u URIValue) BigInteger(v *big.Int) error { - return u.modifyURI(v.String()) -} - -// BigDecimal encodes v as a query string value -func (u URIValue) BigDecimal(v *big.Float) error { - if i, accuracy := v.Int64(); accuracy == big.Exact { - return u.Long(i) - } - return u.modifyURI(v.Text('e', -1)) -} - -// SplitURI parses a Smithy HTTP binding trait URI -func SplitURI(uri string) (path, query string) { - queryStart := strings.IndexRune(uri, '?') - if queryStart == -1 { - path = uri - return path, query - } - - path = uri[:queryStart] - if queryStart+1 >= len(uri) { - return path, query - } - query = uri[queryStart+1:] - - return path, query -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/array.go b/vendor/github.com/aws/smithy-go/encoding/json/array.go deleted file mode 100644 index 7a232f660..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/array.go +++ /dev/null @@ -1,35 +0,0 @@ -package json - -import ( - "bytes" -) - -// Array represents the encoding of a JSON Array -type Array struct { - w *bytes.Buffer - writeComma bool - scratch *[]byte -} - -func newArray(w *bytes.Buffer, scratch *[]byte) *Array { - w.WriteRune(leftBracket) - return &Array{w: w, scratch: scratch} -} - -// Value adds a new element to the JSON Array. -// Returns a Value type that is used to encode -// the array element. -func (a *Array) Value() Value { - if a.writeComma { - a.w.WriteRune(comma) - } else { - a.writeComma = true - } - - return newValue(a.w, a.scratch) -} - -// Close encodes the end of the JSON Array -func (a *Array) Close() { - a.w.WriteRune(rightBracket) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/constants.go b/vendor/github.com/aws/smithy-go/encoding/json/constants.go deleted file mode 100644 index 91044092a..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -package json - -const ( - leftBrace = '{' - rightBrace = '}' - - leftBracket = '[' - rightBracket = ']' - - comma = ',' - quote = '"' - colon = ':' - - null = "null" -) diff --git a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go deleted file mode 100644 index 7050c85b3..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go +++ /dev/null @@ -1,139 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -// DiscardUnknownField discards unknown fields from a decoder body. -// This function is useful while deserializing a JSON body with additional -// unknown information that should be discarded. 
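For reference, the SplitURI helper above simply separates the path and query portions of a modeled request URI (illustrative sketch; the bucket URI is a hypothetical example):

package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	path, query := httpbinding.SplitURI("/buckets/{bucketName}?list-type=2")
	fmt.Println(path)  // /buckets/{bucketName}
	fmt.Println(query) // list-type=2
}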
-func DiscardUnknownField(decoder *json.Decoder) error { - // This deliberately does not share logic with CollectUnknownField, even - // though it could, because if we were to delegate to that then we'd incur - // extra allocations and general memory usage. - v, err := decoder.Token() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - if _, ok := v.(json.Delim); ok { - for decoder.More() { - err = DiscardUnknownField(decoder) - } - endToken, err := decoder.Token() - if err != nil { - return err - } - if _, ok := endToken.(json.Delim); !ok { - return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v", - endToken, endToken) - } - } - - return nil -} - -// CollectUnknownField grabs the contents of unknown fields from the decoder body -// and returns them as a byte slice. This is useful for skipping unknown fields without -// completely discarding them. -func CollectUnknownField(decoder *json.Decoder) ([]byte, error) { - result, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - buff := bytes.NewBuffer(nil) - encoder := json.NewEncoder(buff) - - if err := encoder.Encode(result); err != nil { - return nil, err - } - - return buff.Bytes(), nil -} - -func collectUnknownField(decoder *json.Decoder) (interface{}, error) { - // Grab the initial value. This could either be a concrete value like a string or a a - // delimiter. - token, err := decoder.Token() - if err == io.EOF { - return nil, nil - } - if err != nil { - return nil, err - } - - // If it's an array or object, we'll need to recurse. - delim, ok := token.(json.Delim) - if ok { - var result interface{} - if delim == '{' { - result, err = collectUnknownObject(decoder) - if err != nil { - return nil, err - } - } else { - result, err = collectUnknownArray(decoder) - if err != nil { - return nil, err - } - } - - // Discard the closing token. decoder.Token handles checking for matching delimiters - if _, err := decoder.Token(); err != nil { - return nil, err - } - return result, nil - } - - return token, nil -} - -func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) { - // We need to create an empty array here instead of a nil array, since by getting - // into this function at all we necessarily have seen a non-nil list. - array := []interface{}{} - - for decoder.More() { - value, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - array = append(array, value) - } - - return array, nil -} - -func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) { - object := make(map[string]interface{}) - - for decoder.More() { - key, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - // Keys have to be strings, which is particularly important as the encoder - // won't except a map with interface{} keys - stringKey, ok := key.(string) - if !ok { - return nil, fmt.Errorf("expected string key, found %T", key) - } - - value, err := collectUnknownField(decoder) - if err != nil { - return nil, err - } - - object[stringKey] = value - } - - return object, nil -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go deleted file mode 100644 index 8772953f1..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go +++ /dev/null @@ -1,30 +0,0 @@ -package json - -import ( - "bytes" -) - -// Encoder is JSON encoder that supports construction of JSON values -// using methods. 
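A small sketch of the unknown-field helpers above (illustrative only; smithyjson aliases the vendored github.com/aws/smithy-go/encoding/json package, and the input document is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"a":[1,2],"b":"x"}`))

	// Capture the next (unknown) value as raw JSON instead of silently dropping it.
	raw, err := smithyjson.CollectUnknownField(dec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", raw) // {"a":[1,2],"b":"x"}
}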
-type Encoder struct { - w *bytes.Buffer - Value -} - -// NewEncoder returns a new JSON encoder -func NewEncoder() *Encoder { - writer := bytes.NewBuffer(nil) - scratch := make([]byte, 64) - - return &Encoder{w: writer, Value: newValue(writer, &scratch)} -} - -// String returns the String output of the JSON encoder -func (e Encoder) String() string { - return e.w.String() -} - -// Bytes returns the []byte slice of the JSON encoder -func (e Encoder) Bytes() []byte { - return e.w.Bytes() -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/escape.go b/vendor/github.com/aws/smithy-go/encoding/json/escape.go deleted file mode 100644 index d984d0cdc..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/escape.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet - -package json - -import ( - "bytes" - "unicode/utf8" -) - -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). -var safeSet = [utf8.RuneSelf]bool{ - ' ': true, - '!': true, - '"': false, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '(': true, - ')': true, - '*': true, - '+': true, - ',': true, - '-': true, - '.': true, - '/': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - ':': true, - ';': true, - '<': true, - '=': true, - '>': true, - '?': true, - '@': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'V': true, - 'W': true, - 'X': true, - 'Y': true, - 'Z': true, - '[': true, - '\\': false, - ']': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '{': true, - '|': true, - '}': true, - '~': true, - '\u007f': true, -} - -// copied from Go 1.8 stdlib's encoding/json/#hex -var hex = "0123456789abcdef" - -// escapeStringBytes escapes and writes the passed in string bytes to the dst -// buffer -// -// Copied and modifed from Go 1.8 stdlib's encodeing/json/#encodeState.stringBytes -func escapeStringBytes(e *bytes.Buffer, s []byte) { - e.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if safeSet[b] { - i++ - continue - } - if start < i { - e.Write(s[start:i]) - } - switch b { - case '\\', '"': - e.WriteByte('\\') - e.WriteByte(b) - case '\n': - e.WriteByte('\\') - e.WriteByte('n') - case '\r': - e.WriteByte('\\') - e.WriteByte('r') - case '\t': - e.WriteByte('\\') - e.WriteByte('t') - default: - // This encodes bytes < 0x20 except for \t, \n and \r. 
- // If escapeHTML is set, it also escapes <, >, and & - // because they can lead to security holes when - // user-controlled strings are rendered into JSON - // and served to some browsers. - e.WriteString(`\u00`) - e.WriteByte(hex[b>>4]) - e.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - e.Write(s[start:i]) - } - e.WriteString(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - if c == '\u2028' || c == '\u2029' { - if start < i { - e.Write(s[start:i]) - } - e.WriteString(`\u202`) - e.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - e.Write(s[start:]) - } - e.WriteByte('"') -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go deleted file mode 100644 index 722346d03..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/object.go +++ /dev/null @@ -1,40 +0,0 @@ -package json - -import ( - "bytes" -) - -// Object represents the encoding of a JSON Object type -type Object struct { - w *bytes.Buffer - writeComma bool - scratch *[]byte -} - -func newObject(w *bytes.Buffer, scratch *[]byte) *Object { - w.WriteRune(leftBrace) - return &Object{w: w, scratch: scratch} -} - -func (o *Object) writeKey(key string) { - escapeStringBytes(o.w, []byte(key)) - o.w.WriteRune(colon) -} - -// Key adds the given named key to the JSON object. -// Returns a Value encoder that should be used to encode -// a JSON value type. 
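Putting the Encoder, Object, Array, and Value types of this package together, a typical (purely illustrative) serialization looks like the following; the keys and values are hypothetical:

package main

import (
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	enc := smithyjson.NewEncoder()

	obj := enc.Object()           // writes '{'
	obj.Key("name").String("pbm") // "name":"pbm"
	obj.Key("count").Integer(3)   // ,"count":3

	tags := obj.Key("tags").Array() // ,"tags":[
	tags.Value().String("backup")
	tags.Value().String("restore")
	tags.Close() // ]

	obj.Close() // '}'

	fmt.Println(enc.String()) // {"name":"pbm","count":3,"tags":["backup","restore"]}
}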
-func (o *Object) Key(name string) Value { - if o.writeComma { - o.w.WriteRune(comma) - } else { - o.writeComma = true - } - o.writeKey(name) - return newValue(o.w, o.scratch) -} - -// Close encodes the end of the JSON Object -func (o *Object) Close() { - o.w.WriteRune(rightBrace) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/value.go b/vendor/github.com/aws/smithy-go/encoding/json/value.go deleted file mode 100644 index b41ff1e15..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/json/value.go +++ /dev/null @@ -1,149 +0,0 @@ -package json - -import ( - "bytes" - "encoding/base64" - "math/big" - "strconv" - - "github.com/aws/smithy-go/encoding" -) - -// Value represents a JSON Value type -// JSON Value types: Object, Array, String, Number, Boolean, and Null -type Value struct { - w *bytes.Buffer - scratch *[]byte -} - -// newValue returns a new Value encoder -func newValue(w *bytes.Buffer, scratch *[]byte) Value { - return Value{w: w, scratch: scratch} -} - -// String encodes v as a JSON string -func (jv Value) String(v string) { - escapeStringBytes(jv.w, []byte(v)) -} - -// Byte encodes v as a JSON number -func (jv Value) Byte(v int8) { - jv.Long(int64(v)) -} - -// Short encodes v as a JSON number -func (jv Value) Short(v int16) { - jv.Long(int64(v)) -} - -// Integer encodes v as a JSON number -func (jv Value) Integer(v int32) { - jv.Long(int64(v)) -} - -// Long encodes v as a JSON number -func (jv Value) Long(v int64) { - *jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10) - jv.w.Write(*jv.scratch) -} - -// ULong encodes v as a JSON number -func (jv Value) ULong(v uint64) { - *jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10) - jv.w.Write(*jv.scratch) -} - -// Float encodes v as a JSON number -func (jv Value) Float(v float32) { - jv.float(float64(v), 32) -} - -// Double encodes v as a JSON number -func (jv Value) Double(v float64) { - jv.float(v, 64) -} - -func (jv Value) float(v float64, bits int) { - *jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits) - jv.w.Write(*jv.scratch) -} - -// Boolean encodes v as a JSON boolean -func (jv Value) Boolean(v bool) { - *jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v) - jv.w.Write(*jv.scratch) -} - -// Base64EncodeBytes writes v as a base64 value in JSON string -func (jv Value) Base64EncodeBytes(v []byte) { - encodeByteSlice(jv.w, (*jv.scratch)[:0], v) -} - -// Write writes v directly to the JSON document -func (jv Value) Write(v []byte) { - jv.w.Write(v) -} - -// Array returns a new Array encoder -func (jv Value) Array() *Array { - return newArray(jv.w, jv.scratch) -} - -// Object returns a new Object encoder -func (jv Value) Object() *Object { - return newObject(jv.w, jv.scratch) -} - -// Null encodes a null JSON value -func (jv Value) Null() { - jv.w.WriteString(null) -} - -// BigInteger encodes v as JSON value -func (jv Value) BigInteger(v *big.Int) { - jv.w.Write([]byte(v.Text(10))) -} - -// BigDecimal encodes v as JSON value -func (jv Value) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - jv.Long(i) - return - } - // TODO: Should this try to match ES6 ToString similar to stdlib JSON? 
- jv.w.Write([]byte(v.Text('e', -1))) -} - -// Based on encoding/json encodeByteSlice from the Go Standard Library -// https://golang.org/src/encoding/json/encode.go -func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) { - if v == nil { - w.WriteString(null) - return - } - - w.WriteRune(quote) - - encodedLen := base64.StdEncoding.EncodedLen(len(v)) - if encodedLen <= len(scratch) { - // If the encoded bytes fit in e.scratch, avoid an extra - // allocation and use the cheaper Encoding.Encode. - dst := scratch[:encodedLen] - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else if encodedLen <= 1024 { - // The encoded bytes are short enough to allocate for, and - // Encoding.Encode is still cheaper. - dst := make([]byte, encodedLen) - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else { - // The encoded bytes are too long to cheaply allocate, and - // Encoding.Encode is no longer noticeably cheaper. - enc := base64.NewEncoder(base64.StdEncoding, w) - enc.Write(v) - enc.Close() - } - - w.WriteRune(quote) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go deleted file mode 100644 index 508f3c997..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/array.go +++ /dev/null @@ -1,49 +0,0 @@ -package xml - -// arrayMemberWrapper is the default member wrapper tag name for XML Array type -var arrayMemberWrapper = StartElement{ - Name: Name{Local: "member"}, -} - -// Array represents the encoding of a XML array type -type Array struct { - w writer - scratch *[]byte - - // member start element is the array member wrapper start element - memberStartElement StartElement - - // isFlattened indicates if the array is a flattened array. - isFlattened bool -} - -// newArray returns an array encoder. -// It also takes in the member start element, array start element. -// It takes in a isFlattened bool, indicating that an array is flattened array. -// -// A wrapped array ["value1", "value2"] is represented as -// `value1value2`. - -// A flattened array `someList: ["value1", "value2"]` is represented as -// `value1value2`. -func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array { - var memberWrapper = memberStartElement - if isFlattened { - memberWrapper = arrayStartElement - } - - return &Array{ - w: w, - scratch: scratch, - memberStartElement: memberWrapper, - isFlattened: isFlattened, - } -} - -// Member adds a new member to the XML array. -// It returns a Value encoder. -func (a *Array) Member() Value { - v := newValue(a.w, a.scratch, a.memberStartElement) - v.isFlattened = a.isFlattened - return v -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go deleted file mode 100644 index ccee90a63..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go +++ /dev/null @@ -1,10 +0,0 @@ -package xml - -const ( - leftAngleBracket = '<' - rightAngleBracket = '>' - forwardSlash = '/' - colon = ':' - equals = '=' - quote = '"' -) diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go deleted file mode 100644 index f9200093e..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Package xml holds the XMl encoder utility. 
This utility is written in accordance to our design to delegate to -shape serializer function in which a xml.Value will be passed around. - -Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings - -Member Element - -Member element should be used to encode xml shapes into xml elements except for flattened xml shapes. Member element -write their own element start tag. These elements should always be closed. - -Flattened Element - -Flattened element should be used to encode shapes marked with flattened trait into xml elements. Flattened element -do not write a start tag, and thus should not be closed. - -Simple types encoding - -All simple type methods on value such as String(), Long() etc; auto close the associated member element. - -Array - -Array returns the collection encoder. It has two modes, wrapped and flattened encoding. - -Wrapped arrays have two methods Array() and ArrayWithCustomName() which facilitate array member wrapping. -By default, a wrapped array members are wrapped with `member` named start element. - - appletree - -Flattened arrays rely on Value being marked as flattened. -If a shape is marked as flattened, Array() will use the shape element name as wrapper for array elements. - - appletree - -Map - -Map is the map encoder. It has two modes, wrapped and flattened encoding. - -Wrapped map has Array() method, which facilitate map member wrapping. -By default, a wrapped map members are wrapped with `entry` named start element. - - appletreesnowice - -Flattened map rely on Value being marked as flattened. -If a shape is marked as flattened, Map() will use the shape element name as wrapper for map entry elements. - - appletreesnowice -*/ -package xml diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go deleted file mode 100644 index ae84e7999..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/element.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Copied and modified from Go 1.14 stdlib's encoding/xml - -package xml - -// A Name represents an XML name (Local) annotated -// with a name space identifier (Space). -// In tokens returned by Decoder.Token, the Space identifier -// is given as a canonical URL, not the short prefix used -// in the document being parsed. -type Name struct { - Space, Local string -} - -// An Attr represents an attribute in an XML element (Name=Value). -type Attr struct { - Name Name - Value string -} - -/* -NewAttribute returns a pointer to an attribute. -It takes in a local name aka attribute name, and value -representing the attribute value. -*/ -func NewAttribute(local, value string) Attr { - return Attr{ - Name: Name{ - Local: local, - }, - Value: value, - } -} - -/* -NewNamespaceAttribute returns a pointer to an attribute. -It takes in a local name aka attribute name, and value -representing the attribute value. - -NewNamespaceAttribute appends `xmlns:` in front of namespace -prefix. - -For creating a name space attribute representing -`xmlns:prefix="http://example.com`, the breakdown would be: -local = "prefix" -value = "http://example.com" -*/ -func NewNamespaceAttribute(local, value string) Attr { - attr := NewAttribute(local, value) - - // default name space identifier - attr.Name.Space = "xmlns" - return attr -} - -// A StartElement represents an XML start element. 
-type StartElement struct { - Name Name - Attr []Attr -} - -// Copy creates a new copy of StartElement. -func (e StartElement) Copy() StartElement { - attrs := make([]Attr, len(e.Attr)) - copy(attrs, e.Attr) - e.Attr = attrs - return e -} - -// End returns the corresponding XML end element. -func (e StartElement) End() EndElement { - return EndElement{e.Name} -} - -// returns true if start element local name is empty -func (e StartElement) isZero() bool { - return len(e.Name.Local) == 0 -} - -// An EndElement represents an XML end element. -type EndElement struct { - Name Name -} - -// returns true if end element local name is empty -func (e EndElement) isZero() bool { - return len(e.Name.Local) == 0 -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go deleted file mode 100644 index 16fb3dddb..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go +++ /dev/null @@ -1,51 +0,0 @@ -package xml - -// writer interface used by the xml encoder to write an encoded xml -// document in a writer. -type writer interface { - - // Write takes in a byte slice and returns number of bytes written and error - Write(p []byte) (n int, err error) - - // WriteRune takes in a rune and returns number of bytes written and error - WriteRune(r rune) (n int, err error) - - // WriteString takes in a string and returns number of bytes written and error - WriteString(s string) (n int, err error) - - // String method returns a string - String() string - - // Bytes return a byte slice. - Bytes() []byte -} - -// Encoder is an XML encoder that supports construction of XML values -// using methods. The encoder takes in a writer and maintains a scratch buffer. -type Encoder struct { - w writer - scratch *[]byte -} - -// NewEncoder returns an XML encoder -func NewEncoder(w writer) *Encoder { - scratch := make([]byte, 64) - - return &Encoder{w: w, scratch: &scratch} -} - -// String returns the string output of the XML encoder -func (e Encoder) String() string { - return e.w.String() -} - -// Bytes returns the []byte slice of the XML encoder -func (e Encoder) Bytes() []byte { - return e.w.Bytes() -} - -// RootElement builds a root element encoding -// It writes it's start element tag. The value should be closed. 
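As an illustration of the encoder above (not part of the vendored sources): bytes.Buffer satisfies the writer interface, smithyxml aliases the vendored github.com/aws/smithy-go/encoding/xml package, the element names are hypothetical, and MemberElement comes from the value.go file removed further below.

package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	buf := bytes.NewBuffer(nil)
	enc := smithyxml.NewEncoder(buf)

	ns := smithyxml.NewNamespaceAttribute("ns1", "https://example.com/schema")
	root := enc.RootElement(smithyxml.StartElement{
		Name: smithyxml.Name{Local: "Config"},
		Attr: []smithyxml.Attr{ns},
	})

	root.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "Region"}}).String("us-east-1")
	root.Close()

	fmt.Println(enc.String())
	// <Config xmlns:ns1="https://example.com/schema"><Region>us-east-1</Region></Config>
}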
-func (e Encoder) RootElement(element StartElement) Value {
-    return newValue(e.w, e.scratch, element)
-}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
deleted file mode 100644
index f3db6ccca..000000000
--- a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package xml
-
-import (
-    "encoding/xml"
-    "fmt"
-    "io"
-)
-
-// ErrorComponents represents the error response fields
-// that will be deserialized from an xml error response body
-type ErrorComponents struct {
-    Code    string
-    Message string
-}
-
-// GetErrorResponseComponents returns the error fields from an xml error response body
-func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
-    if noErrorWrapping {
-        var errResponse noWrappedErrorResponse
-        if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
-            return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
-        }
-        return ErrorComponents{
-            Code:    errResponse.Code,
-            Message: errResponse.Message,
-        }, nil
-    }
-
-    var errResponse wrappedErrorResponse
-    if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
-        return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
-    }
-    return ErrorComponents{
-        Code:    errResponse.Code,
-        Message: errResponse.Message,
-    }, nil
-}
-
-// noWrappedErrorResponse represents the error response body with
-// no internal <Error></Error> wrapping
-type noWrappedErrorResponse struct {
-    Code    string `xml:"Code"`
-    Message string `xml:"Message"`
-}
-
-// wrappedErrorResponse represents the error response body
-// wrapped within <Error></Error>
-type wrappedErrorResponse struct {
-    Code    string `xml:"Error>Code"`
-    Message string `xml:"Error>Message"`
-}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
deleted file mode 100644
index 1c5479af6..000000000
--- a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Copied and modified from Go 1.14 stdlib's encoding/xml
-
-package xml
-
-import (
-    "unicode/utf8"
-)
-
-// Copied from Go 1.14 stdlib's encoding/xml
-var (
-    escQuot = []byte("&#34;") // shorter than "&quot;"
-    escApos = []byte("&#39;") // shorter than "&apos;"
-    escAmp  = []byte("&amp;")
-    escLT   = []byte("&lt;")
-    escGT   = []byte("&gt;")
-    escTab  = []byte("&#x9;")
-    escNL   = []byte("&#xA;")
-    escCR   = []byte("&#xD;")
-    escFFFD = []byte("\uFFFD") // Unicode replacement character
-
-    // Additional Escapes
-    escNextLine = []byte("&#x85;")
-    escLS       = []byte("&#x2028;")
-)
-
-// Decide whether the given rune is in the XML Character Range, per
-// the Char production of https://www.xml.com/axml/testaxml.htm,
-// Section 2.2 Characters.
-func isInCharacterRange(r rune) (inrange bool) {
-    return r == 0x09 ||
-        r == 0x0A ||
-        r == 0x0D ||
-        r >= 0x20 && r <= 0xD7FF ||
-        r >= 0xE000 && r <= 0xFFFD ||
-        r >= 0x10000 && r <= 0x10FFFF
-}
-
-// TODO: When do we need to escape the string?
-// Based on encoding/xml escapeString from the Go Standard Library.
-// https://golang.org/src/encoding/xml/xml.go
-func escapeString(e writer, s string) {
-    var esc []byte
-    last := 0
-    for i := 0; i < len(s); {
-        r, width := utf8.DecodeRuneInString(s[i:])
-        i += width
-        switch r {
-        case '"':
-            esc = escQuot
-        case '\'':
-            esc = escApos
-        case '&':
-            esc = escAmp
-        case '<':
-            esc = escLT
-        case '>':
-            esc = escGT
-        case '\t':
-            esc = escTab
-        case '\n':
-            esc = escNL
-        case '\r':
-            esc = escCR
-        case '\u0085':
-            // Not escaped by stdlib
-            esc = escNextLine
-        case '\u2028':
-            // Not escaped by stdlib
-            esc = escLS
-        default:
-            if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
-                esc = escFFFD
-                break
-            }
-            continue
-        }
-        e.WriteString(s[last : i-width])
-        e.Write(esc)
-        last = i
-    }
-    e.WriteString(s[last:])
-}
-
-// escapeText writes to w the properly escaped XML equivalent
-// of the plain text data s. If escapeNewline is true, newline
-// characters will be escaped.
-//
-// Based on encoding/xml escapeText from the Go Standard Library.
-// https://golang.org/src/encoding/xml/xml.go
-func escapeText(e writer, s []byte) {
-    var esc []byte
-    last := 0
-    for i := 0; i < len(s); {
-        r, width := utf8.DecodeRune(s[i:])
-        i += width
-        switch r {
-        case '"':
-            esc = escQuot
-        case '\'':
-            esc = escApos
-        case '&':
-            esc = escAmp
-        case '<':
-            esc = escLT
-        case '>':
-            esc = escGT
-        case '\t':
-            esc = escTab
-        case '\n':
-            // This always escapes newline, which is different than stdlib's optional
-            // escape of new line.
-            esc = escNL
-        case '\r':
-            esc = escCR
-        case '\u0085':
-            // Not escaped by stdlib
-            esc = escNextLine
-        case '\u2028':
-            // Not escaped by stdlib
-            esc = escLS
-        default:
-            if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
-                esc = escFFFD
-                break
-            }
-            continue
-        }
-        e.Write(s[last : i-width])
-        e.Write(esc)
-        last = i
-    }
-    e.Write(s[last:])
-}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
deleted file mode 100644
index e42858965..000000000
--- a/vendor/github.com/aws/smithy-go/encoding/xml/map.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package xml
-
-// mapEntryWrapper is the default member wrapper start element for XML Map entry
-var mapEntryWrapper = StartElement{
-    Name: Name{Local: "entry"},
-}
-
-// Map represents the encoding of a XML map type
-type Map struct {
-    w       writer
-    scratch *[]byte
-
-    // member start element is the map entry wrapper start element
-    memberStartElement StartElement
-
-    // isFlattened returns true if the map is a flattened map
-    isFlattened bool
-}
-
-// newMap returns a map encoder which sets the default map
-// entry wrapper to `entry`.
-//
-// A map `someMap : {{key:"abc", value:"123"}}` is represented as
-// `<someMap><entry><key>abc</key><value>123</value></entry></someMap>`.
-func newMap(w writer, scratch *[]byte) *Map {
-    return &Map{
-        w:                  w,
-        scratch:            scratch,
-        memberStartElement: mapEntryWrapper,
-    }
-}
-
-// newFlattenedMap returns a map encoder which sets the map
-// entry wrapper to the passed in memberWrapper`.
-// -// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as -// `abc123`. -func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map { - return &Map{ - w: w, - scratch: scratch, - memberStartElement: memberWrapper, - isFlattened: true, - } -} - -// Entry returns a Value encoder with map's element. -// It writes the member wrapper start tag for each entry. -func (m *Map) Entry() Value { - v := newValue(m.w, m.scratch, m.memberStartElement) - v.isFlattened = m.isFlattened - return v -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go deleted file mode 100644 index 09434b2c0..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/value.go +++ /dev/null @@ -1,302 +0,0 @@ -package xml - -import ( - "encoding/base64" - "fmt" - "math/big" - "strconv" - - "github.com/aws/smithy-go/encoding" -) - -// Value represents an XML Value type -// XML Value types: Object, Array, Map, String, Number, Boolean. -type Value struct { - w writer - scratch *[]byte - - // xml start element is the associated start element for the Value - startElement StartElement - - // indicates if the Value represents a flattened shape - isFlattened bool -} - -// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag -func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value { - return Value{ - w: w, - scratch: scratch, - startElement: startElement, - } -} - -// newValue writes the start element xml tag and returns a Value -func newValue(w writer, scratch *[]byte, startElement StartElement) Value { - writeStartElement(w, startElement) - return Value{w: w, scratch: scratch, startElement: startElement} -} - -// writeStartElement takes in a start element and writes it. -// It handles namespace, attributes in start element. -func writeStartElement(w writer, el StartElement) error { - if el.isZero() { - return fmt.Errorf("xml start element cannot be nil") - } - - w.WriteRune(leftAngleBracket) - - if len(el.Name.Space) != 0 { - escapeString(w, el.Name.Space) - w.WriteRune(colon) - } - escapeString(w, el.Name.Local) - for _, attr := range el.Attr { - w.WriteRune(' ') - writeAttribute(w, &attr) - } - - w.WriteRune(rightAngleBracket) - return nil -} - -// writeAttribute writes an attribute from a provided Attribute -// For a namespace attribute, the attr.Name.Space must be defined as "xmlns". -// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName -func writeAttribute(w writer, attr *Attr) { - // if local, space both are not empty - if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 { - escapeString(w, attr.Name.Space) - w.WriteRune(colon) - } - - // if prefix is empty, the default `xmlns` space should be used as prefix. - if len(attr.Name.Local) == 0 { - attr.Name.Local = attr.Name.Space - } - - escapeString(w, attr.Name.Local) - w.WriteRune(equals) - w.WriteRune(quote) - escapeString(w, attr.Value) - w.WriteRune(quote) -} - -// writeEndElement takes in a end element and writes it. -func writeEndElement(w writer, el EndElement) error { - if el.isZero() { - return fmt.Errorf("xml end element cannot be nil") - } - - w.WriteRune(leftAngleBracket) - w.WriteRune(forwardSlash) - - if len(el.Name.Space) != 0 { - escapeString(w, el.Name.Space) - w.WriteRune(colon) - } - escapeString(w, el.Name.Local) - w.WriteRune(rightAngleBracket) - - return nil -} - -// String encodes v as a XML string. -// It will auto close the parent xml element tag. 
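To make the wrapped-map flow above concrete, an illustrative sketch (not part of the vendored sources; smithyxml aliases the vendored github.com/aws/smithy-go/encoding/xml package and the element names are hypothetical):

package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	buf := bytes.NewBuffer(nil)
	root := smithyxml.NewEncoder(buf).RootElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "Attributes"}})

	m := root.Map()    // wrapped map: entries use the default <entry> wrapper
	entry := m.Entry() // writes <entry>
	entry.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "key"}}).String("color")
	entry.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "value"}}).String("blue")
	entry.Close() // </entry>

	root.Close() // </Attributes>
	fmt.Println(buf.String())
	// <Attributes><entry><key>color</key><value>blue</value></entry></Attributes>
}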
-func (xv Value) String(v string) { - escapeString(xv.w, v) - xv.Close() -} - -// Byte encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Byte(v int8) { - xv.Long(int64(v)) -} - -// Short encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Short(v int16) { - xv.Long(int64(v)) -} - -// Integer encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Integer(v int32) { - xv.Long(int64(v)) -} - -// Long encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Long(v int64) { - *xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10) - xv.w.Write(*xv.scratch) - - xv.Close() -} - -// Float encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Float(v float32) { - xv.float(float64(v), 32) - xv.Close() -} - -// Double encodes v as a XML number. -// It will auto close the parent xml element tag. -func (xv Value) Double(v float64) { - xv.float(v, 64) - xv.Close() -} - -func (xv Value) float(v float64, bits int) { - *xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits) - xv.w.Write(*xv.scratch) -} - -// Boolean encodes v as a XML boolean. -// It will auto close the parent xml element tag. -func (xv Value) Boolean(v bool) { - *xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v) - xv.w.Write(*xv.scratch) - - xv.Close() -} - -// Base64EncodeBytes writes v as a base64 value in XML string. -// It will auto close the parent xml element tag. -func (xv Value) Base64EncodeBytes(v []byte) { - encodeByteSlice(xv.w, (*xv.scratch)[:0], v) - xv.Close() -} - -// BigInteger encodes v big.Int as XML value. -// It will auto close the parent xml element tag. -func (xv Value) BigInteger(v *big.Int) { - xv.w.Write([]byte(v.Text(10))) - xv.Close() -} - -// BigDecimal encodes v big.Float as XML value. -// It will auto close the parent xml element tag. -func (xv Value) BigDecimal(v *big.Float) { - if i, accuracy := v.Int64(); accuracy == big.Exact { - xv.Long(i) - return - } - - xv.w.Write([]byte(v.Text('e', -1))) - xv.Close() -} - -// Write writes v directly to the xml document -// if escapeXMLText is set to true, write will escape text. -// It will auto close the parent xml element tag. -func (xv Value) Write(v []byte, escapeXMLText bool) { - // escape and write xml text - if escapeXMLText { - escapeText(xv.w, v) - } else { - // write xml directly - xv.w.Write(v) - } - - xv.Close() -} - -// MemberElement does member element encoding. It returns a Value. -// Member Element method should be used for all shapes except flattened shapes. -// -// A call to MemberElement will write nested element tags directly using the -// provided start element. The value returned by MemberElement should be closed. -func (xv Value) MemberElement(element StartElement) Value { - return newValue(xv.w, xv.scratch, element) -} - -// FlattenedElement returns flattened element encoding. It returns a Value. -// This method should be used for flattened shapes. -// -// Unlike MemberElement, flattened element will NOT write element tags -// directly for the associated start element. -// -// The value returned by the FlattenedElement does not need to be closed. -func (xv Value) FlattenedElement(element StartElement) Value { - v := newFlattenedValue(xv.w, xv.scratch, element) - v.isFlattened = true - return v -} - -// Array returns an array encoder. By default, the members of array are -// wrapped with `` element tag. 
-// If value is marked as flattened, the start element is used to wrap the members instead of -// the `` element. -func (xv Value) Array() *Array { - return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened) -} - -/* -ArrayWithCustomName returns an array encoder. - -It takes named start element as an argument, the named start element will used to wrap xml array entries. -for eg, `entry1` -Here `customName` named start element will be wrapped on each array member. -*/ -func (xv Value) ArrayWithCustomName(element StartElement) *Array { - return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened) -} - -/* -Map returns a map encoder. By default, the map entries are -wrapped with `` element tag. - -If value is marked as flattened, the start element is used to wrap the entry instead of -the `` element. -*/ -func (xv Value) Map() *Map { - // flattened map - if xv.isFlattened { - return newFlattenedMap(xv.w, xv.scratch, xv.startElement) - } - - // un-flattened map - return newMap(xv.w, xv.scratch) -} - -// encodeByteSlice is modified copy of json encoder's encodeByteSlice. -// It is used to base64 encode a byte slice. -func encodeByteSlice(w writer, scratch []byte, v []byte) { - if v == nil { - return - } - - encodedLen := base64.StdEncoding.EncodedLen(len(v)) - if encodedLen <= len(scratch) { - // If the encoded bytes fit in e.scratch, avoid an extra - // allocation and use the cheaper Encoding.Encode. - dst := scratch[:encodedLen] - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else if encodedLen <= 1024 { - // The encoded bytes are short enough to allocate for, and - // Encoding.Encode is still cheaper. - dst := make([]byte, encodedLen) - base64.StdEncoding.Encode(dst, v) - w.Write(dst) - } else { - // The encoded bytes are too long to cheaply allocate, and - // Encoding.Encode is no longer noticeably cheaper. - enc := base64.NewEncoder(base64.StdEncoding, w) - enc.Write(v) - enc.Close() - } -} - -// IsFlattened returns true if value is for flattened shape. -func (xv Value) IsFlattened() bool { - return xv.isFlattened -} - -// Close closes the value. -func (xv Value) Close() { - writeEndElement(xv.w, xv.startElement.End()) -} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go deleted file mode 100644 index dc4eebdff..000000000 --- a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go +++ /dev/null @@ -1,154 +0,0 @@ -package xml - -import ( - "encoding/xml" - "fmt" - "strings" -) - -// NodeDecoder is a XML decoder wrapper that is responsible to decoding -// a single XML Node element and it's nested member elements. This wrapper decoder -// takes in the start element of the top level node being decoded. -type NodeDecoder struct { - Decoder *xml.Decoder - StartEl xml.StartElement -} - -// WrapNodeDecoder returns an initialized XMLNodeDecoder -func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder { - return NodeDecoder{ - Decoder: decoder, - StartEl: startEl, - } -} - -// Token on a Node Decoder returns a xml StartElement. 
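A minimal sketch of how the Value encoder above is typically driven. The NewEncoder and RootElement entry points live in the package's encoder.go (removed elsewhere in this diff), so their exact shape is assumed here from upstream smithy-go; the Person/Name element names are invented for illustration.

package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	buf := bytes.NewBuffer(nil)

	// Assumed entry point from encoder.go: the encoder writes into buf through
	// the package's unexported writer interface.
	enc := smithyxml.NewEncoder(buf)

	// RootElement writes <Person> and returns a Value for its contents.
	root := enc.RootElement(smithyxml.StartElement{
		Name: smithyxml.Name{Local: "Person"},
	})

	// MemberElement writes the nested <Name> tag; String escapes the text and
	// auto-closes that member element.
	root.MemberElement(smithyxml.StartElement{
		Name: smithyxml.Name{Local: "Name"},
	}).String("Ann & Bob <admins>")

	// Close writes </Person>.
	root.Close()

	fmt.Println(buf.String())
	// Expected along the lines of:
	// <Person><Name>Ann &amp; Bob &lt;admins&gt;</Name></Person>
}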
It returns a boolean that indicates the -// a token is the node decoder's end node token; and an error which indicates any error -// that occurred while retrieving the start element -func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) { - for { - token, e := d.Decoder.Token() - if e != nil { - return t, done, e - } - - // check if we reach end of the node being decoded - if el, ok := token.(xml.EndElement); ok { - return t, el == d.StartEl.End(), err - } - - if t, ok := token.(xml.StartElement); ok { - return restoreAttrNamespaces(t), false, err - } - - // skip token if it is a comment or preamble or empty space value due to indentation - // or if it's a value and is not expected - } -} - -// restoreAttrNamespaces update XML attributes to restore the short namespaces found within -// the raw XML document. -func restoreAttrNamespaces(node xml.StartElement) xml.StartElement { - if len(node.Attr) == 0 { - return node - } - - // Generate a mapping of XML namespace values to their short names. - ns := map[string]string{} - for _, a := range node.Attr { - if a.Name.Space == "xmlns" { - ns[a.Value] = a.Name.Local - break - } - } - - for i, a := range node.Attr { - if a.Name.Space == "xmlns" { - continue - } - // By default, xml.Decoder will fully resolve these namespaces. So if you had - // then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to - // continue to resolve as `bar` so we can easily identify it later on. - if v, ok := ns[node.Attr[i].Name.Space]; ok { - node.Attr[i].Name.Space = v - } - } - return node -} - -// GetElement looks for the given tag name at the current level, and returns the element if found, and -// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking -// the document. -func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) { - for { - token, done, err := d.Token() - if err != nil { - return t, err - } - if done { - return t, fmt.Errorf("%s node not found", name) - } - switch { - case strings.EqualFold(name, token.Name.Local): - return token, nil - default: - err = d.Decoder.Skip() - if err != nil { - return t, err - } - } - } -} - -// Value provides an abstraction to retrieve char data value within an xml element. -// The method will return an error if it encounters a nested xml element instead of char data. -// This method should only be used to retrieve simple type or blob shape values as []byte. -func (d NodeDecoder) Value() (c []byte, err error) { - t, e := d.Decoder.Token() - if e != nil { - return c, e - } - - endElement := d.StartEl.End() - - switch ev := t.(type) { - case xml.CharData: - c = ev.Copy() - case xml.EndElement: // end tag or self-closing - if ev == endElement { - return []byte{}, err - } - return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t) - default: - return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t) - } - - t, e = d.Decoder.Token() - if e != nil { - return c, e - } - - if ev, ok := t.(xml.EndElement); ok { - if ev == endElement { - return c, err - } - } - - return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t) -} - -// FetchRootElement takes in a decoder and returns the first start element within the xml body. 
-// This function is useful in fetching the start element of an XML response and ignore the -// comments and preamble -func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) { - for { - t, e := decoder.Token() - if e != nil { - return startElement, e - } - - if startElement, ok := t.(xml.StartElement); ok { - return startElement, err - } - } -} diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go deleted file mode 100644 index a93528397..000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go +++ /dev/null @@ -1,23 +0,0 @@ -package transport - -import ( - "net/http" - "net/url" - - "github.com/aws/smithy-go" -) - -// Endpoint is the endpoint object returned by Endpoint resolution V2 -type Endpoint struct { - // The complete URL minimally specfiying the scheme and host. - // May optionally specify the port and base path component. - URI url.URL - - // An optional set of headers to be sent using transport layer headers. - Headers http.Header - - // A grab-bag property map of endpoint attributes. The - // values present here are subject to change, or being add/removed at any - // time. - Properties smithy.Properties -} diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go deleted file mode 100644 index e24e190dc..000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package rulesfn provides endpoint rule functions for evaluating endpoint -// resolution rules. - -package rulesfn diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go deleted file mode 100644 index 5cf4a7b02..000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go +++ /dev/null @@ -1,25 +0,0 @@ -package rulesfn - -// Substring returns the substring of the input provided. If the start or stop -// indexes are not valid for the input nil will be returned. If errors occur -// they will be added to the provided [ErrorCollector]. -func SubString(input string, start, stop int, reverse bool) *string { - if start < 0 || stop < 1 || start >= stop || len(input) < stop { - return nil - } - - for _, r := range input { - if r > 127 { - return nil - } - } - - if !reverse { - v := input[start:stop] - return &v - } - - rStart := len(input) - stop - rStop := len(input) - start - return SubString(input, rStart, rStop, false) -} diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go deleted file mode 100644 index 0c1154127..000000000 --- a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go +++ /dev/null @@ -1,130 +0,0 @@ -package rulesfn - -import ( - "fmt" - "net" - "net/url" - "strings" - - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// IsValidHostLabel returns if the input is a single valid [RFC 1123] host -// label. If allowSubDomains is true, will allow validation to include nested -// host labels. Returns false if the input is not a valid host label. If errors -// occur they will be added to the provided [ErrorCollector]. 
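For the decoder side, the exported helpers above (FetchRootElement, WrapNodeDecoder, GetElement, Value) compose roughly as follows; the GetThingResult/Name element names are invented for the sketch.

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	// Hypothetical response body; element names are illustrative only.
	body := []byte(`<!-- preamble --><GetThingResult><Name>example</Name></GetThingResult>`)
	dec := xml.NewDecoder(bytes.NewReader(body))

	// FetchRootElement skips comments and preamble to find the document root.
	root, err := smithyxml.FetchRootElement(dec)
	if err != nil {
		panic(err)
	}

	// Walk the root node's children and locate the <Name> element.
	nd := smithyxml.WrapNodeDecoder(dec, root)
	nameEl, err := nd.GetElement("Name")
	if err != nil {
		panic(err)
	}

	// Value reads the element's character data up to its end tag.
	val, err := smithyxml.WrapNodeDecoder(dec, nameEl).Value()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // example
}

Note that GetElement skips non-matching siblings via Decoder.Skip, so it only matches elements at the current nesting level.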
-// -// [RFC 1123]: https://www.ietf.org/rfc/rfc1123.txt -func IsValidHostLabel(input string, allowSubDomains bool) bool { - var labels []string - if allowSubDomains { - labels = strings.Split(input, ".") - } else { - labels = []string{input} - } - - for _, label := range labels { - if !smithyhttp.ValidHostLabel(label) { - return false - } - } - - return true -} - -// ParseURL returns a [URL] if the provided string could be parsed. Returns nil -// if the string could not be parsed. Any parsing error will be added to the -// [ErrorCollector]. -// -// If the input URL string contains an IP6 address with a zone index. The -// returned [builtin.URL.Authority] value will contain the percent escaped (%) -// zone index separator. -func ParseURL(input string) *URL { - u, err := url.Parse(input) - if err != nil { - return nil - } - - if u.RawQuery != "" { - return nil - } - - if u.Scheme != "http" && u.Scheme != "https" { - return nil - } - - normalizedPath := u.Path - if !strings.HasPrefix(normalizedPath, "/") { - normalizedPath = "/" + normalizedPath - } - if !strings.HasSuffix(normalizedPath, "/") { - normalizedPath = normalizedPath + "/" - } - - // IP6 hosts may have zone indexes that need to be escaped to be valid in a - // URI. The Go URL parser will unescape the `%25` into `%`. This needs to - // be reverted since the returned URL will be used in string builders. - authority := strings.ReplaceAll(u.Host, "%", "%25") - - return &URL{ - Scheme: u.Scheme, - Authority: authority, - Path: u.Path, - NormalizedPath: normalizedPath, - IsIp: net.ParseIP(hostnameWithoutZone(u)) != nil, - } -} - -// URL provides the structure describing the parts of a parsed URL returned by -// [ParseURL]. -type URL struct { - Scheme string // https://www.rfc-editor.org/rfc/rfc3986#section-3.1 - Authority string // https://www.rfc-editor.org/rfc/rfc3986#section-3.2 - Path string // https://www.rfc-editor.org/rfc/rfc3986#section-3.3 - NormalizedPath string // https://www.rfc-editor.org/rfc/rfc3986#section-6.2.3 - IsIp bool -} - -// URIEncode returns an percent-encoded [RFC3986 section 2.1] version of the -// input string. -// -// [RFC3986 section 2.1]: https://www.rfc-editor.org/rfc/rfc3986#section-2.1 -func URIEncode(input string) string { - var output strings.Builder - for _, c := range []byte(input) { - if validPercentEncodedChar(c) { - output.WriteByte(c) - continue - } - - fmt.Fprintf(&output, "%%%X", c) - } - - return output.String() -} - -func validPercentEncodedChar(c byte) bool { - return (c >= 'a' && c <= 'z') || - (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9') || - c == '-' || c == '_' || c == '.' || c == '~' -} - -// hostname implements u.Hostname() but strips the ipv6 zone ID (if present) -// such that net.ParseIP can still recognize IPv6 addresses with zone IDs. -// -// FUTURE(10/2023): netip.ParseAddr handles this natively but we can't take -// that package as a dependency yet due to our min go version (1.15, netip -// starts in 1.18). When we align with go runtime deprecation policy in -// 10/2023, we can remove this. 
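The rulesfn helpers above sit under a private package and are normally only invoked from generated endpoint resolvers, but their behaviour can be sketched in isolation (the inputs below are arbitrary).

package main

import (
	"fmt"

	"github.com/aws/smithy-go/endpoints/private/rulesfn"
)

func main() {
	// ParseURL rejects URLs that carry a query string or a non-http(s) scheme.
	if u := rulesfn.ParseURL("https://example.com/prefix"); u != nil {
		fmt.Println(u.Scheme, u.Authority, u.NormalizedPath, u.IsIp)
		// https example.com /prefix/ false
	}

	// SubString only operates on ASCII input and returns nil for invalid bounds.
	if s := rulesfn.SubString("example.com", 0, 7, false); s != nil {
		fmt.Println(*s) // example
	}

	fmt.Println(rulesfn.URIEncode("a b/c"))                          // a%20b%2Fc
	fmt.Println(rulesfn.IsValidHostLabel("my-bucket.example", true)) // true
}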
-func hostnameWithoutZone(u *url.URL) string { - full := u.Hostname() - - // this more or less mimics the internals of net/ (see unexported - // splitHostZone in that source) but throws the zone away because we don't - // need it - if i := strings.LastIndex(full, "%"); i > -1 { - return full[:i] - } - return full -} diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go deleted file mode 100644 index d6948d020..000000000 --- a/vendor/github.com/aws/smithy-go/errors.go +++ /dev/null @@ -1,137 +0,0 @@ -package smithy - -import "fmt" - -// APIError provides the generic API and protocol agnostic error type all SDK -// generated exception types will implement. -type APIError interface { - error - - // ErrorCode returns the error code for the API exception. - ErrorCode() string - // ErrorMessage returns the error message for the API exception. - ErrorMessage() string - // ErrorFault returns the fault for the API exception. - ErrorFault() ErrorFault -} - -// GenericAPIError provides a generic concrete API error type that SDKs can use -// to deserialize error responses into. Should be used for unmodeled or untyped -// errors. -type GenericAPIError struct { - Code string - Message string - Fault ErrorFault -} - -// ErrorCode returns the error code for the API exception. -func (e *GenericAPIError) ErrorCode() string { return e.Code } - -// ErrorMessage returns the error message for the API exception. -func (e *GenericAPIError) ErrorMessage() string { return e.Message } - -// ErrorFault returns the fault for the API exception. -func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault } - -func (e *GenericAPIError) Error() string { - return fmt.Sprintf("api error %s: %s", e.Code, e.Message) -} - -var _ APIError = (*GenericAPIError)(nil) - -// OperationError decorates an underlying error which occurred while invoking -// an operation with names of the operation and API. -type OperationError struct { - ServiceID string - OperationName string - Err error -} - -// Service returns the name of the API service the error occurred with. -func (e *OperationError) Service() string { return e.ServiceID } - -// Operation returns the name of the API operation the error occurred with. -func (e *OperationError) Operation() string { return e.OperationName } - -// Unwrap returns the nested error if any, or nil. -func (e *OperationError) Unwrap() error { return e.Err } - -func (e *OperationError) Error() string { - return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err) -} - -// DeserializationError provides a wrapper for an error that occurs during -// deserialization. -type DeserializationError struct { - Err error // original error - Snapshot []byte -} - -// Error returns a formatted error for DeserializationError -func (e *DeserializationError) Error() string { - const msg = "deserialization failed" - if e.Err == nil { - return msg - } - return fmt.Sprintf("%s, %v", msg, e.Err) -} - -// Unwrap returns the underlying Error in DeserializationError -func (e *DeserializationError) Unwrap() error { return e.Err } - -// ErrorFault provides the type for a Smithy API error fault. 
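Callers usually work with these error types through errors.As rather than concrete type checks; a small sketch (the service, operation, and error code are made up).

package main

import (
	"errors"
	"fmt"

	"github.com/aws/smithy-go"
)

func classify(err error) {
	// OperationError wraps the underlying failure with service/operation names.
	var oe *smithy.OperationError
	if errors.As(err, &oe) {
		fmt.Println("service:", oe.Service(), "operation:", oe.Operation())
	}

	// APIError surfaces the modeled code, message, and fault.
	var ae smithy.APIError
	if errors.As(err, &ae) {
		fmt.Printf("code=%s fault=%s message=%s\n",
			ae.ErrorCode(), ae.ErrorFault(), ae.ErrorMessage())
	}
}

func main() {
	classify(&smithy.OperationError{
		ServiceID:     "ExampleService",
		OperationName: "GetThing",
		Err: &smithy.GenericAPIError{
			Code:    "Throttling",
			Message: "slow down",
			Fault:   smithy.FaultServer,
		},
	})
}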
-type ErrorFault int - -// ErrorFault enumeration values -const ( - FaultUnknown ErrorFault = iota - FaultServer - FaultClient -) - -func (f ErrorFault) String() string { - switch f { - case FaultServer: - return "server" - case FaultClient: - return "client" - default: - return "unknown" - } -} - -// SerializationError represents an error that occurred while attempting to serialize a request -type SerializationError struct { - Err error // original error -} - -// Error returns a formatted error for SerializationError -func (e *SerializationError) Error() string { - const msg = "serialization failed" - if e.Err == nil { - return msg - } - return fmt.Sprintf("%s: %v", msg, e.Err) -} - -// Unwrap returns the underlying Error in SerializationError -func (e *SerializationError) Unwrap() error { return e.Err } - -// CanceledError is the error that will be returned by an API request that was -// canceled. API operations given a Context may return this error when -// canceled. -type CanceledError struct { - Err error -} - -// CanceledError returns true to satisfy interfaces checking for canceled errors. -func (*CanceledError) CanceledError() bool { return true } - -// Unwrap returns the underlying error, if there was one. -func (e *CanceledError) Unwrap() error { - return e.Err -} - -func (e *CanceledError) Error() string { - return fmt.Sprintf("canceled, %v", e.Err) -} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go deleted file mode 100644 index 212eae4fa..000000000 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. - -package smithy - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.1" diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE deleted file mode 100644 index fe6a62006..000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go deleted file mode 100644 index 9c9d02b94..000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package singleflight provides a duplicate function call suppression -// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight -// package. The package is forked, because the package a part of the unstable -// and unversioned golang.org/x/sync module. -// -// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight - -package singleflight diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go deleted file mode 100644 index e8a1b17d5..000000000 --- a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package singleflight - -import ( - "bytes" - "errors" - "fmt" - "runtime" - "runtime/debug" - "sync" -) - -// errGoexit indicates the runtime.Goexit was called in -// the user given function. -var errGoexit = errors.New("runtime.Goexit was called") - -// A panicError is an arbitrary value recovered from a panic -// with the stack trace during the execution of given function. -type panicError struct { - value interface{} - stack []byte -} - -// Error implements error interface. -func (p *panicError) Error() string { - return fmt.Sprintf("%v\n\n%s", p.value, p.stack) -} - -func newPanicError(v interface{}) error { - stack := debug.Stack() - - // The first line of the stack trace is of the form "goroutine N [status]:" - // but by the time the panic reaches Do the goroutine may no longer exist - // and its status will have changed. Trim out the misleading line. - if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { - stack = stack[line+1:] - } - return &panicError{value: v, stack: stack} -} - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - - // These fields are written once before the WaitGroup is done - // and are only read after the WaitGroup is done. - val interface{} - err error - - // forgotten indicates whether Forget was called with this call's key - // while the call was still in flight. - forgotten bool - - // These fields are read and written with the singleflight - // mutex held before the WaitGroup is done, and are read but - // not written after the WaitGroup is done. - dups int - chans []chan<- Result -} - -// Group represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. 
-type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Result holds the results of Do, so they can be passed -// on a channel. -type Result struct { - Val interface{} - Err error - Shared bool -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.mu.Unlock() - c.wg.Wait() - - if e, ok := c.err.(*panicError); ok { - panic(e) - } else if c.err == errGoexit { - runtime.Goexit() - } - return c.val, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - g.doCall(c, key, fn) - return c.val, c.err, c.dups > 0 -} - -// DoChan is like Do but returns a channel that will receive the -// results when they are ready. -// -// The returned channel will not be closed. -func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { - ch := make(chan Result, 1) - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - c.chans = append(c.chans, ch) - g.mu.Unlock() - return ch - } - c := &call{chans: []chan<- Result{ch}} - c.wg.Add(1) - g.m[key] = c - g.mu.Unlock() - - go g.doCall(c, key, fn) - - return ch -} - -// doCall handles the single call for a key. -func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - normalReturn := false - recovered := false - - // use double-defer to distinguish panic from runtime.Goexit, - // more details see https://golang.org/cl/134395 - defer func() { - // the given function invoked runtime.Goexit - if !normalReturn && !recovered { - c.err = errGoexit - } - - c.wg.Done() - g.mu.Lock() - defer g.mu.Unlock() - if !c.forgotten { - delete(g.m, key) - } - - if e, ok := c.err.(*panicError); ok { - // In order to prevent the waiting channels from being blocked forever, - // needs to ensure that this panic cannot be recovered. - if len(c.chans) > 0 { - go panic(e) - select {} // Keep this goroutine around so that it will appear in the crash dump. - } else { - panic(e) - } - } else if c.err == errGoexit { - // Already in the process of goexit, no need to call again - } else { - // Normal return - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} - } - } - }() - - func() { - defer func() { - if !normalReturn { - // Ideally, we would wait to take a stack trace until we've determined - // whether this is a panic or a runtime.Goexit. - // - // Unfortunately, the only way we can distinguish the two is to see - // whether the recover stopped the goroutine from terminating, and by - // the time we know that, the part of the stack trace relevant to the - // panic has been discarded. - if r := recover(); r != nil { - c.err = newPanicError(r) - } - } - }() - - c.val, c.err = fn() - normalReturn = true - }() - - if !normalReturn { - recovered = true - } -} - -// Forget tells the singleflight to forget about a key. Future calls -// to Do for this key will call the function rather than waiting for -// an earlier call to complete. 
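Because this copy is internal to smithy-go it cannot be imported directly; the sketch below uses the upstream golang.org/x/sync/singleflight package it was forked from, which exposes the same Do signature.

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Concurrent callers sharing the same key coalesce onto a single
			// in-flight execution and all receive its result.
			v, _, shared := g.Do("config", func() (interface{}, error) {
				time.Sleep(10 * time.Millisecond) // simulate an expensive load
				return "loaded-once", nil
			})
			fmt.Println(v, "shared:", shared)
		}()
	}
	wg.Wait()
}

DoChan offers the same coalescing with channel-based delivery, and Forget drops the key so the next caller re-runs the function.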
-func (g *Group) Forget(key string) { - g.mu.Lock() - if c, ok := g.m[key]; ok { - c.forgotten = true - } - delete(g.m, key) - g.mu.Unlock() -} diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go deleted file mode 100644 index f8417c15b..000000000 --- a/vendor/github.com/aws/smithy-go/io/byte.go +++ /dev/null @@ -1,12 +0,0 @@ -package io - -const ( - // Byte is 8 bits - Byte int64 = 1 - // KibiByte (KiB) is 1024 Bytes - KibiByte = Byte * 1024 - // MebiByte (MiB) is 1024 KiB - MebiByte = KibiByte * 1024 - // GibiByte (GiB) is 1024 MiB - GibiByte = MebiByte * 1024 -) diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go deleted file mode 100644 index a6a33eaf5..000000000 --- a/vendor/github.com/aws/smithy-go/io/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package io provides utilities for Smithy generated API clients. -package io diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go deleted file mode 100644 index 07063f296..000000000 --- a/vendor/github.com/aws/smithy-go/io/reader.go +++ /dev/null @@ -1,16 +0,0 @@ -package io - -import ( - "io" -) - -// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method -// that does nothing. -type ReadSeekNopCloser struct { - io.ReadSeeker -} - -// Close does nothing. -func (ReadSeekNopCloser) Close() error { - return nil -} diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go deleted file mode 100644 index 06b476add..000000000 --- a/vendor/github.com/aws/smithy-go/io/ringbuffer.go +++ /dev/null @@ -1,94 +0,0 @@ -package io - -import ( - "bytes" - "io" -) - -// RingBuffer struct satisfies io.ReadWrite interface. -// -// ReadBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a -// revolving window. -type RingBuffer struct { - slice []byte - start int - end int - size int -} - -// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer. -func NewRingBuffer(slice []byte) *RingBuffer { - ringBuf := RingBuffer{ - slice: slice, - } - return &ringBuf -} - -// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error. -func (r *RingBuffer) Write(p []byte) (int, error) { - for _, b := range p { - // check if end points to invalid index, we need to circle back - if r.end == len(r.slice) { - r.end = 0 - } - // check if start points to invalid index, we need to circle back - if r.start == len(r.slice) { - r.start = 0 - } - // if ring buffer is filled, increment the start index - if r.size == len(r.slice) { - r.size-- - r.start++ - } - - r.slice[r.end] = b - r.end++ - r.size++ - } - return len(p), nil -} - -// Read copies the data on the ring buffer into the byte slice provided to the method. -// Returns the read count along with any error encountered while reading. -func (r *RingBuffer) Read(p []byte) (int, error) { - // readCount keeps track of the number of bytes read - var readCount int - for j := 0; j < len(p); j++ { - // if ring buffer is empty or completely read - // return EOF error. - if r.size == 0 { - return readCount, io.EOF - } - - if r.start == len(r.slice) { - r.start = 0 - } - - p[j] = r.slice[r.start] - readCount++ - // increment the start pointer for ring buffer - r.start++ - // decrement the size of ring buffer - r.size-- - } - return readCount, nil -} - -// Len returns the number of unread bytes in the buffer. 
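The RingBuffer is typically used to keep a bounded snapshot of streamed data (for example, part of a request body) for later error reporting; a small sketch of that windowing behaviour:

package main

import (
	"fmt"

	smithyio "github.com/aws/smithy-go/io"
)

func main() {
	// An 8-byte window: once full, the oldest bytes are overwritten.
	rb := smithyio.NewRingBuffer(make([]byte, 8))

	rb.Write([]byte("hello, ring buffer"))

	out := make([]byte, 8)
	n, _ := rb.Read(out)
	fmt.Println(string(out[:n])) // "g buffer" -- the most recent 8 bytes written
}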
-func (r *RingBuffer) Len() int { - return r.size -} - -// Bytes returns a copy of the RingBuffer's bytes. -func (r RingBuffer) Bytes() []byte { - var b bytes.Buffer - io.Copy(&b, &r) - return b.Bytes() -} - -// Reset resets the ring buffer. -func (r *RingBuffer) Reset() { - *r = RingBuffer{ - slice: r.slice, - } -} diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh deleted file mode 100644 index 800bf3769..000000000 --- a/vendor/github.com/aws/smithy-go/local-mod-replace.sh +++ /dev/null @@ -1,39 +0,0 @@ -#1/usr/bin/env bash - -PROJECT_DIR="" -SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd) - -usage() { - echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2 - exit 1 -} - -while getopts "hs:d:" options; do - case "${options}" in - s) - SMITHY_SOURCE_DIR=${OPTARG} - if [ "$SMITHY_SOURCE_DIR" == "" ]; then - echo "path to smithy-go source directory is required" || exit - usage - fi - ;; - d) - PROJECT_DIR=${OPTARG} - ;; - h) - usage - ;; - *) - usage - ;; - esac -done - -if [ "$PROJECT_DIR" != "" ]; then - cd $PROJECT_DIR || exit -fi - -go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do - repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}} - echo -replace $x=$repPath -done | xargs go mod edit diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go deleted file mode 100644 index 2071924bd..000000000 --- a/vendor/github.com/aws/smithy-go/logging/logger.go +++ /dev/null @@ -1,82 +0,0 @@ -package logging - -import ( - "context" - "io" - "log" -) - -// Classification is the type of the log entry's classification name. -type Classification string - -// Set of standard classifications that can be used by clients and middleware -const ( - Warn Classification = "WARN" - Debug Classification = "DEBUG" -) - -// Logger is an interface for logging entries at certain classifications. -type Logger interface { - // Logf is expected to support the standard fmt package "verbs". - Logf(classification Classification, format string, v ...interface{}) -} - -// LoggerFunc is a wrapper around a function to satisfy the Logger interface. -type LoggerFunc func(classification Classification, format string, v ...interface{}) - -// Logf delegates the logging request to the wrapped function. -func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) { - f(classification, format, v...) -} - -// ContextLogger is an optional interface a Logger implementation may expose that provides -// the ability to create context aware log entries. -type ContextLogger interface { - WithContext(context.Context) Logger -} - -// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting -// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will -// be returned to the caller. -func WithContext(ctx context.Context, logger Logger) Logger { - if logger == nil { - return Nop{} - } - - cl, ok := logger.(ContextLogger) - if !ok { - return logger - } - - return cl.WithContext(ctx) -} - -// Nop is a Logger implementation that simply does not perform any logging. 
-type Nop struct{} - -// Logf simply returns without performing any action -func (n Nop) Logf(Classification, string, ...interface{}) { - return -} - -// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to it's -// Printf method. -type StandardLogger struct { - Logger *log.Logger -} - -// Logf logs the given classification and message to the underlying logger. -func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) { - if len(classification) != 0 { - format = string(classification) + " " + format - } - - s.Logger.Printf(format, v...) -} - -// NewStandardLogger returns a new StandardLogger -func NewStandardLogger(writer io.Writer) *StandardLogger { - return &StandardLogger{ - Logger: log.New(writer, "SDK ", log.LstdFlags), - } -} diff --git a/vendor/github.com/aws/smithy-go/metrics/metrics.go b/vendor/github.com/aws/smithy-go/metrics/metrics.go deleted file mode 100644 index c009d9f27..000000000 --- a/vendor/github.com/aws/smithy-go/metrics/metrics.go +++ /dev/null @@ -1,136 +0,0 @@ -// Package metrics defines the metrics APIs used by Smithy clients. -package metrics - -import ( - "context" - - "github.com/aws/smithy-go" -) - -// MeterProvider is the entry point for creating a Meter. -type MeterProvider interface { - Meter(scope string, opts ...MeterOption) Meter -} - -// MeterOption applies configuration to a Meter. -type MeterOption func(o *MeterOptions) - -// MeterOptions represents configuration for a Meter. -type MeterOptions struct { - Properties smithy.Properties -} - -// Meter is the entry point for creation of measurement instruments. -type Meter interface { - // integer/synchronous - Int64Counter(name string, opts ...InstrumentOption) (Int64Counter, error) - Int64UpDownCounter(name string, opts ...InstrumentOption) (Int64UpDownCounter, error) - Int64Gauge(name string, opts ...InstrumentOption) (Int64Gauge, error) - Int64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) - - // integer/asynchronous - Int64AsyncCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - Int64AsyncUpDownCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - Int64AsyncGauge(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - - // floating-point/synchronous - Float64Counter(name string, opts ...InstrumentOption) (Float64Counter, error) - Float64UpDownCounter(name string, opts ...InstrumentOption) (Float64UpDownCounter, error) - Float64Gauge(name string, opts ...InstrumentOption) (Float64Gauge, error) - Float64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) - - // floating-point/asynchronous - Float64AsyncCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - Float64AsyncUpDownCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) - Float64AsyncGauge(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) -} - -// InstrumentOption applies configuration to an instrument. -type InstrumentOption func(o *InstrumentOptions) - -// InstrumentOptions represents configuration for an instrument. -type InstrumentOptions struct { - UnitLabel string - Description string -} - -// Int64Counter measures a monotonically increasing int64 value. 
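A short sketch of the logging package just removed: StandardLogger wraps the standard library logger, and WithContext degrades a nil logger to the no-op implementation.

package main

import (
	"context"
	"os"

	"github.com/aws/smithy-go/logging"
)

func main() {
	// Entries are written through the standard library logger with an "SDK " prefix.
	logger := logging.NewStandardLogger(os.Stderr)
	logger.Logf(logging.Warn, "retry attempt %d failed", 2)

	// A nil logger resolves to the no-op implementation, so this is always safe to call.
	safe := logging.WithContext(context.Background(), nil)
	safe.Logf(logging.Debug, "dropped silently")
}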
-type Int64Counter interface { - Add(context.Context, int64, ...RecordMetricOption) -} - -// Int64UpDownCounter measures a fluctuating int64 value. -type Int64UpDownCounter interface { - Add(context.Context, int64, ...RecordMetricOption) -} - -// Int64Gauge samples a discrete int64 value. -type Int64Gauge interface { - Sample(context.Context, int64, ...RecordMetricOption) -} - -// Int64Histogram records multiple data points for an int64 value. -type Int64Histogram interface { - Record(context.Context, int64, ...RecordMetricOption) -} - -// Float64Counter measures a monotonically increasing float64 value. -type Float64Counter interface { - Add(context.Context, float64, ...RecordMetricOption) -} - -// Float64UpDownCounter measures a fluctuating float64 value. -type Float64UpDownCounter interface { - Add(context.Context, float64, ...RecordMetricOption) -} - -// Float64Gauge samples a discrete float64 value. -type Float64Gauge interface { - Sample(context.Context, float64, ...RecordMetricOption) -} - -// Float64Histogram records multiple data points for an float64 value. -type Float64Histogram interface { - Record(context.Context, float64, ...RecordMetricOption) -} - -// AsyncInstrument is the universal handle returned for creation of all async -// instruments. -// -// Callers use the Stop() API to unregister the callback passed at instrument -// creation. -type AsyncInstrument interface { - Stop() -} - -// Int64Callback describes a function invoked when an async int64 instrument is -// read. -type Int64Callback func(context.Context, Int64Observer) - -// Int64Observer is the interface passed to async int64 instruments. -// -// Callers use the Observe() API of this interface to report metrics to the -// underlying collector. -type Int64Observer interface { - Observe(context.Context, int64, ...RecordMetricOption) -} - -// Float64Callback describes a function invoked when an async float64 -// instrument is read. -type Float64Callback func(context.Context, Float64Observer) - -// Float64Observer is the interface passed to async int64 instruments. -// -// Callers use the Observe() API of this interface to report metrics to the -// underlying collector. -type Float64Observer interface { - Observe(context.Context, float64, ...RecordMetricOption) -} - -// RecordMetricOption applies configuration to a recorded metric. -type RecordMetricOption func(o *RecordMetricOptions) - -// RecordMetricOptions represents configuration for a recorded metric. -type RecordMetricOptions struct { - Properties smithy.Properties -} diff --git a/vendor/github.com/aws/smithy-go/metrics/nop.go b/vendor/github.com/aws/smithy-go/metrics/nop.go deleted file mode 100644 index fb374e1fb..000000000 --- a/vendor/github.com/aws/smithy-go/metrics/nop.go +++ /dev/null @@ -1,67 +0,0 @@ -package metrics - -import "context" - -// NopMeterProvider is a no-op metrics implementation. -type NopMeterProvider struct{} - -var _ MeterProvider = (*NopMeterProvider)(nil) - -// Meter returns a meter which creates no-op instruments. 
-func (NopMeterProvider) Meter(string, ...MeterOption) Meter { - return nopMeter{} -} - -type nopMeter struct{} - -var _ Meter = (*nopMeter)(nil) - -func (nopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[int64]{}, nil -} -func (nopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil -} -func (nopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { - return nopInstrument[float64]{}, nil -} - -type nopInstrument[N any] struct{} - -func (nopInstrument[N]) Add(context.Context, N, ...RecordMetricOption) {} -func (nopInstrument[N]) Sample(context.Context, N, ...RecordMetricOption) {} -func (nopInstrument[N]) Record(context.Context, N, ...RecordMetricOption) {} -func (nopInstrument[_]) Stop() {} diff --git a/vendor/github.com/aws/smithy-go/middleware/context.go b/vendor/github.com/aws/smithy-go/middleware/context.go deleted file mode 100644 index f51aa4f04..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/context.go +++ /dev/null @@ -1,41 +0,0 @@ -package middleware - -import "context" - -type ( - serviceIDKey struct{} - operationNameKey struct{} -) - -// WithServiceID adds a service ID to the context, scoped to middleware stack -// values. -// -// This API is called in the client runtime when bootstrapping an operation and -// should not typically be used directly. -func WithServiceID(parent context.Context, id string) context.Context { - return WithStackValue(parent, serviceIDKey{}, id) -} - -// GetServiceID retrieves the service ID from the context. This is typically -// the service shape's name from its Smithy model. Service clients for specific -// systems (e.g. AWS SDK) may use an alternate designated value. 
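The metrics surface above is instrument-oriented; with the no-op provider the calls below record nothing, but the pattern is the same for a real MeterProvider implementation (the scope and counter names are arbitrary).

package main

import (
	"context"

	"github.com/aws/smithy-go/metrics"
)

func main() {
	var provider metrics.MeterProvider = metrics.NopMeterProvider{}

	// A Meter is scoped, and instruments are created from it by name.
	meter := provider.Meter("example/scope")

	requests, err := meter.Int64Counter("client.requests")
	if err != nil {
		panic(err)
	}

	// Synchronous instruments are recorded at call sites like this.
	requests.Add(context.Background(), 1)
}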
-func GetServiceID(ctx context.Context) string { - id, _ := GetStackValue(ctx, serviceIDKey{}).(string) - return id -} - -// WithOperationName adds the operation name to the context, scoped to -// middleware stack values. -// -// This API is called in the client runtime when bootstrapping an operation and -// should not typically be used directly. -func WithOperationName(parent context.Context, id string) context.Context { - return WithStackValue(parent, operationNameKey{}, id) -} - -// GetOperationName retrieves the operation name from the context. This is -// typically the operation shape's name from its Smithy model. -func GetOperationName(ctx context.Context) string { - name, _ := GetStackValue(ctx, operationNameKey{}).(string) - return name -} diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go deleted file mode 100644 index 9858928a7..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/doc.go +++ /dev/null @@ -1,67 +0,0 @@ -// Package middleware provides transport agnostic middleware for decorating SDK -// handlers. -// -// The Smithy middleware stack provides ordered behavior to be invoked on an -// underlying handler. The stack is separated into steps that are invoked in a -// static order. A step is a collection of middleware that are injected into a -// ordered list defined by the user. The user may add, insert, swap, and remove a -// step's middleware. When the stack is invoked the step middleware become static, -// and their order cannot be modified. -// -// A stack and its step middleware are **not** safe to modify concurrently. -// -// A stack will use the ordered list of middleware to decorate a underlying -// handler. A handler could be something like an HTTP Client that round trips an -// API operation over HTTP. -// -// Smithy Middleware Stack -// -// A Stack is a collection of middleware that wrap a handler. The stack can be -// broken down into discreet steps. Each step may contain zero or more middleware -// specific to that stack's step. -// -// A Stack Step is a predefined set of middleware that are invoked in a static -// order by the Stack. These steps represent fixed points in the middleware stack -// for organizing specific behavior, such as serialize and build. A Stack Step is -// composed of zero or more middleware that are specific to that step. A step may -// define its own set of input/output parameters the generic input/output -// parameters are cast from. A step calls its middleware recursively, before -// calling the next step in the stack returning the result or error of the step -// middleware decorating the underlying handler. -// -// * Initialize: Prepares the input, and sets any default parameters as needed, -// (e.g. idempotency token, and presigned URLs). -// -// * Serialize: Serializes the prepared input into a data structure that can be -// consumed by the target transport's message, (e.g. REST-JSON serialization). -// -// * Build: Adds additional metadata to the serialized transport message, (e.g. -// HTTP's Content-Length header, or body checksum). Decorations and -// modifications to the message should be copied to all message attempts. -// -// * Finalize: Performs final preparations needed before sending the message. The -// message should already be complete by this stage, and is only alternated to -// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request -// signing). 
-// -// * Deserialize: Reacts to the handler's response returned by the recipient of -// the request message. Deserializes the response into a structured type or -// error above stacks can react to. -// -// Adding Middleware to a Stack Step -// -// Middleware can be added to a step front or back, or relative, by name, to an -// existing middleware in that stack. If a middleware does not have a name, a -// unique name will be generated at the middleware and be added to the step. -// -// // Create middleware stack -// stack := middleware.NewStack() -// -// // Add middleware to stack steps -// stack.Initialize.Add(paramValidationMiddleware, middleware.After) -// stack.Serialize.Add(marshalOperationFoo, middleware.After) -// stack.Deserialize.Add(unmarshalOperationFoo, middleware.After) -// -// // Invoke middleware on handler. -// resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler) -package middleware diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go deleted file mode 100644 index c2f0dbb6b..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/logging.go +++ /dev/null @@ -1,46 +0,0 @@ -package middleware - -import ( - "context" - - "github.com/aws/smithy-go/logging" -) - -// loggerKey is the context value key for which the logger is associated with. -type loggerKey struct{} - -// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger -// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed -// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is. -func GetLogger(ctx context.Context) logging.Logger { - logger, ok := ctx.Value(loggerKey{}).(logging.Logger) - if !ok || logger == nil { - return logging.Nop{} - } - - return logging.WithContext(ctx, logger) -} - -// SetLogger sets the provided logger value on the provided ctx. -func SetLogger(ctx context.Context, logger logging.Logger) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) -} - -type setLogger struct { - Logger logging.Logger -} - -// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context. -func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error { - return stack.Initialize.Add(&setLogger{Logger: logger}, After) -} - -func (a *setLogger) ID() string { - return "SetLogger" -} - -func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, -) { - return next.HandleInitialize(SetLogger(ctx, a.Logger), in) -} diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go deleted file mode 100644 index 7bb7dbcf5..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/metadata.go +++ /dev/null @@ -1,65 +0,0 @@ -package middleware - -// MetadataReader provides an interface for reading metadata from the -// underlying metadata container. -type MetadataReader interface { - Get(key interface{}) interface{} -} - -// Metadata provides storing and reading metadata values. Keys may be any -// comparable value type. Get and set will panic if key is not a comparable -// value type. -// -// Metadata uses lazy initialization, and Set method must be called as an -// addressable value, or pointer. 
Not doing so may cause key/value pair to not -// be set. -type Metadata struct { - values map[interface{}]interface{} -} - -// Get attempts to retrieve the value the key points to. Returns nil if the -// key was not found. -// -// Panics if key type is not comparable. -func (m Metadata) Get(key interface{}) interface{} { - return m.values[key] -} - -// Clone creates a shallow copy of Metadata entries, returning a new Metadata -// value with the original entries copied into it. -func (m Metadata) Clone() Metadata { - vs := make(map[interface{}]interface{}, len(m.values)) - for k, v := range m.values { - vs[k] = v - } - - return Metadata{ - values: vs, - } -} - -// Set stores the value pointed to by the key. If a value already exists at -// that key it will be replaced with the new value. -// -// Set method must be called as an addressable value, or pointer. If Set is not -// called as an addressable value or pointer, the key value pair being set may -// be lost. -// -// Panics if the key type is not comparable. -func (m *Metadata) Set(key, value interface{}) { - if m.values == nil { - m.values = map[interface{}]interface{}{} - } - m.values[key] = value -} - -// Has returns whether the key exists in the metadata. -// -// Panics if the key type is not comparable. -func (m Metadata) Has(key interface{}) bool { - if m.values == nil { - return false - } - _, ok := m.values[key] - return ok -} diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go deleted file mode 100644 index 803b7c751..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/middleware.go +++ /dev/null @@ -1,71 +0,0 @@ -package middleware - -import ( - "context" -) - -// Handler provides the interface for performing the logic to obtain an output, -// or error for the given input. -type Handler interface { - // Handle performs logic to obtain an output for the given input. Handler - // should be decorated with middleware to perform input specific behavior. - Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, - ) -} - -// HandlerFunc provides a wrapper around a function pointer to be used as a -// middleware handler. -type HandlerFunc func(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) - -// Handle invokes the underlying function, returning the result. -func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) { - return fn(ctx, input) -} - -// Middleware provides the interface to call handlers in a chain. -type Middleware interface { - // ID provides a unique identifier for the middleware. - ID() string - - // Performs the middleware's handling of the input, returning the output, - // or error. The middleware can invoke the next Handler if handling should - // continue. - HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( - output interface{}, metadata Metadata, err error, - ) -} - -// decoratedHandler wraps a middleware in order to to call the next handler in -// the chain. -type decoratedHandler struct { - // The next handler to be called. - Next Handler - - // The current middleware decorating the handler. - With Middleware -} - -// Handle implements the Handler interface to handle a operation invocation. 
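Metadata keys follow the context.Context convention of unexported key types; a short sketch (requestIDKey is invented for the example).

package main

import (
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// requestIDKey is an illustrative key type; an unexported struct keeps
// metadata keys collision-free across packages.
type requestIDKey struct{}

func main() {
	var md middleware.Metadata

	// Set must be called on an addressable Metadata value (or a pointer)
	// because the underlying map is lazily initialized.
	md.Set(requestIDKey{}, "req-12345")

	if md.Has(requestIDKey{}) {
		fmt.Println(md.Get(requestIDKey{})) // req-12345
	}

	// Clone copies the entries, so mutating the copy leaves the original intact.
	cp := md.Clone()
	cp.Set(requestIDKey{}, "req-67890")
	fmt.Println(md.Get(requestIDKey{})) // req-12345
}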
-func (m decoratedHandler) Handle(ctx context.Context, input interface{}) ( - output interface{}, metadata Metadata, err error, -) { - return m.With.HandleMiddleware(ctx, input, m.Next) -} - -// DecorateHandler decorates a handler with a middleware. Wrapping the handler -// with the middleware. -func DecorateHandler(h Handler, with ...Middleware) Handler { - for i := len(with) - 1; i >= 0; i-- { - h = decoratedHandler{ - Next: h, - With: with[i], - } - } - - return h -} diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go deleted file mode 100644 index 4b195308c..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go +++ /dev/null @@ -1,268 +0,0 @@ -package middleware - -import "fmt" - -// RelativePosition provides specifying the relative position of a middleware -// in an ordered group. -type RelativePosition int - -// Relative position for middleware in steps. -const ( - After RelativePosition = iota - Before -) - -type ider interface { - ID() string -} - -// orderedIDs provides an ordered collection of items with relative ordering -// by name. -type orderedIDs struct { - order *relativeOrder - items map[string]ider -} - -const baseOrderedItems = 5 - -func newOrderedIDs() *orderedIDs { - return &orderedIDs{ - order: newRelativeOrder(), - items: make(map[string]ider, baseOrderedItems), - } -} - -// Add injects the item to the relative position of the item group. Returns an -// error if the item already exists. -func (g *orderedIDs) Add(m ider, pos RelativePosition) error { - id := m.ID() - if len(id) == 0 { - return fmt.Errorf("empty ID, ID must not be empty") - } - - if err := g.order.Add(pos, id); err != nil { - return err - } - - g.items[id] = m - return nil -} - -// Insert injects the item relative to an existing item id. Returns an error if -// the original item does not exist, or the item being added already exists. -func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error { - if len(m.ID()) == 0 { - return fmt.Errorf("insert ID must not be empty") - } - if len(relativeTo) == 0 { - return fmt.Errorf("relative to ID must not be empty") - } - - if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil { - return err - } - - g.items[m.ID()] = m - return nil -} - -// Get returns the ider identified by id. If ider is not present, returns false. -func (g *orderedIDs) Get(id string) (ider, bool) { - v, ok := g.items[id] - return v, ok -} - -// Swap removes the item by id, replacing it with the new item. Returns an error -// if the original item doesn't exist. -func (g *orderedIDs) Swap(id string, m ider) (ider, error) { - if len(id) == 0 { - return nil, fmt.Errorf("swap from ID must not be empty") - } - - iderID := m.ID() - if len(iderID) == 0 { - return nil, fmt.Errorf("swap to ID must not be empty") - } - - if err := g.order.Swap(id, iderID); err != nil { - return nil, err - } - - removed := g.items[id] - - delete(g.items, id) - g.items[iderID] = m - - return removed, nil -} - -// Remove removes the item by id. Returns an error if the item -// doesn't exist. 
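DecorateHandler and HandlerFunc are enough to show the decoration model outside of a full Stack; the traceInput middleware below is invented for the sketch.

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// traceInput is a hypothetical middleware that rewrites the input before
// delegating to the next handler in the chain.
type traceInput struct{}

func (traceInput) ID() string { return "TraceInput" }

func (traceInput) HandleMiddleware(ctx context.Context, input interface{}, next middleware.Handler) (
	interface{}, middleware.Metadata, error,
) {
	return next.Handle(ctx, fmt.Sprintf("traced(%v)", input))
}

func main() {
	// The innermost handler stands in for a transport round-trip.
	base := middleware.HandlerFunc(func(ctx context.Context, input interface{}) (
		interface{}, middleware.Metadata, error,
	) {
		return fmt.Sprintf("handled %v", input), middleware.Metadata{}, nil
	})

	h := middleware.DecorateHandler(base, traceInput{})
	out, _, err := h.Handle(context.Background(), "request")
	fmt.Println(out, err) // handled traced(request) <nil>
}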
-func (g *orderedIDs) Remove(id string) (ider, error) { - if len(id) == 0 { - return nil, fmt.Errorf("remove ID must not be empty") - } - - if err := g.order.Remove(id); err != nil { - return nil, err - } - - removed := g.items[id] - delete(g.items, id) - return removed, nil -} - -func (g *orderedIDs) List() []string { - items := g.order.List() - order := make([]string, len(items)) - copy(order, items) - return order -} - -// Clear removes all entries and slots. -func (g *orderedIDs) Clear() { - g.order.Clear() - g.items = map[string]ider{} -} - -// GetOrder returns the item in the order it should be invoked in. -func (g *orderedIDs) GetOrder() []interface{} { - order := g.order.List() - ordered := make([]interface{}, len(order)) - for i := 0; i < len(order); i++ { - ordered[i] = g.items[order[i]] - } - - return ordered -} - -// relativeOrder provides ordering of item -type relativeOrder struct { - order []string -} - -func newRelativeOrder() *relativeOrder { - return &relativeOrder{ - order: make([]string, 0, baseOrderedItems), - } -} - -// Add inserts an item into the order relative to the position provided. -func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error { - if len(ids) == 0 { - return nil - } - - for _, id := range ids { - if _, ok := s.has(id); ok { - return fmt.Errorf("already exists, %v", id) - } - } - - switch pos { - case Before: - return s.insert(0, Before, ids...) - - case After: - s.order = append(s.order, ids...) - - default: - return fmt.Errorf("invalid position, %v", int(pos)) - } - - return nil -} - -// Insert injects an item before or after the relative item. Returns -// an error if the relative item does not exist. -func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error { - if len(ids) == 0 { - return nil - } - - for _, id := range ids { - if _, ok := s.has(id); ok { - return fmt.Errorf("already exists, %v", id) - } - } - - i, ok := s.has(relativeTo) - if !ok { - return fmt.Errorf("not found, %v", relativeTo) - } - - return s.insert(i, pos, ids...) -} - -// Swap will replace the item id with the to item. Returns an -// error if the original item id does not exist. Allows swapping out an -// item for another item with the same id. -func (s *relativeOrder) Swap(id, to string) error { - i, ok := s.has(id) - if !ok { - return fmt.Errorf("not found, %v", id) - } - - if _, ok = s.has(to); ok && id != to { - return fmt.Errorf("already exists, %v", to) - } - - s.order[i] = to - return nil -} - -func (s *relativeOrder) Remove(id string) error { - i, ok := s.has(id) - if !ok { - return fmt.Errorf("not found, %v", id) - } - - s.order = append(s.order[:i], s.order[i+1:]...) - return nil -} - -func (s *relativeOrder) List() []string { - return s.order -} - -func (s *relativeOrder) Clear() { - s.order = s.order[0:0] -} - -func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error { - switch pos { - case Before: - n := len(ids) - var src []string - if n <= cap(s.order)-len(s.order) { - s.order = s.order[:len(s.order)+n] - src = s.order - } else { - src = s.order - s.order = make([]string, len(s.order)+n) - copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half - } - copy(s.order[i+n:], src[i:]) - copy(s.order[i:], ids) - case After: - if i == len(s.order)-1 || len(s.order) == 0 { - s.order = append(s.order, ids...) - } else { - s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...) 
- } - - default: - return fmt.Errorf("invalid position, %v", int(pos)) - } - - return nil -} - -func (s *relativeOrder) has(id string) (i int, found bool) { - for i := 0; i < len(s.order); i++ { - if s.order[i] == id { - return i, true - } - } - return 0, false -} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go deleted file mode 100644 index 45ccb5b93..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/stack.go +++ /dev/null @@ -1,209 +0,0 @@ -package middleware - -import ( - "context" - "io" - "strings" -) - -// Stack provides protocol and transport agnostic set of middleware split into -// distinct steps. Steps have specific transitions between them, that are -// managed by the individual step. -// -// Steps are composed as middleware around the underlying handler in the -// following order: -// -// Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler -// -// Any middleware within the chain may choose to stop and return an error or -// response. Since the middleware decorate the handler like a call stack, each -// middleware will receive the result of the next middleware in the chain. -// Middleware that does not need to react to an input, or result must forward -// along the input down the chain, or return the result back up the chain. -// -// Initialize <- Serialize -> Build -> Finalize <- Deserialize <- Handler -type Stack struct { - // Initialize prepares the input, and sets any default parameters as - // needed, (e.g. idempotency token, and presigned URLs). - // - // Takes Input Parameters, and returns result or error. - // - // Receives result or error from Serialize step. - Initialize *InitializeStep - - // Serialize serializes the prepared input into a data structure that can be consumed - // by the target transport's message, (e.g. REST-JSON serialization) - // - // Converts Input Parameters into a Request, and returns the result or error. - // - // Receives result or error from Build step. - Serialize *SerializeStep - - // Build adds additional metadata to the serialized transport message - // (e.g. HTTP's Content-Length header, or body checksum). Decorations and - // modifications to the message should be copied to all message attempts. - // - // Takes Request, and returns result or error. - // - // Receives result or error from Finalize step. - Build *BuildStep - - // Finalize performs final preparations needed before sending the message. The - // message should already be complete by this stage, and is only alternated - // to meet the expectations of the recipient (e.g. Retry and AWS SigV4 - // request signing) - // - // Takes Request, and returns result or error. - // - // Receives result or error from Deserialize step. - Finalize *FinalizeStep - - // Deserialize reacts to the handler's response returned by the recipient of the request - // message. Deserializes the response into a structured type or error above - // stacks can react to. - // - // Should only forward Request to underlying handler. - // - // Takes Request, and returns result or error. - // - // Receives raw response, or error from underlying handler. - Deserialize *DeserializeStep - - id string -} - -// NewStack returns an initialize empty stack. 
-func NewStack(id string, newRequestFn func() interface{}) *Stack { - return &Stack{ - id: id, - Initialize: NewInitializeStep(), - Serialize: NewSerializeStep(newRequestFn), - Build: NewBuildStep(), - Finalize: NewFinalizeStep(), - Deserialize: NewDeserializeStep(), - } -} - -// ID returns the unique ID for the stack as a middleware. -func (s *Stack) ID() string { return s.id } - -// HandleMiddleware invokes the middleware stack decorating the next handler. -// Each step of stack will be invoked in order before calling the next step. -// With the next handler call last. -// -// The input value must be the input parameters of the operation being -// performed. -// -// Will return the result of the operation, or error. -func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( - output interface{}, metadata Metadata, err error, -) { - h := DecorateHandler(next, - s.Initialize, - s.Serialize, - s.Build, - s.Finalize, - s.Deserialize, - ) - - return h.Handle(ctx, input) -} - -// List returns a list of all middleware in the stack by step. -func (s *Stack) List() []string { - var l []string - l = append(l, s.id) - - l = append(l, s.Initialize.ID()) - l = append(l, s.Initialize.List()...) - - l = append(l, s.Serialize.ID()) - l = append(l, s.Serialize.List()...) - - l = append(l, s.Build.ID()) - l = append(l, s.Build.List()...) - - l = append(l, s.Finalize.ID()) - l = append(l, s.Finalize.List()...) - - l = append(l, s.Deserialize.ID()) - l = append(l, s.Deserialize.List()...) - - return l -} - -func (s *Stack) String() string { - var b strings.Builder - - w := &indentWriter{w: &b} - - w.WriteLine(s.id) - w.Push() - - writeStepItems(w, s.Initialize) - writeStepItems(w, s.Serialize) - writeStepItems(w, s.Build) - writeStepItems(w, s.Finalize) - writeStepItems(w, s.Deserialize) - - return b.String() -} - -type stackStepper interface { - ID() string - List() []string -} - -func writeStepItems(w *indentWriter, s stackStepper) { - type lister interface { - List() []string - } - - w.WriteLine(s.ID()) - w.Push() - - defer w.Pop() - - // ignore stack to prevent circular iterations - if _, ok := s.(*Stack); ok { - return - } - - for _, id := range s.List() { - w.WriteLine(id) - } -} - -type stringWriter interface { - io.Writer - WriteString(string) (int, error) - WriteRune(rune) (int, error) -} - -type indentWriter struct { - w stringWriter - depth int -} - -const indentDepth = "\t\t\t\t\t\t\t\t\t\t" - -func (w *indentWriter) Push() { - w.depth++ -} - -func (w *indentWriter) Pop() { - w.depth-- - if w.depth < 0 { - w.depth = 0 - } -} - -func (w *indentWriter) WriteLine(v string) { - w.w.WriteString(indentDepth[:w.depth]) - - v = strings.ReplaceAll(v, "\n", "\\n") - v = strings.ReplaceAll(v, "\r", "\\r") - - w.w.WriteString(v) - w.w.WriteRune('\n') -} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go deleted file mode 100644 index ef96009ba..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/stack_values.go +++ /dev/null @@ -1,100 +0,0 @@ -package middleware - -import ( - "context" - "reflect" - "strings" -) - -// WithStackValue adds a key value pair to the context that is intended to be -// scoped to a stack. Use ClearStackValues to get a new context with all stack -// values cleared. 
-func WithStackValue(ctx context.Context, key, value interface{}) context.Context { - md, _ := ctx.Value(stackValuesKey{}).(*stackValues) - - md = withStackValue(md, key, value) - return context.WithValue(ctx, stackValuesKey{}, md) -} - -// ClearStackValues returns a context without any stack values. -func ClearStackValues(ctx context.Context) context.Context { - return context.WithValue(ctx, stackValuesKey{}, nil) -} - -// GetStackValues returns the value pointed to by the key within the stack -// values, if it is present. -func GetStackValue(ctx context.Context, key interface{}) interface{} { - md, _ := ctx.Value(stackValuesKey{}).(*stackValues) - if md == nil { - return nil - } - - return md.Value(key) -} - -type stackValuesKey struct{} - -type stackValues struct { - key interface{} - value interface{} - parent *stackValues -} - -func withStackValue(parent *stackValues, key, value interface{}) *stackValues { - if key == nil { - panic("nil key") - } - if !reflect.TypeOf(key).Comparable() { - panic("key is not comparable") - } - return &stackValues{key: key, value: value, parent: parent} -} - -func (m *stackValues) Value(key interface{}) interface{} { - if key == m.key { - return m.value - } - - if m.parent == nil { - return nil - } - - return m.parent.Value(key) -} - -func (c *stackValues) String() string { - var str strings.Builder - - cc := c - for cc == nil { - str.WriteString("(" + - reflect.TypeOf(c.key).String() + - ": " + - stringify(cc.value) + - ")") - if cc.parent != nil { - str.WriteString(" -> ") - } - cc = cc.parent - } - str.WriteRune('}') - - return str.String() -} - -type stringer interface { - String() string -} - -// stringify tries a bit to stringify v, without using fmt, since we don't -// want context depending on the unicode tables. This is only used by -// *valueCtx.String(). -func stringify(v interface{}) string { - switch s := v.(type) { - case stringer: - return s.String() - case string: - return s - } - return "" -} diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go deleted file mode 100644 index 7e1d94cae..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_build.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import ( - "context" -) - -// BuildInput provides the input parameters for the BuildMiddleware to consume. -// BuildMiddleware may modify the Request value before forwarding the input -// along to the next BuildHandler. -type BuildInput struct { - Request interface{} -} - -// BuildOutput provides the result returned by the next BuildHandler. -type BuildOutput struct { - Result interface{} -} - -// BuildHandler provides the interface for the next handler the -// BuildMiddleware will call in the middleware chain. -type BuildHandler interface { - HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, - ) -} - -// BuildMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next BuildHandler for further -// processing. -type BuildMiddleware interface { - // Unique ID for the middleware in theBuildStep. The step does not allow - // duplicate IDs. - ID() string - - // Invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. 
- HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( - out BuildOutput, metadata Metadata, err error, - ) -} - -// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided, -// and the func to be invoked. -func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware { - return buildMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type buildMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error) -} - -// ID returns the unique ID for the middleware. -func (s buildMiddlewareFunc) ID() string { return s.id } - -// HandleBuild invokes the middleware Fn. -func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( - out BuildOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ BuildMiddleware = (buildMiddlewareFunc{}) - -// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on -// a handler. -type BuildStep struct { - ids *orderedIDs -} - -// NewBuildStep returns a BuildStep ready to have middleware for -// initialization added to it. -func NewBuildStep() *BuildStep { - return &BuildStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*BuildStep)(nil) - -// ID returns the unique name of the step as a middleware. -func (s *BuildStep) ID() string { - return "Build stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h BuildHandler = buildWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedBuildHandler{ - Next: h, - With: order[i].(BuildMiddleware), - } - } - - sIn := BuildInput{ - Request: in, - } - - res, metadata, err := h.HandleBuild(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *BuildStep) Get(id string) (BuildMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(BuildMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware id. -// Returns an error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or an error if the middleware to be removed -// doesn't exist. -func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(BuildMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. 
-func (s *BuildStep) Remove(id string) (BuildMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(BuildMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *BuildStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *BuildStep) Clear() { - s.ids.Clear() -} - -type buildWrapHandler struct { - Next Handler -} - -var _ BuildHandler = (*buildWrapHandler)(nil) - -// Implements BuildHandler, converts types and delegates to underlying -// generic handler. -func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return BuildOutput{ - Result: res, - }, metadata, err -} - -type decoratedBuildHandler struct { - Next BuildHandler - With BuildMiddleware -} - -var _ BuildHandler = (*decoratedBuildHandler)(nil) - -func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) ( - out BuildOutput, metadata Metadata, err error, -) { - return h.With.HandleBuild(ctx, in, h.Next) -} - -// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler. -type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error) - -// HandleBuild invokes the wrapped function with the provided arguments. -func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) { - return b(ctx, in) -} - -var _ BuildHandler = BuildHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go deleted file mode 100644 index 448607215..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go +++ /dev/null @@ -1,217 +0,0 @@ -package middleware - -import ( - "context" -) - -// DeserializeInput provides the input parameters for the DeserializeInput to -// consume. DeserializeMiddleware should not modify the Request, and instead -// forward it along to the next DeserializeHandler. -type DeserializeInput struct { - Request interface{} -} - -// DeserializeOutput provides the result returned by the next -// DeserializeHandler. The DeserializeMiddleware should deserialize the -// RawResponse into a Result that can be consumed by middleware higher up in -// the stack. -type DeserializeOutput struct { - RawResponse interface{} - Result interface{} -} - -// DeserializeHandler provides the interface for the next handler the -// DeserializeMiddleware will call in the middleware chain. -type DeserializeHandler interface { - HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, - ) -} - -// DeserializeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next DeserializeHandler for further -// processing. -type DeserializeMiddleware interface { - // ID returns a unique ID for the middleware in the DeserializeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleDeserialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. 
- HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( - out DeserializeOutput, metadata Metadata, err error, - ) -} - -// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID -// provided, and the func to be invoked. -func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { - return deserializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type deserializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, DeserializeInput, DeserializeHandler) ( - DeserializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s deserializeMiddlewareFunc) ID() string { return s.id } - -// HandleDeserialize invokes the middleware Fn. -func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( - out DeserializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) - -// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be -// invoked on a handler. -type DeserializeStep struct { - ids *orderedIDs -} - -// NewDeserializeStep returns a DeserializeStep ready to have middleware for -// initialization added to it. -func NewDeserializeStep() *DeserializeStep { - return &DeserializeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*DeserializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *DeserializeStep) ID() string { - return "Deserialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h DeserializeHandler = deserializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedDeserializeHandler{ - Next: h, - With: order[i].(DeserializeMiddleware), - } - } - - sIn := DeserializeInput{ - Request: in, - } - - res, metadata, err := h.HandleDeserialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(DeserializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. 
-func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(DeserializeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(DeserializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *DeserializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *DeserializeStep) Clear() { - s.ids.Clear() -} - -type deserializeWrapHandler struct { - Next Handler -} - -var _ DeserializeHandler = (*deserializeWrapHandler)(nil) - -// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying -// generic handler. -func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, -) { - resp, metadata, err := w.Next.Handle(ctx, in.Request) - return DeserializeOutput{ - RawResponse: resp, - }, metadata, err -} - -type decoratedDeserializeHandler struct { - Next DeserializeHandler - With DeserializeMiddleware -} - -var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil) - -func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( - out DeserializeOutput, metadata Metadata, err error, -) { - return h.With.HandleDeserialize(ctx, in, h.Next) -} - -// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler. -type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error) - -// HandleDeserialize invokes the wrapped function with the given arguments. -func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) { - return d(ctx, in) -} - -var _ DeserializeHandler = DeserializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go deleted file mode 100644 index 065e3885d..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import "context" - -// FinalizeInput provides the input parameters for the FinalizeMiddleware to -// consume. FinalizeMiddleware may modify the Request value before forwarding -// the FinalizeInput along to the next next FinalizeHandler. -type FinalizeInput struct { - Request interface{} -} - -// FinalizeOutput provides the result returned by the next FinalizeHandler. -type FinalizeOutput struct { - Result interface{} -} - -// FinalizeHandler provides the interface for the next handler the -// FinalizeMiddleware will call in the middleware chain. -type FinalizeHandler interface { - HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, - ) -} - -// FinalizeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next FinalizeHandler for further -// processing. -type FinalizeMiddleware interface { - // ID returns a unique ID for the middleware in the FinalizeStep. The step does not - // allow duplicate IDs. 
- ID() string - - // HandleFinalize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( - out FinalizeOutput, metadata Metadata, err error, - ) -} - -// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID -// provided, and the func to be invoked. -func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { - return finalizeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type finalizeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, FinalizeInput, FinalizeHandler) ( - FinalizeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s finalizeMiddlewareFunc) ID() string { return s.id } - -// HandleFinalize invokes the middleware Fn. -func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( - out FinalizeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) - -// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be -// invoked on a handler. -type FinalizeStep struct { - ids *orderedIDs -} - -// NewFinalizeStep returns a FinalizeStep ready to have middleware for -// initialization added to it. -func NewFinalizeStep() *FinalizeStep { - return &FinalizeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*FinalizeStep)(nil) - -// ID returns the unique id of the step as a middleware. -func (s *FinalizeStep) ID() string { - return "Finalize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h FinalizeHandler = finalizeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedFinalizeHandler{ - Next: h, - With: order[i].(FinalizeMiddleware), - } - } - - sIn := FinalizeInput{ - Request: in, - } - - res, metadata, err := h.HandleFinalize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(FinalizeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. 
-// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(FinalizeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(FinalizeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *FinalizeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *FinalizeStep) Clear() { - s.ids.Clear() -} - -type finalizeWrapHandler struct { - Next Handler -} - -var _ FinalizeHandler = (*finalizeWrapHandler)(nil) - -// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying -// generic handler. -func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return FinalizeOutput{ - Result: res, - }, metadata, err -} - -type decoratedFinalizeHandler struct { - Next FinalizeHandler - With FinalizeMiddleware -} - -var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil) - -func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( - out FinalizeOutput, metadata Metadata, err error, -) { - return h.With.HandleFinalize(ctx, in, h.Next) -} - -// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. -type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) - -// HandleFinalize invokes the wrapped function with the given arguments. -func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { - return f(ctx, in) -} - -var _ FinalizeHandler = FinalizeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go deleted file mode 100644 index fe359144d..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go +++ /dev/null @@ -1,211 +0,0 @@ -package middleware - -import "context" - -// InitializeInput wraps the input parameters for the InitializeMiddlewares to -// consume. InitializeMiddleware may modify the parameter value before -// forwarding it along to the next InitializeHandler. -type InitializeInput struct { - Parameters interface{} -} - -// InitializeOutput provides the result returned by the next InitializeHandler. -type InitializeOutput struct { - Result interface{} -} - -// InitializeHandler provides the interface for the next handler the -// InitializeMiddleware will call in the middleware chain. -type InitializeHandler interface { - HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, - ) -} - -// InitializeMiddleware provides the interface for middleware specific to the -// initialize step. Delegates to the next InitializeHandler for further -// processing. -type InitializeMiddleware interface { - // ID returns a unique ID for the middleware in the InitializeStep. The step does not - // allow duplicate IDs. 
- ID() string - - // HandleInitialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, - ) -} - -// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided, -// and the func to be invoked. -func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware { - return initializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type initializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, InitializeInput, InitializeHandler) ( - InitializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s initializeMiddlewareFunc) ID() string { return s.id } - -// HandleInitialize invokes the middleware Fn. -func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( - out InitializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ InitializeMiddleware = (initializeMiddlewareFunc{}) - -// InitializeStep provides the ordered grouping of InitializeMiddleware to be -// invoked on a handler. -type InitializeStep struct { - ids *orderedIDs -} - -// NewInitializeStep returns an InitializeStep ready to have middleware for -// initialization added to it. -func NewInitializeStep() *InitializeStep { - return &InitializeStep{ - ids: newOrderedIDs(), - } -} - -var _ Middleware = (*InitializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *InitializeStep) ID() string { - return "Initialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h InitializeHandler = initializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedInitializeHandler{ - Next: h, - With: order[i].(InitializeMiddleware), - } - } - - sIn := InitializeInput{ - Parameters: in, - } - - res, metadata, err := h.HandleInitialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. -func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(InitializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. 
-func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(InitializeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(InitializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *InitializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *InitializeStep) Clear() { - s.ids.Clear() -} - -type initializeWrapHandler struct { - Next Handler -} - -var _ InitializeHandler = (*initializeWrapHandler)(nil) - -// HandleInitialize implements InitializeHandler, converts types and delegates to underlying -// generic handler. -func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Parameters) - return InitializeOutput{ - Result: res, - }, metadata, err -} - -type decoratedInitializeHandler struct { - Next InitializeHandler - With InitializeMiddleware -} - -var _ InitializeHandler = (*decoratedInitializeHandler)(nil) - -func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( - out InitializeOutput, metadata Metadata, err error, -) { - return h.With.HandleInitialize(ctx, in, h.Next) -} - -// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler. -type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error) - -// HandleInitialize calls the wrapped function with the provided arguments. -func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { - return i(ctx, in) -} - -var _ InitializeHandler = InitializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go deleted file mode 100644 index 114bafced..000000000 --- a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go +++ /dev/null @@ -1,219 +0,0 @@ -package middleware - -import "context" - -// SerializeInput provides the input parameters for the SerializeMiddleware to -// consume. SerializeMiddleware may modify the Request value before forwarding -// SerializeInput along to the next SerializeHandler. The Parameters member -// should not be modified by SerializeMiddleware, InitializeMiddleware should -// be responsible for modifying the provided Parameter value. -type SerializeInput struct { - Parameters interface{} - Request interface{} -} - -// SerializeOutput provides the result returned by the next SerializeHandler. -type SerializeOutput struct { - Result interface{} -} - -// SerializeHandler provides the interface for the next handler the -// SerializeMiddleware will call in the middleware chain. 
-type SerializeHandler interface { - HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, - ) -} - -// SerializeMiddleware provides the interface for middleware specific to the -// serialize step. Delegates to the next SerializeHandler for further -// processing. -type SerializeMiddleware interface { - // ID returns a unique ID for the middleware in the SerializeStep. The step does not - // allow duplicate IDs. - ID() string - - // HandleSerialize invokes the middleware behavior which must delegate to the next handler - // for the middleware chain to continue. The method must return a result or - // error to its caller. - HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( - out SerializeOutput, metadata Metadata, err error, - ) -} - -// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID -// provided, and the func to be invoked. -func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { - return serializeMiddlewareFunc{ - id: id, - fn: fn, - } -} - -type serializeMiddlewareFunc struct { - // Unique ID for the middleware. - id string - - // Middleware function to be called. - fn func(context.Context, SerializeInput, SerializeHandler) ( - SerializeOutput, Metadata, error, - ) -} - -// ID returns the unique ID for the middleware. -func (s serializeMiddlewareFunc) ID() string { return s.id } - -// HandleSerialize invokes the middleware Fn. -func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( - out SerializeOutput, metadata Metadata, err error, -) { - return s.fn(ctx, in, next) -} - -var _ SerializeMiddleware = (serializeMiddlewareFunc{}) - -// SerializeStep provides the ordered grouping of SerializeMiddleware to be -// invoked on a handler. -type SerializeStep struct { - newRequest func() interface{} - ids *orderedIDs -} - -// NewSerializeStep returns a SerializeStep ready to have middleware for -// initialization added to it. The newRequest func parameter is used to -// initialize the transport specific request for the stack SerializeStep to -// serialize the input parameters into. -func NewSerializeStep(newRequest func() interface{}) *SerializeStep { - return &SerializeStep{ - ids: newOrderedIDs(), - newRequest: newRequest, - } -} - -var _ Middleware = (*SerializeStep)(nil) - -// ID returns the unique ID of the step as a middleware. -func (s *SerializeStep) ID() string { - return "Serialize stack step" -} - -// HandleMiddleware invokes the middleware by decorating the next handler -// provided. Returns the result of the middleware and handler being invoked. -// -// Implements Middleware interface. -func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( - out interface{}, metadata Metadata, err error, -) { - order := s.ids.GetOrder() - - var h SerializeHandler = serializeWrapHandler{Next: next} - for i := len(order) - 1; i >= 0; i-- { - h = decoratedSerializeHandler{ - Next: h, - With: order[i].(SerializeMiddleware), - } - } - - sIn := SerializeInput{ - Parameters: in, - Request: s.newRequest(), - } - - res, metadata, err := h.HandleSerialize(ctx, sIn) - return res.Result, metadata, err -} - -// Get retrieves the middleware identified by id. If the middleware is not present, returns false. 
-func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { - get, ok := s.ids.Get(id) - if !ok { - return nil, false - } - return get.(SerializeMiddleware), ok -} - -// Add injects the middleware to the relative position of the middleware group. -// Returns an error if the middleware already exists. -func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { - return s.ids.Add(m, pos) -} - -// Insert injects the middleware relative to an existing middleware ID. -// Returns error if the original middleware does not exist, or the middleware -// being added already exists. -func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { - return s.ids.Insert(m, relativeTo, pos) -} - -// Swap removes the middleware by id, replacing it with the new middleware. -// Returns the middleware removed, or error if the middleware to be removed -// doesn't exist. -func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { - removed, err := s.ids.Swap(id, m) - if err != nil { - return nil, err - } - - return removed.(SerializeMiddleware), nil -} - -// Remove removes the middleware by id. Returns error if the middleware -// doesn't exist. -func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { - removed, err := s.ids.Remove(id) - if err != nil { - return nil, err - } - - return removed.(SerializeMiddleware), nil -} - -// List returns a list of the middleware in the step. -func (s *SerializeStep) List() []string { - return s.ids.List() -} - -// Clear removes all middleware in the step. -func (s *SerializeStep) Clear() { - s.ids.Clear() -} - -type serializeWrapHandler struct { - Next Handler -} - -var _ SerializeHandler = (*serializeWrapHandler)(nil) - -// Implements SerializeHandler, converts types and delegates to underlying -// generic handler. -func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, -) { - res, metadata, err := w.Next.Handle(ctx, in.Request) - return SerializeOutput{ - Result: res, - }, metadata, err -} - -type decoratedSerializeHandler struct { - Next SerializeHandler - With SerializeMiddleware -} - -var _ SerializeHandler = (*decoratedSerializeHandler)(nil) - -func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( - out SerializeOutput, metadata Metadata, err error, -) { - return h.With.HandleSerialize(ctx, in, h.Next) -} - -// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. -type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) - -// HandleSerialize calls the wrapped function with the provided arguments. 
-func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { - return s(ctx, in) -} - -var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml deleted file mode 100644 index 9d94b7cbd..000000000 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ /dev/null @@ -1,10 +0,0 @@ -[dependencies] - "github.com/jmespath/go-jmespath" = "v0.4.0" - -[modules] - - [modules.codegen] - no_tag = true - - [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"] - no_tag = true diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go deleted file mode 100644 index 004d78f21..000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go +++ /dev/null @@ -1,30 +0,0 @@ -package requestcompression - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" -) - -func gzipCompress(input io.Reader) ([]byte, error) { - var b bytes.Buffer - w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression) - if err != nil { - return nil, fmt.Errorf("failed to create gzip writer, %v", err) - } - - inBytes, err := io.ReadAll(input) - if err != nil { - return nil, fmt.Errorf("failed read payload to compress, %v", err) - } - - if _, err = w.Write(inBytes); err != nil { - return nil, fmt.Errorf("failed to write payload to be compressed, %v", err) - } - if err = w.Close(); err != nil { - return nil, fmt.Errorf("failed to flush payload being compressed, %v", err) - } - - return b.Bytes(), nil -} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go deleted file mode 100644 index 06c16afc1..000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go +++ /dev/null @@ -1,52 +0,0 @@ -package requestcompression - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" - "io" - "net/http" -) - -const captureUncompressedRequestID = "CaptureUncompressedRequest" - -// AddCaptureUncompressedRequestMiddleware captures http request before compress encoding for check -func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error { - return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{ - buf: buf, - }, "RequestCompression", middleware.Before) -} - -type captureUncompressedRequestMiddleware struct { - req *http.Request - buf *bytes.Buffer - bytes []byte -} - -// ID returns id of the captureUncompressedRequestMiddleware -func (*captureUncompressedRequestMiddleware) ID() string { - return captureUncompressedRequestID -} - -// HandleSerialize captures request payload before it is compressed by request compression middleware -func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - output middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - request, ok := input.Request.(*smithyhttp.Request) - if !ok { - return output, metadata, fmt.Errorf("error when retrieving http request") - } - - _, err = io.Copy(m.buf, request.GetStream()) - if err != nil { - return output, metadata, fmt.Errorf("error when 
copying http request stream: %q", err) - } - if err = request.RewindStream(); err != nil { - return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err) - } - - return next.HandleSerialize(ctx, input) -} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go deleted file mode 100644 index 7c4147603..000000000 --- a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go +++ /dev/null @@ -1,103 +0,0 @@ -// Package requestcompression implements runtime support for smithy-modeled -// request compression. -// -// This package is designated as private and is intended for use only by the -// smithy client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package requestcompression - -import ( - "bytes" - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/transport/http" - "io" -) - -const MaxRequestMinCompressSizeBytes = 10485760 - -// Enumeration values for supported compress Algorithms. -const ( - GZIP = "gzip" -) - -type compressFunc func(io.Reader) ([]byte, error) - -var allowedAlgorithms = map[string]compressFunc{ - GZIP: gzipCompress, -} - -// AddRequestCompression add requestCompression middleware to op stack -func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error { - return stack.Serialize.Add(&requestCompression{ - disableRequestCompression: disabled, - requestMinCompressSizeBytes: minBytes, - compressAlgorithms: algorithms, - }, middleware.After) -} - -type requestCompression struct { - disableRequestCompression bool - requestMinCompressSizeBytes int64 - compressAlgorithms []string -} - -// ID returns the ID of the middleware -func (m requestCompression) ID() string { - return "RequestCompression" -} - -// HandleSerialize gzip compress the request's stream/body if enabled by config fields -func (m requestCompression) HandleSerialize( - ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, -) { - if m.disableRequestCompression { - return next.HandleSerialize(ctx, in) - } - // still need to check requestMinCompressSizeBytes in case it is out of range after service client config - if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes { - return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes) - } - - req, ok := in.Request.(*http.Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - for _, algorithm := range m.compressAlgorithms { - compressFunc := allowedAlgorithms[algorithm] - if compressFunc != nil { - if stream := req.GetStream(); stream != nil { - size, found, err := req.StreamLength() - if err != nil { - return out, metadata, fmt.Errorf("error while finding request stream length, %v", err) - } else if !found || size < m.requestMinCompressSizeBytes { - return next.HandleSerialize(ctx, in) - } - - compressedBytes, err := compressFunc(stream) - if err != nil { - return out, metadata, fmt.Errorf("failed to compress request stream, %v", err) - } - - var newReq *http.Request - if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil { - 
return out, metadata, fmt.Errorf("failed to set request stream, %v", err) - } - *req = *newReq - - if val := req.Header.Get("Content-Encoding"); val != "" { - req.Header.Set("Content-Encoding", fmt.Sprintf("%s, %s", val, algorithm)) - } else { - req.Header.Set("Content-Encoding", algorithm) - } - } - break - } - } - - return next.HandleSerialize(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go deleted file mode 100644 index 68df4c4e0..000000000 --- a/vendor/github.com/aws/smithy-go/properties.go +++ /dev/null @@ -1,69 +0,0 @@ -package smithy - -import "maps" - -// PropertiesReader provides an interface for reading metadata from the -// underlying metadata container. -type PropertiesReader interface { - Get(key any) any -} - -// Properties provides storing and reading metadata values. Keys may be any -// comparable value type. Get and Set will panic if a key is not comparable. -// -// The zero value for a Properties instance is ready for reads/writes without -// any additional initialization. -type Properties struct { - values map[any]any -} - -// Get attempts to retrieve the value the key points to. Returns nil if the -// key was not found. -// -// Panics if key type is not comparable. -func (m *Properties) Get(key any) any { - m.lazyInit() - return m.values[key] -} - -// Set stores the value pointed to by the key. If a value already exists at -// that key it will be replaced with the new value. -// -// Panics if the key type is not comparable. -func (m *Properties) Set(key, value any) { - m.lazyInit() - m.values[key] = value -} - -// Has returns whether the key exists in the metadata. -// -// Panics if the key type is not comparable. -func (m *Properties) Has(key any) bool { - m.lazyInit() - _, ok := m.values[key] - return ok -} - -// SetAll accepts all of the given Properties into the receiver, overwriting -// any existing keys in the case of conflicts. -func (m *Properties) SetAll(other *Properties) { - if other.values == nil { - return - } - - m.lazyInit() - for k, v := range other.values { - m.values[k] = v - } -} - -// Values returns a shallow clone of the property set's values. -func (m *Properties) Values() map[any]any { - return maps.Clone(m.values) -} - -func (m *Properties) lazyInit() { - if m.values == nil { - m.values = map[any]any{} - } -} diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go deleted file mode 100644 index bc1f69961..000000000 --- a/vendor/github.com/aws/smithy-go/ptr/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package ptr provides utilities for converting scalar literal type values to and from pointers inline. -package ptr - -//go:generate go run -tags codegen generate.go -//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go deleted file mode 100644 index a2845bb2c..000000000 --- a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go +++ /dev/null @@ -1,601 +0,0 @@ -// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. -package ptr - -import ( - "time" -) - -// ToBool returns bool value dereferenced if the passed -// in pointer was not nil. Returns a bool zero value if the -// pointer was nil. -func ToBool(p *bool) (v bool) { - if p == nil { - return v - } - - return *p -} - -// ToBoolSlice returns a slice of bool values, that are -// dereferenced if the passed in pointer was not nil. Returns a bool -// zero value if the pointer was nil. 
-func ToBoolSlice(vs []*bool) []bool { - ps := make([]bool, len(vs)) - for i, v := range vs { - ps[i] = ToBool(v) - } - - return ps -} - -// ToBoolMap returns a map of bool values, that are -// dereferenced if the passed in pointer was not nil. The bool -// zero value is used if the pointer was nil. -func ToBoolMap(vs map[string]*bool) map[string]bool { - ps := make(map[string]bool, len(vs)) - for k, v := range vs { - ps[k] = ToBool(v) - } - - return ps -} - -// ToByte returns byte value dereferenced if the passed -// in pointer was not nil. Returns a byte zero value if the -// pointer was nil. -func ToByte(p *byte) (v byte) { - if p == nil { - return v - } - - return *p -} - -// ToByteSlice returns a slice of byte values, that are -// dereferenced if the passed in pointer was not nil. Returns a byte -// zero value if the pointer was nil. -func ToByteSlice(vs []*byte) []byte { - ps := make([]byte, len(vs)) - for i, v := range vs { - ps[i] = ToByte(v) - } - - return ps -} - -// ToByteMap returns a map of byte values, that are -// dereferenced if the passed in pointer was not nil. The byte -// zero value is used if the pointer was nil. -func ToByteMap(vs map[string]*byte) map[string]byte { - ps := make(map[string]byte, len(vs)) - for k, v := range vs { - ps[k] = ToByte(v) - } - - return ps -} - -// ToString returns string value dereferenced if the passed -// in pointer was not nil. Returns a string zero value if the -// pointer was nil. -func ToString(p *string) (v string) { - if p == nil { - return v - } - - return *p -} - -// ToStringSlice returns a slice of string values, that are -// dereferenced if the passed in pointer was not nil. Returns a string -// zero value if the pointer was nil. -func ToStringSlice(vs []*string) []string { - ps := make([]string, len(vs)) - for i, v := range vs { - ps[i] = ToString(v) - } - - return ps -} - -// ToStringMap returns a map of string values, that are -// dereferenced if the passed in pointer was not nil. The string -// zero value is used if the pointer was nil. -func ToStringMap(vs map[string]*string) map[string]string { - ps := make(map[string]string, len(vs)) - for k, v := range vs { - ps[k] = ToString(v) - } - - return ps -} - -// ToInt returns int value dereferenced if the passed -// in pointer was not nil. Returns a int zero value if the -// pointer was nil. -func ToInt(p *int) (v int) { - if p == nil { - return v - } - - return *p -} - -// ToIntSlice returns a slice of int values, that are -// dereferenced if the passed in pointer was not nil. Returns a int -// zero value if the pointer was nil. -func ToIntSlice(vs []*int) []int { - ps := make([]int, len(vs)) - for i, v := range vs { - ps[i] = ToInt(v) - } - - return ps -} - -// ToIntMap returns a map of int values, that are -// dereferenced if the passed in pointer was not nil. The int -// zero value is used if the pointer was nil. -func ToIntMap(vs map[string]*int) map[string]int { - ps := make(map[string]int, len(vs)) - for k, v := range vs { - ps[k] = ToInt(v) - } - - return ps -} - -// ToInt8 returns int8 value dereferenced if the passed -// in pointer was not nil. Returns a int8 zero value if the -// pointer was nil. -func ToInt8(p *int8) (v int8) { - if p == nil { - return v - } - - return *p -} - -// ToInt8Slice returns a slice of int8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int8 -// zero value if the pointer was nil. 
-func ToInt8Slice(vs []*int8) []int8 { - ps := make([]int8, len(vs)) - for i, v := range vs { - ps[i] = ToInt8(v) - } - - return ps -} - -// ToInt8Map returns a map of int8 values, that are -// dereferenced if the passed in pointer was not nil. The int8 -// zero value is used if the pointer was nil. -func ToInt8Map(vs map[string]*int8) map[string]int8 { - ps := make(map[string]int8, len(vs)) - for k, v := range vs { - ps[k] = ToInt8(v) - } - - return ps -} - -// ToInt16 returns int16 value dereferenced if the passed -// in pointer was not nil. Returns a int16 zero value if the -// pointer was nil. -func ToInt16(p *int16) (v int16) { - if p == nil { - return v - } - - return *p -} - -// ToInt16Slice returns a slice of int16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int16 -// zero value if the pointer was nil. -func ToInt16Slice(vs []*int16) []int16 { - ps := make([]int16, len(vs)) - for i, v := range vs { - ps[i] = ToInt16(v) - } - - return ps -} - -// ToInt16Map returns a map of int16 values, that are -// dereferenced if the passed in pointer was not nil. The int16 -// zero value is used if the pointer was nil. -func ToInt16Map(vs map[string]*int16) map[string]int16 { - ps := make(map[string]int16, len(vs)) - for k, v := range vs { - ps[k] = ToInt16(v) - } - - return ps -} - -// ToInt32 returns int32 value dereferenced if the passed -// in pointer was not nil. Returns a int32 zero value if the -// pointer was nil. -func ToInt32(p *int32) (v int32) { - if p == nil { - return v - } - - return *p -} - -// ToInt32Slice returns a slice of int32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int32 -// zero value if the pointer was nil. -func ToInt32Slice(vs []*int32) []int32 { - ps := make([]int32, len(vs)) - for i, v := range vs { - ps[i] = ToInt32(v) - } - - return ps -} - -// ToInt32Map returns a map of int32 values, that are -// dereferenced if the passed in pointer was not nil. The int32 -// zero value is used if the pointer was nil. -func ToInt32Map(vs map[string]*int32) map[string]int32 { - ps := make(map[string]int32, len(vs)) - for k, v := range vs { - ps[k] = ToInt32(v) - } - - return ps -} - -// ToInt64 returns int64 value dereferenced if the passed -// in pointer was not nil. Returns a int64 zero value if the -// pointer was nil. -func ToInt64(p *int64) (v int64) { - if p == nil { - return v - } - - return *p -} - -// ToInt64Slice returns a slice of int64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a int64 -// zero value if the pointer was nil. -func ToInt64Slice(vs []*int64) []int64 { - ps := make([]int64, len(vs)) - for i, v := range vs { - ps[i] = ToInt64(v) - } - - return ps -} - -// ToInt64Map returns a map of int64 values, that are -// dereferenced if the passed in pointer was not nil. The int64 -// zero value is used if the pointer was nil. -func ToInt64Map(vs map[string]*int64) map[string]int64 { - ps := make(map[string]int64, len(vs)) - for k, v := range vs { - ps[k] = ToInt64(v) - } - - return ps -} - -// ToUint returns uint value dereferenced if the passed -// in pointer was not nil. Returns a uint zero value if the -// pointer was nil. -func ToUint(p *uint) (v uint) { - if p == nil { - return v - } - - return *p -} - -// ToUintSlice returns a slice of uint values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint -// zero value if the pointer was nil. 
-func ToUintSlice(vs []*uint) []uint { - ps := make([]uint, len(vs)) - for i, v := range vs { - ps[i] = ToUint(v) - } - - return ps -} - -// ToUintMap returns a map of uint values, that are -// dereferenced if the passed in pointer was not nil. The uint -// zero value is used if the pointer was nil. -func ToUintMap(vs map[string]*uint) map[string]uint { - ps := make(map[string]uint, len(vs)) - for k, v := range vs { - ps[k] = ToUint(v) - } - - return ps -} - -// ToUint8 returns uint8 value dereferenced if the passed -// in pointer was not nil. Returns a uint8 zero value if the -// pointer was nil. -func ToUint8(p *uint8) (v uint8) { - if p == nil { - return v - } - - return *p -} - -// ToUint8Slice returns a slice of uint8 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint8 -// zero value if the pointer was nil. -func ToUint8Slice(vs []*uint8) []uint8 { - ps := make([]uint8, len(vs)) - for i, v := range vs { - ps[i] = ToUint8(v) - } - - return ps -} - -// ToUint8Map returns a map of uint8 values, that are -// dereferenced if the passed in pointer was not nil. The uint8 -// zero value is used if the pointer was nil. -func ToUint8Map(vs map[string]*uint8) map[string]uint8 { - ps := make(map[string]uint8, len(vs)) - for k, v := range vs { - ps[k] = ToUint8(v) - } - - return ps -} - -// ToUint16 returns uint16 value dereferenced if the passed -// in pointer was not nil. Returns a uint16 zero value if the -// pointer was nil. -func ToUint16(p *uint16) (v uint16) { - if p == nil { - return v - } - - return *p -} - -// ToUint16Slice returns a slice of uint16 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint16 -// zero value if the pointer was nil. -func ToUint16Slice(vs []*uint16) []uint16 { - ps := make([]uint16, len(vs)) - for i, v := range vs { - ps[i] = ToUint16(v) - } - - return ps -} - -// ToUint16Map returns a map of uint16 values, that are -// dereferenced if the passed in pointer was not nil. The uint16 -// zero value is used if the pointer was nil. -func ToUint16Map(vs map[string]*uint16) map[string]uint16 { - ps := make(map[string]uint16, len(vs)) - for k, v := range vs { - ps[k] = ToUint16(v) - } - - return ps -} - -// ToUint32 returns uint32 value dereferenced if the passed -// in pointer was not nil. Returns a uint32 zero value if the -// pointer was nil. -func ToUint32(p *uint32) (v uint32) { - if p == nil { - return v - } - - return *p -} - -// ToUint32Slice returns a slice of uint32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint32 -// zero value if the pointer was nil. -func ToUint32Slice(vs []*uint32) []uint32 { - ps := make([]uint32, len(vs)) - for i, v := range vs { - ps[i] = ToUint32(v) - } - - return ps -} - -// ToUint32Map returns a map of uint32 values, that are -// dereferenced if the passed in pointer was not nil. The uint32 -// zero value is used if the pointer was nil. -func ToUint32Map(vs map[string]*uint32) map[string]uint32 { - ps := make(map[string]uint32, len(vs)) - for k, v := range vs { - ps[k] = ToUint32(v) - } - - return ps -} - -// ToUint64 returns uint64 value dereferenced if the passed -// in pointer was not nil. Returns a uint64 zero value if the -// pointer was nil. -func ToUint64(p *uint64) (v uint64) { - if p == nil { - return v - } - - return *p -} - -// ToUint64Slice returns a slice of uint64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a uint64 -// zero value if the pointer was nil. 
-func ToUint64Slice(vs []*uint64) []uint64 { - ps := make([]uint64, len(vs)) - for i, v := range vs { - ps[i] = ToUint64(v) - } - - return ps -} - -// ToUint64Map returns a map of uint64 values, that are -// dereferenced if the passed in pointer was not nil. The uint64 -// zero value is used if the pointer was nil. -func ToUint64Map(vs map[string]*uint64) map[string]uint64 { - ps := make(map[string]uint64, len(vs)) - for k, v := range vs { - ps[k] = ToUint64(v) - } - - return ps -} - -// ToFloat32 returns float32 value dereferenced if the passed -// in pointer was not nil. Returns a float32 zero value if the -// pointer was nil. -func ToFloat32(p *float32) (v float32) { - if p == nil { - return v - } - - return *p -} - -// ToFloat32Slice returns a slice of float32 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float32 -// zero value if the pointer was nil. -func ToFloat32Slice(vs []*float32) []float32 { - ps := make([]float32, len(vs)) - for i, v := range vs { - ps[i] = ToFloat32(v) - } - - return ps -} - -// ToFloat32Map returns a map of float32 values, that are -// dereferenced if the passed in pointer was not nil. The float32 -// zero value is used if the pointer was nil. -func ToFloat32Map(vs map[string]*float32) map[string]float32 { - ps := make(map[string]float32, len(vs)) - for k, v := range vs { - ps[k] = ToFloat32(v) - } - - return ps -} - -// ToFloat64 returns float64 value dereferenced if the passed -// in pointer was not nil. Returns a float64 zero value if the -// pointer was nil. -func ToFloat64(p *float64) (v float64) { - if p == nil { - return v - } - - return *p -} - -// ToFloat64Slice returns a slice of float64 values, that are -// dereferenced if the passed in pointer was not nil. Returns a float64 -// zero value if the pointer was nil. -func ToFloat64Slice(vs []*float64) []float64 { - ps := make([]float64, len(vs)) - for i, v := range vs { - ps[i] = ToFloat64(v) - } - - return ps -} - -// ToFloat64Map returns a map of float64 values, that are -// dereferenced if the passed in pointer was not nil. The float64 -// zero value is used if the pointer was nil. -func ToFloat64Map(vs map[string]*float64) map[string]float64 { - ps := make(map[string]float64, len(vs)) - for k, v := range vs { - ps[k] = ToFloat64(v) - } - - return ps -} - -// ToTime returns time.Time value dereferenced if the passed -// in pointer was not nil. Returns a time.Time zero value if the -// pointer was nil. -func ToTime(p *time.Time) (v time.Time) { - if p == nil { - return v - } - - return *p -} - -// ToTimeSlice returns a slice of time.Time values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Time -// zero value if the pointer was nil. -func ToTimeSlice(vs []*time.Time) []time.Time { - ps := make([]time.Time, len(vs)) - for i, v := range vs { - ps[i] = ToTime(v) - } - - return ps -} - -// ToTimeMap returns a map of time.Time values, that are -// dereferenced if the passed in pointer was not nil. The time.Time -// zero value is used if the pointer was nil. -func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { - ps := make(map[string]time.Time, len(vs)) - for k, v := range vs { - ps[k] = ToTime(v) - } - - return ps -} - -// ToDuration returns time.Duration value dereferenced if the passed -// in pointer was not nil. Returns a time.Duration zero value if the -// pointer was nil. 
-func ToDuration(p *time.Duration) (v time.Duration) { - if p == nil { - return v - } - - return *p -} - -// ToDurationSlice returns a slice of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. Returns a time.Duration -// zero value if the pointer was nil. -func ToDurationSlice(vs []*time.Duration) []time.Duration { - ps := make([]time.Duration, len(vs)) - for i, v := range vs { - ps[i] = ToDuration(v) - } - - return ps -} - -// ToDurationMap returns a map of time.Duration values, that are -// dereferenced if the passed in pointer was not nil. The time.Duration -// zero value is used if the pointer was nil. -func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { - ps := make(map[string]time.Duration, len(vs)) - for k, v := range vs { - ps[k] = ToDuration(v) - } - - return ps -} diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go deleted file mode 100644 index 97f01011e..000000000 --- a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go +++ /dev/null @@ -1,83 +0,0 @@ -//go:build codegen -// +build codegen - -package ptr - -import "strings" - -func GetScalars() Scalars { - return Scalars{ - {Type: "bool"}, - {Type: "byte"}, - {Type: "string"}, - {Type: "int"}, - {Type: "int8"}, - {Type: "int16"}, - {Type: "int32"}, - {Type: "int64"}, - {Type: "uint"}, - {Type: "uint8"}, - {Type: "uint16"}, - {Type: "uint32"}, - {Type: "uint64"}, - {Type: "float32"}, - {Type: "float64"}, - {Type: "Time", Import: &Import{Path: "time"}}, - {Type: "Duration", Import: &Import{Path: "time"}}, - } -} - -// Import provides the import path and optional alias -type Import struct { - Path string - Alias string -} - -// Package returns the Go package name for the import. Returns alias if set. -func (i Import) Package() string { - if v := i.Alias; len(v) != 0 { - return v - } - - if v := i.Path; len(v) != 0 { - parts := strings.Split(v, "/") - pkg := parts[len(parts)-1] - return pkg - } - - return "" -} - -// Scalar provides the definition of a type to generate pointer utilities for. -type Scalar struct { - Type string - Import *Import -} - -// Name returns the exported function name for the type. -func (t Scalar) Name() string { - return strings.Title(t.Type) -} - -// Symbol returns the scalar's Go symbol with path if needed. -func (t Scalar) Symbol() string { - if t.Import != nil { - return t.Import.Package() + "." + t.Type - } - return t.Type -} - -// Scalars is a list of scalars. -type Scalars []Scalar - -// Imports returns all imports for the scalars. -func (ts Scalars) Imports() []*Import { - imports := []*Import{} - for _, t := range ts { - if v := t.Import; v != nil { - imports = append(imports, v) - } - } - - return imports -} diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go deleted file mode 100644 index 0bfbbecbd..000000000 --- a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go +++ /dev/null @@ -1,499 +0,0 @@ -// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. -package ptr - -import ( - "time" -) - -// Bool returns a pointer value for the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolSlice returns a slice of bool pointers from the values -// passed in. -func BoolSlice(vs []bool) []*bool { - ps := make([]*bool, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// BoolMap returns a map of bool pointers from the values -// passed in. 
-func BoolMap(vs map[string]bool) map[string]*bool { - ps := make(map[string]*bool, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Byte returns a pointer value for the byte value passed in. -func Byte(v byte) *byte { - return &v -} - -// ByteSlice returns a slice of byte pointers from the values -// passed in. -func ByteSlice(vs []byte) []*byte { - ps := make([]*byte, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// ByteMap returns a map of byte pointers from the values -// passed in. -func ByteMap(vs map[string]byte) map[string]*byte { - ps := make(map[string]*byte, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// String returns a pointer value for the string value passed in. -func String(v string) *string { - return &v -} - -// StringSlice returns a slice of string pointers from the values -// passed in. -func StringSlice(vs []string) []*string { - ps := make([]*string, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// StringMap returns a map of string pointers from the values -// passed in. -func StringMap(vs map[string]string) map[string]*string { - ps := make(map[string]*string, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int returns a pointer value for the int value passed in. -func Int(v int) *int { - return &v -} - -// IntSlice returns a slice of int pointers from the values -// passed in. -func IntSlice(vs []int) []*int { - ps := make([]*int, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// IntMap returns a map of int pointers from the values -// passed in. -func IntMap(vs map[string]int) map[string]*int { - ps := make(map[string]*int, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int8 returns a pointer value for the int8 value passed in. -func Int8(v int8) *int8 { - return &v -} - -// Int8Slice returns a slice of int8 pointers from the values -// passed in. -func Int8Slice(vs []int8) []*int8 { - ps := make([]*int8, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int8Map returns a map of int8 pointers from the values -// passed in. -func Int8Map(vs map[string]int8) map[string]*int8 { - ps := make(map[string]*int8, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int16 returns a pointer value for the int16 value passed in. -func Int16(v int16) *int16 { - return &v -} - -// Int16Slice returns a slice of int16 pointers from the values -// passed in. -func Int16Slice(vs []int16) []*int16 { - ps := make([]*int16, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int16Map returns a map of int16 pointers from the values -// passed in. -func Int16Map(vs map[string]int16) map[string]*int16 { - ps := make(map[string]*int16, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int32 returns a pointer value for the int32 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Slice returns a slice of int32 pointers from the values -// passed in. -func Int32Slice(vs []int32) []*int32 { - ps := make([]*int32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int32Map returns a map of int32 pointers from the values -// passed in. 
-func Int32Map(vs map[string]int32) map[string]*int32 { - ps := make(map[string]*int32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Int64 returns a pointer value for the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Slice returns a slice of int64 pointers from the values -// passed in. -func Int64Slice(vs []int64) []*int64 { - ps := make([]*int64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Int64Map returns a map of int64 pointers from the values -// passed in. -func Int64Map(vs map[string]int64) map[string]*int64 { - ps := make(map[string]*int64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint returns a pointer value for the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintSlice returns a slice of uint pointers from the values -// passed in. -func UintSlice(vs []uint) []*uint { - ps := make([]*uint, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// UintMap returns a map of uint pointers from the values -// passed in. -func UintMap(vs map[string]uint) map[string]*uint { - ps := make(map[string]*uint, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint8 returns a pointer value for the uint8 value passed in. -func Uint8(v uint8) *uint8 { - return &v -} - -// Uint8Slice returns a slice of uint8 pointers from the values -// passed in. -func Uint8Slice(vs []uint8) []*uint8 { - ps := make([]*uint8, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint8Map returns a map of uint8 pointers from the values -// passed in. -func Uint8Map(vs map[string]uint8) map[string]*uint8 { - ps := make(map[string]*uint8, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint16 returns a pointer value for the uint16 value passed in. -func Uint16(v uint16) *uint16 { - return &v -} - -// Uint16Slice returns a slice of uint16 pointers from the values -// passed in. -func Uint16Slice(vs []uint16) []*uint16 { - ps := make([]*uint16, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint16Map returns a map of uint16 pointers from the values -// passed in. -func Uint16Map(vs map[string]uint16) map[string]*uint16 { - ps := make(map[string]*uint16, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint32 returns a pointer value for the uint32 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Slice returns a slice of uint32 pointers from the values -// passed in. -func Uint32Slice(vs []uint32) []*uint32 { - ps := make([]*uint32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint32Map returns a map of uint32 pointers from the values -// passed in. -func Uint32Map(vs map[string]uint32) map[string]*uint32 { - ps := make(map[string]*uint32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Uint64 returns a pointer value for the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Slice returns a slice of uint64 pointers from the values -// passed in. -func Uint64Slice(vs []uint64) []*uint64 { - ps := make([]*uint64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Uint64Map returns a map of uint64 pointers from the values -// passed in. 
-func Uint64Map(vs map[string]uint64) map[string]*uint64 { - ps := make(map[string]*uint64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Float32 returns a pointer value for the float32 value passed in. -func Float32(v float32) *float32 { - return &v -} - -// Float32Slice returns a slice of float32 pointers from the values -// passed in. -func Float32Slice(vs []float32) []*float32 { - ps := make([]*float32, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Float32Map returns a map of float32 pointers from the values -// passed in. -func Float32Map(vs map[string]float32) map[string]*float32 { - ps := make(map[string]*float32, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Float64 returns a pointer value for the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Slice returns a slice of float64 pointers from the values -// passed in. -func Float64Slice(vs []float64) []*float64 { - ps := make([]*float64, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// Float64Map returns a map of float64 pointers from the values -// passed in. -func Float64Map(vs map[string]float64) map[string]*float64 { - ps := make(map[string]*float64, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Time returns a pointer value for the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeSlice returns a slice of time.Time pointers from the values -// passed in. -func TimeSlice(vs []time.Time) []*time.Time { - ps := make([]*time.Time, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// TimeMap returns a map of time.Time pointers from the values -// passed in. -func TimeMap(vs map[string]time.Time) map[string]*time.Time { - ps := make(map[string]*time.Time, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} - -// Duration returns a pointer value for the time.Duration value passed in. -func Duration(v time.Duration) *time.Duration { - return &v -} - -// DurationSlice returns a slice of time.Duration pointers from the values -// passed in. -func DurationSlice(vs []time.Duration) []*time.Duration { - ps := make([]*time.Duration, len(vs)) - for i, v := range vs { - vv := v - ps[i] = &vv - } - - return ps -} - -// DurationMap returns a map of time.Duration pointers from the values -// passed in. -func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { - ps := make(map[string]*time.Duration, len(vs)) - for k, v := range vs { - vv := v - ps[k] = &vv - } - - return ps -} diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go deleted file mode 100644 index f8b25d562..000000000 --- a/vendor/github.com/aws/smithy-go/rand/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package rand provides utilities for creating and working with random value -// generators. -package rand diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go deleted file mode 100644 index 9c479f62b..000000000 --- a/vendor/github.com/aws/smithy-go/rand/rand.go +++ /dev/null @@ -1,31 +0,0 @@ -package rand - -import ( - "crypto/rand" - "fmt" - "io" - "math/big" -) - -func init() { - Reader = rand.Reader -} - -// Reader provides a random reader that can reset during testing. 
-var Reader io.Reader - -// Int63n returns a int64 between zero and value of max, read from an io.Reader source. -func Int63n(reader io.Reader, max int64) (int64, error) { - bi, err := rand.Int(reader, big.NewInt(max)) - if err != nil { - return 0, fmt.Errorf("failed to read random value, %w", err) - } - - return bi.Int64(), nil -} - -// CryptoRandInt63n returns a random int64 between zero and value of max -// obtained from the crypto rand source. -func CryptoRandInt63n(max int64) (int64, error) { - return Int63n(Reader, max) -} diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go deleted file mode 100644 index dc81cbc68..000000000 --- a/vendor/github.com/aws/smithy-go/rand/uuid.go +++ /dev/null @@ -1,87 +0,0 @@ -package rand - -import ( - "encoding/hex" - "io" -) - -const dash byte = '-' - -// UUIDIdempotencyToken provides a utility to get idempotency tokens in the -// UUID format. -type UUIDIdempotencyToken struct { - uuid *UUID -} - -// NewUUIDIdempotencyToken returns a idempotency token provider returning -// tokens in the UUID random format using the reader provided. -func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken { - return &UUIDIdempotencyToken{uuid: NewUUID(r)} -} - -// GetIdempotencyToken returns a random UUID value for Idempotency token. -func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) { - return u.uuid.GetUUID() -} - -// UUID provides computing random UUID version 4 values from a random source -// reader. -type UUID struct { - randSrc io.Reader -} - -// NewUUID returns an initialized UUID value that can be used to retrieve -// random UUID version 4 values. -func NewUUID(r io.Reader) *UUID { - return &UUID{randSrc: r} -} - -// GetUUID returns a random UUID version 4 string representation sourced from the random reader the -// UUID was created with. Returns an error if unable to compute the UUID. -func (r *UUID) GetUUID() (string, error) { - var b [16]byte - if _, err := io.ReadFull(r.randSrc, b[:]); err != nil { - return "", err - } - r.makeUUIDv4(b[:]) - return format(b), nil -} - -// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the -// UUID was created with. Returns an error if unable to compute the UUID. -func (r *UUID) GetBytes() (u []byte, err error) { - u = make([]byte, 16) - if _, err = io.ReadFull(r.randSrc, u); err != nil { - return u, err - } - r.makeUUIDv4(u) - return u, nil -} - -func (r *UUID) makeUUIDv4(u []byte) { - // 13th character is "4" - u[6] = (u[6] & 0x0f) | 0x40 // Version 4 - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0 -} - -// Format returns the canonical text representation of a UUID. -// This implementation is optimized to not use fmt. 
-// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d -func format(u [16]byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - - var scratch [36]byte - - hex.Encode(scratch[:8], u[0:4]) - scratch[8] = dash - hex.Encode(scratch[9:13], u[4:6]) - scratch[13] = dash - hex.Encode(scratch[14:18], u[6:8]) - scratch[18] = dash - hex.Encode(scratch[19:23], u[8:10]) - scratch[23] = dash - hex.Encode(scratch[24:], u[10:]) - - return string(scratch[:]) -} diff --git a/vendor/github.com/aws/smithy-go/sync/error.go b/vendor/github.com/aws/smithy-go/sync/error.go deleted file mode 100644 index 629207672..000000000 --- a/vendor/github.com/aws/smithy-go/sync/error.go +++ /dev/null @@ -1,53 +0,0 @@ -package sync - -import "sync" - -// OnceErr wraps the behavior of recording an error -// once and signal on a channel when this has occurred. -// Signaling is done by closing of the channel. -// -// Type is safe for concurrent usage. -type OnceErr struct { - mu sync.RWMutex - err error - ch chan struct{} -} - -// NewOnceErr return a new OnceErr -func NewOnceErr() *OnceErr { - return &OnceErr{ - ch: make(chan struct{}, 1), - } -} - -// Err acquires a read-lock and returns an -// error if one has been set. -func (e *OnceErr) Err() error { - e.mu.RLock() - err := e.err - e.mu.RUnlock() - - return err -} - -// SetError acquires a write-lock and will set -// the underlying error value if one has not been set. -func (e *OnceErr) SetError(err error) { - if err == nil { - return - } - - e.mu.Lock() - if e.err == nil { - e.err = err - close(e.ch) - } - e.mu.Unlock() -} - -// ErrorSet returns a channel that will be used to signal -// that an error has been set. This channel will be closed -// when the error value has been set for OnceErr. -func (e *OnceErr) ErrorSet() <-chan struct{} { - return e.ch -} diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go deleted file mode 100644 index b552a09f8..000000000 --- a/vendor/github.com/aws/smithy-go/time/time.go +++ /dev/null @@ -1,134 +0,0 @@ -package time - -import ( - "context" - "fmt" - "math/big" - "strings" - "time" -) - -const ( - // dateTimeFormat is a IMF-fixdate formatted RFC3339 section 5.6 - dateTimeFormatInput = "2006-01-02T15:04:05.999999999Z" - dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999" - dateTimeFormatOutput = "2006-01-02T15:04:05.999Z" - - // httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1 - // IMF-fixdate with no UTC offset. - httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - // Additional formats needed for compatibility. 
- httpDateFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" - httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" -) - -var millisecondFloat = big.NewFloat(1e3) - -// FormatDateTime formats value as a date-time, (RFC3339 section 5.6) -// -// Example: 1985-04-12T23:20:50.52Z -func FormatDateTime(value time.Time) string { - return value.UTC().Format(dateTimeFormatOutput) -} - -// ParseDateTime parses a string as a date-time, (RFC3339 section 5.6) -// -// Example: 1985-04-12T23:20:50.52Z -func ParseDateTime(value string) (time.Time, error) { - return tryParse(value, - dateTimeFormatInput, - dateTimeFormatInputNoZ, - time.RFC3339Nano, - time.RFC3339, - ) -} - -// FormatHTTPDate formats value as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate) -// -// Example: Tue, 29 Apr 2014 18:30:38 GMT -func FormatHTTPDate(value time.Time) string { - return value.UTC().Format(httpDateFormat) -} - -// ParseHTTPDate parses a string as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate) -// -// Example: Tue, 29 Apr 2014 18:30:38 GMT -func ParseHTTPDate(value string) (time.Time, error) { - return tryParse(value, - httpDateFormat, - httpDateFormatSingleDigitDay, - httpDateFormatSingleDigitDayTwoDigitYear, - time.RFC850, - time.ANSIC, - ) -} - -// FormatEpochSeconds returns value as a Unix time in seconds with with decimal precision -// -// Example: 1515531081.123 -func FormatEpochSeconds(value time.Time) float64 { - ms := value.UnixNano() / int64(time.Millisecond) - return float64(ms) / 1e3 -} - -// ParseEpochSeconds returns value as a Unix time in seconds with with decimal precision -// -// Example: 1515531081.123 -func ParseEpochSeconds(value float64) time.Time { - f := big.NewFloat(value) - f = f.Mul(f, millisecondFloat) - i, _ := f.Int64() - // Offset to `UTC` because time.Unix returns the time value based on system - // local setting. - return time.Unix(0, i*1e6).UTC() -} - -func tryParse(v string, formats ...string) (time.Time, error) { - var errs parseErrors - for _, f := range formats { - t, err := time.Parse(f, v) - if err != nil { - errs = append(errs, parseError{ - Format: f, - Err: err, - }) - continue - } - return t, nil - } - - return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs) -} - -type parseErrors []parseError - -func (es parseErrors) Error() string { - var s strings.Builder - for _, e := range es { - fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) - } - - return "parse errors:" + s.String() -} - -type parseError struct { - Format string - Err error -} - -// SleepWithContext will wait for the timer duration to expire, or until the context -// is canceled. Whichever happens first. If the context is canceled the -// Context's error will be returned. -func SleepWithContext(ctx context.Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - break - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} diff --git a/vendor/github.com/aws/smithy-go/tracing/context.go b/vendor/github.com/aws/smithy-go/tracing/context.go deleted file mode 100644 index a404ed9d3..000000000 --- a/vendor/github.com/aws/smithy-go/tracing/context.go +++ /dev/null @@ -1,96 +0,0 @@ -package tracing - -import "context" - -type ( - operationTracerKey struct{} - spanLineageKey struct{} -) - -// GetSpan returns the active trace Span on the context. 
-// -// The boolean in the return indicates whether a Span was actually in the -// context, but a no-op implementation will be returned if not, so callers -// can generally disregard the boolean unless they wish to explicitly confirm -// presence/absence of a Span. -func GetSpan(ctx context.Context) (Span, bool) { - lineage := getLineage(ctx) - if len(lineage) == 0 { - return nopSpan{}, false - } - - return lineage[len(lineage)-1], true -} - -// WithSpan sets the active trace Span on the context. -func WithSpan(parent context.Context, span Span) context.Context { - lineage := getLineage(parent) - if len(lineage) == 0 { - return context.WithValue(parent, spanLineageKey{}, []Span{span}) - } - - lineage = append(lineage, span) - return context.WithValue(parent, spanLineageKey{}, lineage) -} - -// PopSpan pops the current Span off the context, setting the active Span on -// the returned Context back to its parent and returning the REMOVED one. -// -// PopSpan on a context with no active Span will return a no-op instance. -// -// This is mostly necessary for the runtime to manage base trace spans due to -// the wrapped-function nature of the middleware stack. End-users of Smithy -// clients SHOULD NOT generally be using this API. -func PopSpan(parent context.Context) (context.Context, Span) { - lineage := getLineage(parent) - if len(lineage) == 0 { - return parent, nopSpan{} - } - - span := lineage[len(lineage)-1] - lineage = lineage[:len(lineage)-1] - return context.WithValue(parent, spanLineageKey{}, lineage), span -} - -func getLineage(ctx context.Context) []Span { - v := ctx.Value(spanLineageKey{}) - if v == nil { - return nil - } - - return v.([]Span) -} - -// GetOperationTracer returns the embedded operation-scoped Tracer on a -// Context. -// -// The boolean in the return indicates whether a Tracer was actually in the -// context, but a no-op implementation will be returned if not, so callers -// can generally disregard the boolean unless they wish to explicitly confirm -// presence/absence of a Tracer. -func GetOperationTracer(ctx context.Context) (Tracer, bool) { - v := ctx.Value(operationTracerKey{}) - if v == nil { - return nopTracer{}, false - } - - return v.(Tracer), true -} - -// WithOperationTracer returns a child Context embedding the given Tracer. -// -// The runtime will use this embed a scoped tracer for client operations, -// Smithy/SDK client callers DO NOT need to do this explicitly. -func WithOperationTracer(parent context.Context, tracer Tracer) context.Context { - return context.WithValue(parent, operationTracerKey{}, tracer) -} - -// StartSpan is a convenience API for creating tracing Spans from a Context. -// -// StartSpan uses the operation-scoped Tracer, previously stored using -// [WithOperationTracer], to start the Span. If a Tracer has not been embedded -// the returned Span will be a no-op implementation. -func StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { - tracer, _ := GetOperationTracer(ctx) - return tracer.StartSpan(ctx, name, opts...) -} diff --git a/vendor/github.com/aws/smithy-go/tracing/nop.go b/vendor/github.com/aws/smithy-go/tracing/nop.go deleted file mode 100644 index 573d28b1c..000000000 --- a/vendor/github.com/aws/smithy-go/tracing/nop.go +++ /dev/null @@ -1,32 +0,0 @@ -package tracing - -import "context" - -// NopTracerProvider is a no-op tracing implementation. -type NopTracerProvider struct{} - -var _ TracerProvider = (*NopTracerProvider)(nil) - -// Tracer returns a tracer which creates no-op spans. 
-func (NopTracerProvider) Tracer(string, ...TracerOption) Tracer { - return nopTracer{} -} - -type nopTracer struct{} - -var _ Tracer = (*nopTracer)(nil) - -func (nopTracer) StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { - return ctx, nopSpan{} -} - -type nopSpan struct{} - -var _ Span = (*nopSpan)(nil) - -func (nopSpan) Name() string { return "" } -func (nopSpan) Context() SpanContext { return SpanContext{} } -func (nopSpan) AddEvent(string, ...EventOption) {} -func (nopSpan) SetProperty(any, any) {} -func (nopSpan) SetStatus(SpanStatus) {} -func (nopSpan) End() {} diff --git a/vendor/github.com/aws/smithy-go/tracing/tracing.go b/vendor/github.com/aws/smithy-go/tracing/tracing.go deleted file mode 100644 index 089ed3932..000000000 --- a/vendor/github.com/aws/smithy-go/tracing/tracing.go +++ /dev/null @@ -1,95 +0,0 @@ -// Package tracing defines tracing APIs to be used by Smithy clients. -package tracing - -import ( - "context" - - "github.com/aws/smithy-go" -) - -// SpanStatus records the "success" state of an observed span. -type SpanStatus int - -// Enumeration of SpanStatus. -const ( - SpanStatusUnset SpanStatus = iota - SpanStatusOK - SpanStatusError -) - -// SpanKind indicates the nature of the work being performed. -type SpanKind int - -// Enumeration of SpanKind. -const ( - SpanKindInternal SpanKind = iota - SpanKindClient - SpanKindServer - SpanKindProducer - SpanKindConsumer -) - -// TracerProvider is the entry point for creating client traces. -type TracerProvider interface { - Tracer(scope string, opts ...TracerOption) Tracer -} - -// TracerOption applies configuration to a tracer. -type TracerOption func(o *TracerOptions) - -// TracerOptions represent configuration for tracers. -type TracerOptions struct { - Properties smithy.Properties -} - -// Tracer is the entry point for creating observed client Spans. -// -// Spans created by tracers propagate by existing on the Context. Consumers of -// the API can use [GetSpan] to pull the active Span from a Context. -// -// Creation of child Spans is implicit through Context persistence. If -// CreateSpan is called with a Context that holds a Span, the result will be a -// child of that Span. -type Tracer interface { - StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) -} - -// SpanOption applies configuration to a span. -type SpanOption func(o *SpanOptions) - -// SpanOptions represent configuration for span events. -type SpanOptions struct { - Kind SpanKind - Properties smithy.Properties -} - -// Span records a conceptually individual unit of work that takes place in a -// Smithy client operation. -type Span interface { - Name() string - Context() SpanContext - AddEvent(name string, opts ...EventOption) - SetStatus(status SpanStatus) - SetProperty(k, v any) - End() -} - -// EventOption applies configuration to a span event. -type EventOption func(o *EventOptions) - -// EventOptions represent configuration for span events. -type EventOptions struct { - Properties smithy.Properties -} - -// SpanContext uniquely identifies a Span. -type SpanContext struct { - TraceID string - SpanID string - IsRemote bool -} - -// IsValid is true when a span has nonzero trace and span IDs. 
-func (ctx *SpanContext) IsValid() bool { - return len(ctx.TraceID) != 0 && len(ctx.SpanID) != 0 -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth.go b/vendor/github.com/aws/smithy-go/transport/http/auth.go deleted file mode 100644 index 58e1ab5ef..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/auth.go +++ /dev/null @@ -1,21 +0,0 @@ -package http - -import ( - "context" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// AuthScheme defines an HTTP authentication scheme. -type AuthScheme interface { - SchemeID() string - IdentityResolver(auth.IdentityResolverOptions) auth.IdentityResolver - Signer() Signer -} - -// Signer defines the interface through which HTTP requests are supplemented -// with an Identity. -type Signer interface { - SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go deleted file mode 100644 index d60cf2a60..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go +++ /dev/null @@ -1,45 +0,0 @@ -package http - -import ( - "context" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/auth" -) - -// NewAnonymousScheme returns the anonymous HTTP auth scheme. -func NewAnonymousScheme() AuthScheme { - return &authScheme{ - schemeID: auth.SchemeIDAnonymous, - signer: &nopSigner{}, - } -} - -// authScheme is parameterized to generically implement the exported AuthScheme -// interface -type authScheme struct { - schemeID string - signer Signer -} - -var _ AuthScheme = (*authScheme)(nil) - -func (s *authScheme) SchemeID() string { - return s.schemeID -} - -func (s *authScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { - return o.GetIdentityResolver(s.schemeID) -} - -func (s *authScheme) Signer() Signer { - return s.signer -} - -type nopSigner struct{} - -var _ Signer = (*nopSigner)(nil) - -func (*nopSigner) SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error { - return nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go deleted file mode 100644 index bc4ad6e79..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go +++ /dev/null @@ -1,70 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -const contentMD5Header = "Content-Md5" - -// contentMD5Checksum provides a middleware to compute and set -// content-md5 checksum for a http request -type contentMD5Checksum struct { -} - -// AddContentChecksumMiddleware adds checksum middleware to middleware's -// build step. -func AddContentChecksumMiddleware(stack *middleware.Stack) error { - // This middleware must be executed before request body is set. 
- return stack.Build.Add(&contentMD5Checksum{}, middleware.Before) -} - -// ID returns the identifier for the checksum middleware -func (m *contentMD5Checksum) ID() string { return "ContentChecksum" } - -// HandleBuild adds behavior to compute md5 checksum and add content-md5 header -// on http request -func (m *contentMD5Checksum) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // if Content-MD5 header is already present, return - if v := req.Header.Get(contentMD5Header); len(v) != 0 { - return next.HandleBuild(ctx, in) - } - - // fetch the request stream. - stream := req.GetStream() - // compute checksum if payload is explicit - if stream != nil { - if !req.IsStreamSeekable() { - return out, metadata, fmt.Errorf( - "unseekable stream is not supported for computing md5 checksum") - } - - v, err := computeMD5Checksum(stream) - if err != nil { - return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err) - } - - // reset the request stream - if err := req.RewindStream(); err != nil { - return out, metadata, fmt.Errorf( - "error rewinding request stream after computing md5 checksum, %w", err) - } - - // set the 'Content-MD5' header - req.Header.Set(contentMD5Header, string(v)) - } - - // set md5 header value - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go deleted file mode 100644 index 0fceae81d..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/client.go +++ /dev/null @@ -1,161 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - - smithy "github.com/aws/smithy-go" - "github.com/aws/smithy-go/metrics" - "github.com/aws/smithy-go/middleware" - "github.com/aws/smithy-go/tracing" -) - -// ClientDo provides the interface for custom HTTP client implementations. -type ClientDo interface { - Do(*http.Request) (*http.Response, error) -} - -// ClientDoFunc provides a helper to wrap a function as an HTTP client for -// round tripping requests. -type ClientDoFunc func(*http.Request) (*http.Response, error) - -// Do will invoke the underlying func, returning the result. -func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) { - return fn(r) -} - -// ClientHandler wraps a client that implements the HTTP Do method. Standard -// implementation is http.Client. -type ClientHandler struct { - client ClientDo - - Meter metrics.Meter // For HTTP client metrics. -} - -// NewClientHandler returns an initialized middleware handler for the client. -// -// Deprecated: Use [NewClientHandlerWithOptions]. -func NewClientHandler(client ClientDo) ClientHandler { - return NewClientHandlerWithOptions(client) -} - -// NewClientHandlerWithOptions returns an initialized middleware handler for the client -// with applied options. -func NewClientHandlerWithOptions(client ClientDo, opts ...func(*ClientHandler)) ClientHandler { - h := ClientHandler{ - client: client, - } - for _, opt := range opts { - opt(&h) - } - if h.Meter == nil { - h.Meter = metrics.NopMeterProvider{}.Meter("") - } - return h -} - -// Handle implements the middleware Handler interface, that will invoke the -// underlying HTTP client. Requires the input to be a Smithy *Request. Returns -// a smithy *Response, or error if the request failed. 
-func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( - out interface{}, metadata middleware.Metadata, err error, -) { - ctx, span := tracing.StartSpan(ctx, "DoHTTPRequest") - defer span.End() - - ctx, client, err := withMetrics(ctx, c.client, c.Meter) - if err != nil { - return nil, metadata, fmt.Errorf("instrument with HTTP metrics: %w", err) - } - - req, ok := input.(*Request) - if !ok { - return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input) - } - - builtRequest := req.Build(ctx) - if err := ValidateEndpointHost(builtRequest.Host); err != nil { - return nil, metadata, err - } - - span.SetProperty("http.method", req.Method) - span.SetProperty("http.request_content_length", -1) // at least indicate unknown - length, ok, err := req.StreamLength() - if err != nil { - return nil, metadata, err - } - if ok { - span.SetProperty("http.request_content_length", length) - } - - resp, err := client.Do(builtRequest) - if resp == nil { - // Ensure a http response value is always present to prevent unexpected - // panics. - resp = &http.Response{ - Header: http.Header{}, - Body: http.NoBody, - } - } - if err != nil { - err = &RequestSendError{Err: err} - - // Override the error with a context canceled error, if that was canceled. - select { - case <-ctx.Done(): - err = &smithy.CanceledError{Err: ctx.Err()} - default: - } - } - - // HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner. - // So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the - // stream reference so that it can be safely reused. - if builtRequest.Body != nil { - _ = builtRequest.Body.Close() - } - - span.SetProperty("net.protocol.version", fmt.Sprintf("%d.%d", resp.ProtoMajor, resp.ProtoMinor)) - span.SetProperty("http.status_code", resp.StatusCode) - span.SetProperty("http.response_content_length", resp.ContentLength) - - return &Response{Response: resp}, metadata, err -} - -// RequestSendError provides a generic request transport error. This error -// should wrap errors making HTTP client requests. -// -// The ClientHandler will wrap the HTTP client's error if the client request -// fails, and did not fail because of context canceled. -type RequestSendError struct { - Err error -} - -// ConnectionError returns that the error is related to not being able to send -// the request, or receive a response from the service. -func (e *RequestSendError) ConnectionError() bool { - return true -} - -// Unwrap returns the underlying error, if there was one. -func (e *RequestSendError) Unwrap() error { - return e.Err -} - -func (e *RequestSendError) Error() string { - return fmt.Sprintf("request send failed, %v", e.Err) -} - -// NopClient provides a client that ignores the request, and returns an empty -// successful HTTP response value. -type NopClient struct{} - -// Do ignores the request and returns a 200 status empty response. 
-func (NopClient) Do(r *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: 200, - Header: http.Header{}, - Body: http.NoBody, - }, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go deleted file mode 100644 index 07366ac85..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -/* -Package http provides the HTTP transport client and request/response types -needed to round trip API operation calls with an service. -*/ -package http diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go deleted file mode 100644 index cbc9deb4d..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go +++ /dev/null @@ -1,163 +0,0 @@ -package http - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) { - values := make([]string, 0, len(vs)) - - for i := 0; i < len(vs); i++ { - parts, err := splitFn(vs[i]) - if err != nil { - return nil, err - } - values = append(values, parts...) - } - - return values, nil -} - -// SplitHeaderListValues attempts to split the elements of the slice by commas, -// and return a list of all values separated. Returns error if unable to -// separate the values. -func SplitHeaderListValues(vs []string) ([]string, error) { - return splitHeaderListValues(vs, quotedCommaSplit) -} - -func quotedCommaSplit(v string) (parts []string, err error) { - v = strings.TrimSpace(v) - - expectMore := true - for i := 0; i < len(v); i++ { - if unicode.IsSpace(rune(v[i])) { - continue - } - expectMore = false - - // leading space in part is ignored. - // Start of value must be non-space, or quote. - // - // - If quote, enter quoted mode, find next non-escaped quote to - // terminate the value. - // - Otherwise, find next comma to terminate value. - - remaining := v[i:] - - var value string - var valueLen int - if remaining[0] == '"' { - //------------------------------ - // Quoted value - //------------------------------ - var j int - var skipQuote bool - for j += 1; j < len(remaining); j++ { - if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) { - skipQuote = !skipQuote - continue - } - if remaining[j] == '"' { - break - } - } - if j == len(remaining) || j == 1 { - return nil, fmt.Errorf("value %v missing closing double quote", - remaining) - } - valueLen = j + 1 - - tail := remaining[valueLen:] - var k int - for ; k < len(tail); k++ { - if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' { - return nil, fmt.Errorf("value %v has non-space trailing characters", - remaining) - } - if tail[k] == ',' { - expectMore = true - break - } - } - value = remaining[:valueLen] - value, err = strconv.Unquote(value) - if err != nil { - return nil, fmt.Errorf("failed to unquote value %v, %w", value, err) - } - - // Pad valueLen to include trailing space(s) so `i` is updated correctly. - valueLen += k - - } else { - //------------------------------ - // Unquoted value - //------------------------------ - - // Index of the next comma is the length of the value, or end of string. 
- valueLen = strings.Index(remaining, ",") - if valueLen != -1 { - expectMore = true - } else { - valueLen = len(remaining) - } - value = strings.TrimSpace(remaining[:valueLen]) - } - - i += valueLen - parts = append(parts, value) - - } - - if expectMore { - parts = append(parts, "") - } - - return parts, nil -} - -// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date -// timestamp values in the slice by commas, and return a list of all values -// separated. The split is aware of the HTTP-Date timestamp format, and will skip -// comma within the timestamp value. Returns an error if unable to split the -// timestamp values. -func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) { - return splitHeaderListValues(vs, splitHTTPDateHeaderValue) -} - -func splitHTTPDateHeaderValue(v string) ([]string, error) { - if n := strings.Count(v, ","); n <= 1 { - // Nothing to do if only contains a no, or single HTTPDate value - return []string{v}, nil - } else if n%2 == 0 { - return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v) - } - - var parts []string - var i, j int - - var doSplit bool - for ; i < len(v); i++ { - if v[i] == ',' { - if doSplit { - doSplit = false - parts = append(parts, strings.TrimSpace(v[j:i])) - j = i + 1 - } else { - // Skip the first comma in the timestamp value since that - // separates the day from the rest of the timestamp. - // - // Tue, 17 Dec 2019 23:48:18 GMT - doSplit = true - } - } - } - // Add final part - if j < len(v) { - parts = append(parts, strings.TrimSpace(v[j:])) - } - - return parts, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go deleted file mode 100644 index 6b290fec0..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/host.go +++ /dev/null @@ -1,89 +0,0 @@ -package http - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -// ValidateEndpointHost validates that the host string passed in is a valid RFC -// 3986 host. Returns error if the host is not valid. -func ValidateEndpointHost(host string) error { - var errors strings.Builder - var hostname string - var port string - var err error - - if strings.Contains(host, ":") { - hostname, port, err = net.SplitHostPort(host) - if err != nil { - errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host)) - errors.WriteString(err.Error()) - } - - if !ValidPortNumber(port) { - errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port)) - } - } else { - hostname = host - } - - labels := strings.Split(hostname, ".") - for i, label := range labels { - if i == len(labels)-1 && len(label) == 0 { - // Allow trailing dot for FQDN hosts. - continue - } - - if !ValidHostLabel(label) { - errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ") - errors.WriteString(label) - } - } - - if len(hostname) == 0 && len(port) != 0 { - errors.WriteString("\nendpoint host with port must not be empty") - } - - if len(hostname) > 255 { - errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname))) - } - - if len(errors.String()) > 0 { - return fmt.Errorf("invalid endpoint host%s", errors.String()) - } - return nil -} - -// ValidPortNumber returns whether the port is valid RFC 3986 port. 
-func ValidPortNumber(port string) bool { - i, err := strconv.Atoi(port) - if err != nil { - return false - } - - if i < 0 || i > 65535 { - return false - } - return true -} - -// ValidHostLabel returns whether the label is a valid RFC 3986 host abel. -func ValidHostLabel(label string) bool { - if l := len(label); l == 0 || l > 63 { - return false - } - for _, r := range label { - switch { - case r >= '0' && r <= '9': - case r >= 'A' && r <= 'Z': - case r >= 'a' && r <= 'z': - case r == '-': - default: - return false - } - } - - return true -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go deleted file mode 100644 index 941a8d6b5..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go +++ /dev/null @@ -1,75 +0,0 @@ -package io - -import ( - "io" - "sync" -) - -// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser. -func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser { - sr := &safeReadCloser{ - readCloser: readCloser, - } - - if _, ok := readCloser.(io.WriterTo); ok { - return &safeWriteToReadCloser{safeReadCloser: sr} - } - - return sr -} - -// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic -// if the underlying io.ReadClose does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this -// type. -type safeWriteToReadCloser struct { - *safeReadCloser -} - -// WriteTo implements the io.WriteTo interface. -func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) { - r.safeReadCloser.mtx.Lock() - defer r.safeReadCloser.mtx.Unlock() - - if r.safeReadCloser.closed { - return 0, io.EOF - } - - return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w) -} - -// safeReadCloser wraps a io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser -// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type -// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime -// of a goroutine connection. Wrapping in this manner will ensure that no data race conditions are falsely reported. -// This type is thread-safe. -type safeReadCloser struct { - readCloser io.ReadCloser - closed bool - mtx sync.Mutex -} - -// Read reads up to len(p) bytes into p from the underlying read. If the reader is closed io.EOF will be returned. -func (r *safeReadCloser) Read(p []byte) (n int, err error) { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.closed { - return 0, io.EOF - } - - return r.readCloser.Read(p) -} - -// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error -// reported from Close. Subsequent calls to Close will always return a nil error. 
-func (r *safeReadCloser) Close() error { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.closed { - return nil - } - - r.closed = true - rc := r.readCloser - r.readCloser = nil - return rc.Close() -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go deleted file mode 100644 index 5d6a4b23a..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go +++ /dev/null @@ -1,25 +0,0 @@ -package http - -import ( - "crypto/md5" - "encoding/base64" - "fmt" - "io" -) - -// computeMD5Checksum computes base64 md5 checksum of an io.Reader's contents. -// Returns the byte slice of md5 checksum and an error. -func computeMD5Checksum(r io.Reader) ([]byte, error) { - h := md5.New() - // copy errors may be assumed to be from the body. - _, err := io.Copy(h, r) - if err != nil { - return nil, fmt.Errorf("failed to read body: %w", err) - } - - // encode the md5 checksum in base64. - sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - return sum64, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go deleted file mode 100644 index ab1101394..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/metrics.go +++ /dev/null @@ -1,184 +0,0 @@ -package http - -import ( - "context" - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" - - "github.com/aws/smithy-go/metrics" -) - -var now = time.Now - -// withMetrics instruments an HTTP client and context to collect HTTP metrics. -func withMetrics(parent context.Context, client ClientDo, meter metrics.Meter) ( - context.Context, ClientDo, error, -) { - hm, err := newHTTPMetrics(meter) - if err != nil { - return nil, nil, err - } - - ctx := httptrace.WithClientTrace(parent, &httptrace.ClientTrace{ - DNSStart: hm.DNSStart, - ConnectStart: hm.ConnectStart, - TLSHandshakeStart: hm.TLSHandshakeStart, - - GotConn: hm.GotConn(parent), - PutIdleConn: hm.PutIdleConn(parent), - ConnectDone: hm.ConnectDone(parent), - DNSDone: hm.DNSDone(parent), - TLSHandshakeDone: hm.TLSHandshakeDone(parent), - GotFirstResponseByte: hm.GotFirstResponseByte(parent), - }) - return ctx, &timedClientDo{client, hm}, nil -} - -type timedClientDo struct { - ClientDo - hm *httpMetrics -} - -func (c *timedClientDo) Do(r *http.Request) (*http.Response, error) { - c.hm.doStart = now() - resp, err := c.ClientDo.Do(r) - - c.hm.DoRequestDuration.Record(r.Context(), elapsed(c.hm.doStart)) - return resp, err -} - -type httpMetrics struct { - DNSLookupDuration metrics.Float64Histogram // client.http.connections.dns_lookup_duration - ConnectDuration metrics.Float64Histogram // client.http.connections.acquire_duration - TLSHandshakeDuration metrics.Float64Histogram // client.http.connections.tls_handshake_duration - ConnectionUsage metrics.Int64UpDownCounter // client.http.connections.usage - - DoRequestDuration metrics.Float64Histogram // client.http.do_request_duration - TimeToFirstByte metrics.Float64Histogram // client.http.time_to_first_byte - - doStart time.Time - dnsStart time.Time - connectStart time.Time - tlsStart time.Time -} - -func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) { - hm := &httpMetrics{} - - var err error - hm.DNSLookupDuration, err = meter.Float64Histogram("client.http.connections.dns_lookup_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "The time it takes a 
request to perform DNS lookup." - }) - if err != nil { - return nil, err - } - hm.ConnectDuration, err = meter.Float64Histogram("client.http.connections.acquire_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "The time it takes a request to acquire a connection." - }) - if err != nil { - return nil, err - } - hm.TLSHandshakeDuration, err = meter.Float64Histogram("client.http.connections.tls_handshake_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "The time it takes an HTTP request to perform the TLS handshake." - }) - if err != nil { - return nil, err - } - hm.ConnectionUsage, err = meter.Int64UpDownCounter("client.http.connections.usage", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "{connection}" - o.Description = "Current state of connections pool." - }) - if err != nil { - return nil, err - } - hm.DoRequestDuration, err = meter.Float64Histogram("client.http.do_request_duration", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "Time spent performing an entire HTTP transaction." - }) - if err != nil { - return nil, err - } - hm.TimeToFirstByte, err = meter.Float64Histogram("client.http.time_to_first_byte", func(o *metrics.InstrumentOptions) { - o.UnitLabel = "s" - o.Description = "Time from start of transaction to when the first response byte is available." - }) - if err != nil { - return nil, err - } - - return hm, nil -} - -func (m *httpMetrics) DNSStart(httptrace.DNSStartInfo) { - m.dnsStart = now() -} - -func (m *httpMetrics) ConnectStart(string, string) { - m.connectStart = now() -} - -func (m *httpMetrics) TLSHandshakeStart() { - m.tlsStart = now() -} - -func (m *httpMetrics) GotConn(ctx context.Context) func(httptrace.GotConnInfo) { - return func(httptrace.GotConnInfo) { - m.addConnAcquired(ctx, 1) - } -} - -func (m *httpMetrics) PutIdleConn(ctx context.Context) func(error) { - return func(error) { - m.addConnAcquired(ctx, -1) - } -} - -func (m *httpMetrics) DNSDone(ctx context.Context) func(httptrace.DNSDoneInfo) { - return func(httptrace.DNSDoneInfo) { - m.DNSLookupDuration.Record(ctx, elapsed(m.dnsStart)) - } -} - -func (m *httpMetrics) ConnectDone(ctx context.Context) func(string, string, error) { - return func(string, string, error) { - m.ConnectDuration.Record(ctx, elapsed(m.connectStart)) - } -} - -func (m *httpMetrics) TLSHandshakeDone(ctx context.Context) func(tls.ConnectionState, error) { - return func(tls.ConnectionState, error) { - m.TLSHandshakeDuration.Record(ctx, elapsed(m.tlsStart)) - } -} - -func (m *httpMetrics) GotFirstResponseByte(ctx context.Context) func() { - return func() { - m.TimeToFirstByte.Record(ctx, elapsed(m.doStart)) - } -} - -func (m *httpMetrics) addConnAcquired(ctx context.Context, incr int64) { - m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("state", "acquired") - }) -} - -// Not used: it is recommended to track acquired vs idle conn, but we can't -// determine when something is truly idle with the current HTTP client hooks -// available to us. 
-func (m *httpMetrics) addConnIdle(ctx context.Context, incr int64) { - m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) { - o.Properties.Set("state", "idle") - }) -} - -func elapsed(start time.Time) float64 { - end := now() - elapsed := end.Sub(start) - return float64(elapsed) / 1e9 -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go deleted file mode 100644 index 1d3b218a1..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go +++ /dev/null @@ -1,79 +0,0 @@ -package http - -import ( - "context" - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" - "io" - "io/ioutil" -) - -// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically -// close the response body of an operation request if the request response -// failed. -func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before) -} - -type errorCloseResponseBodyMiddleware struct{} - -func (*errorCloseResponseBodyMiddleware) ID() string { - return "ErrorCloseResponseBody" -} - -func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err := next.HandleDeserialize(ctx, input) - if err != nil { - if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { - // Consume the full body to prevent TCP connection resets on some platforms - _, _ = io.Copy(ioutil.Discard, resp.Body) - // Do not validate that the response closes successfully. - resp.Body.Close() - } - } - - return out, metadata, err -} - -// AddCloseResponseBodyMiddleware adds the middleware to automatically close -// the response body of an operation request, after the response had been -// deserialized. 
-func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error { - return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before) -} - -type closeResponseBody struct{} - -func (*closeResponseBody) ID() string { - return "CloseResponseBody" -} - -func (m *closeResponseBody) HandleDeserialize( - ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - output middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err := next.HandleDeserialize(ctx, input) - if err != nil { - return out, metadata, err - } - - if resp, ok := out.RawResponse.(*Response); ok { - // Consume the full body to prevent TCP connection resets on some platforms - _, copyErr := io.Copy(ioutil.Discard, resp.Body) - if copyErr != nil { - middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse") - } - - closeErr := resp.Body.Close() - if closeErr != nil { - middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse") - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go deleted file mode 100644 index 9969389bb..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go +++ /dev/null @@ -1,84 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -// ComputeContentLength provides a middleware to set the content-length -// header for the length of a serialize request body. -type ComputeContentLength struct { -} - -// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware -// stack's Build step. -func AddComputeContentLengthMiddleware(stack *middleware.Stack) error { - return stack.Build.Add(&ComputeContentLength{}, middleware.After) -} - -// ID returns the identifier for the ComputeContentLength. -func (m *ComputeContentLength) ID() string { return "ComputeContentLength" } - -// HandleBuild adds the length of the serialized request to the HTTP header -// if the length can be determined. -func (m *ComputeContentLength) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // do nothing if request content-length was set to 0 or above. - if req.ContentLength >= 0 { - return next.HandleBuild(ctx, in) - } - - // attempt to compute stream length - if n, ok, err := req.StreamLength(); err != nil { - return out, metadata, fmt.Errorf( - "failed getting length of request stream, %w", err) - } else if ok { - req.ContentLength = n - } - - return next.HandleBuild(ctx, in) -} - -// validateContentLength provides a middleware to validate the content-length -// is valid (greater than zero), for the serialized request payload. -type validateContentLength struct{} - -// ValidateContentLengthHeader adds middleware that validates request content-length -// is set to value greater than zero. -func ValidateContentLengthHeader(stack *middleware.Stack) error { - return stack.Build.Add(&validateContentLength{}, middleware.After) -} - -// ID returns the identifier for the ComputeContentLength. 
-func (m *validateContentLength) ID() string { return "ValidateContentLength" } - -// HandleBuild adds the length of the serialized request to the HTTP header -// if the length can be determined. -func (m *validateContentLength) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, -) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown request type %T", req) - } - - // if request content-length was set to less than 0, return an error - if req.ContentLength < 0 { - return out, metadata, fmt.Errorf( - "content length for payload is required and must be at least 0") - } - - return next.HandleBuild(ctx, in) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go deleted file mode 100644 index 855c22720..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go +++ /dev/null @@ -1,81 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - - "github.com/aws/smithy-go/middleware" -) - -// WithHeaderComment instruments a middleware stack to append an HTTP field -// comment to the given header as specified in RFC 9110 -// (https://www.rfc-editor.org/rfc/rfc9110#name-comments). -// -// The header is case-insensitive. If the provided header exists when the -// middleware runs, the content will be inserted as-is enclosed in parentheses. -// -// Note that per the HTTP specification, comments are only allowed in fields -// containing "comment" as part of their field value definition, but this API -// will NOT verify whether the provided header is one of them. -// -// WithHeaderComment MAY be applied more than once to a middleware stack and/or -// more than once per header. 
-func WithHeaderComment(header, content string) func(*middleware.Stack) error { - return func(s *middleware.Stack) error { - m, err := getOrAddHeaderComment(s) - if err != nil { - return fmt.Errorf("get or add header comment: %v", err) - } - - m.values.Add(header, content) - return nil - } -} - -type headerCommentMiddleware struct { - values http.Header // hijack case-insensitive access APIs -} - -func (*headerCommentMiddleware) ID() string { - return "headerComment" -} - -func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, -) { - r, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - for h, contents := range m.values { - for _, c := range contents { - if existing := r.Header.Get(h); existing != "" { - r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c)) - } - } - } - - return next.HandleBuild(ctx, in) -} - -func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) { - id := (*headerCommentMiddleware)(nil).ID() - m, ok := s.Build.Get(id) - if !ok { - m := &headerCommentMiddleware{values: http.Header{}} - if err := s.Build.Add(m, middleware.After); err != nil { - return nil, fmt.Errorf("add build: %v", err) - } - - return m, nil - } - - hc, ok := m.(*headerCommentMiddleware) - if !ok { - return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id) - } - - return hc, nil -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go deleted file mode 100644 index eac32b4ba..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go +++ /dev/null @@ -1,167 +0,0 @@ -package http - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/middleware" -) - -type isContentTypeAutoSet struct{} - -// SetIsContentTypeDefaultValue returns a Context specifying if the request's -// content-type header was set to a default value. -func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context { - return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault) -} - -// GetIsContentTypeDefaultValue returns if the content-type HTTP header on the -// request is a default value that was auto assigned by an operation -// serializer. Allows middleware post serialization to know if the content-type -// was auto set to a default value or not. -// -// Also returns false if the Context value was never updated to include if -// content-type was set to a default value. -func GetIsContentTypeDefaultValue(ctx context.Context) bool { - v, _ := ctx.Value(isContentTypeAutoSet{}).(bool) - return v -} - -// AddNoPayloadDefaultContentTypeRemover Adds the DefaultContentTypeRemover -// middleware to the stack after the operation serializer. This middleware will -// remove the content-type header from the request if it was set as a default -// value, and no request payload is present. -// -// Returns error if unable to add the middleware. 
-func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { - err = stack.Serialize.Insert(removeDefaultContentType{}, - "OperationSerializer", middleware.After) - if err != nil { - return fmt.Errorf("failed to add %s serialize middleware, %w", - removeDefaultContentType{}.ID(), err) - } - - return nil -} - -// RemoveNoPayloadDefaultContentTypeRemover removes the -// DefaultContentTypeRemover middleware from the stack. Returns an error if -// unable to remove the middleware. -func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { - _, err = stack.Serialize.Remove(removeDefaultContentType{}.ID()) - if err != nil { - return fmt.Errorf("failed to remove %s serialize middleware, %w", - removeDefaultContentType{}.ID(), err) - - } - return nil -} - -// removeDefaultContentType provides after serialization middleware that will -// remove the content-type header from an HTTP request if the header was set as -// a default value by the operation serializer, and there is no request payload. -type removeDefaultContentType struct{} - -// ID returns the middleware ID -func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" } - -// HandleSerialize implements the serialization middleware. -func (removeDefaultContentType) HandleSerialize( - ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, -) ( - out middleware.SerializeOutput, meta middleware.Metadata, err error, -) { - req, ok := input.Request.(*Request) - if !ok { - return out, meta, fmt.Errorf( - "unexpected request type %T for removeDefaultContentType middleware", - input.Request) - } - - if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil { - req.Header.Del("Content-Type") - input.Request = req - } - - return next.HandleSerialize(ctx, input) -} - -type headerValue struct { - header string - value string - append bool -} - -type headerValueHelper struct { - headerValues []headerValue -} - -func (h *headerValueHelper) addHeaderValue(value headerValue) { - h.headerValues = append(h.headerValues, value) -} - -func (h *headerValueHelper) ID() string { - return "HTTPHeaderHelper" -} - -func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) { - req, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) - } - - for _, value := range h.headerValues { - if value.append { - req.Header.Add(value.header, value.value) - } else { - req.Header.Set(value.header, value.value) - } - } - - return next.HandleBuild(ctx, in) -} - -func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) { - id := (*headerValueHelper)(nil).ID() - m, ok := stack.Build.Get(id) - if !ok { - m = &headerValueHelper{} - err := stack.Build.Add(m, middleware.After) - if err != nil { - return nil, err - } - } - - requestUserAgent, ok := m.(*headerValueHelper) - if !ok { - return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id) - } - - return requestUserAgent, nil -} - -// AddHeaderValue returns a stack mutator that adds the header value pair to header. -// Appends to any existing values if present. 
-func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error { - return func(stack *middleware.Stack) error { - helper, err := getOrAddHeaderValueHelper(stack) - if err != nil { - return err - } - helper.addHeaderValue(headerValue{header: header, value: value, append: true}) - return nil - } -} - -// SetHeaderValue returns a stack mutator that adds the header value pair to header. -// Replaces any existing values if present. -func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error { - return func(stack *middleware.Stack) error { - helper, err := getOrAddHeaderValueHelper(stack) - if err != nil { - return err - } - helper.addHeaderValue(headerValue{header: header, value: value, append: false}) - return nil - } -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go deleted file mode 100644 index d5909b0a2..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go +++ /dev/null @@ -1,75 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http/httputil" - - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally -// their respective bodies. Will not perform any logging if none of the options are set. -type RequestResponseLogger struct { - LogRequest bool - LogRequestWithBody bool - - LogResponse bool - LogResponseWithBody bool -} - -// ID is the middleware identifier. -func (r *RequestResponseLogger) ID() string { - return "RequestResponseLogger" -} - -// HandleDeserialize will log the request and response HTTP messages if configured accordingly. 
-func (r *RequestResponseLogger) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - logger := middleware.GetLogger(ctx) - - if r.LogRequest || r.LogRequestWithBody { - smithyRequest, ok := in.Request.(*Request) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", in) - } - - rc := smithyRequest.Build(ctx) - reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody) - if err != nil { - return out, metadata, err - } - - logger.Logf(logging.Debug, "Request\n%v", string(reqBytes)) - - if r.LogRequestWithBody { - smithyRequest, err = smithyRequest.SetStream(rc.Body) - if err != nil { - return out, metadata, err - } - in.Request = smithyRequest - } - } - - out, metadata, err = next.HandleDeserialize(ctx, in) - - if (err == nil) && (r.LogResponse || r.LogResponseWithBody) { - smithyResponse, ok := out.RawResponse.(*Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) - } - - respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody) - if err != nil { - return out, metadata, fmt.Errorf("failed to dump response %w", err) - } - - logger.Logf(logging.Debug, "Response\n%v", string(respBytes)) - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go deleted file mode 100644 index d6079b259..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go +++ /dev/null @@ -1,51 +0,0 @@ -package http - -import ( - "context" - - "github.com/aws/smithy-go/middleware" -) - -type ( - hostnameImmutableKey struct{} - hostPrefixDisableKey struct{} -) - -// GetHostnameImmutable retrieves whether the endpoint hostname should be considered -// immutable or not. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func GetHostnameImmutable(ctx context.Context) (v bool) { - v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool) - return v -} - -// SetHostnameImmutable sets or modifies whether the request's endpoint hostname -// should be considered immutable or not. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func SetHostnameImmutable(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value) -} - -// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is -// disabled. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. -func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) { - v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool) - return v -} - -// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host -// prefixing should be disabled. If value is true, endpoint host prefixing -// will be disabled. -// -// Scoped to stack values. Use middleware#ClearStackValues to clear all stack -// values. 
-func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context { - return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go deleted file mode 100644 index 326cb8a6c..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go +++ /dev/null @@ -1,79 +0,0 @@ -package http - -import ( - "context" - "fmt" - "github.com/aws/smithy-go/middleware" - "strings" -) - -// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum -// HTTP protocol version. -type MinimumProtocolError struct { - proto string - expectedProtoMajor int - expectedProtoMinor int -} - -// Error returns the error message. -func (m *MinimumProtocolError) Error() string { - return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", - m.expectedProtoMajor, m.expectedProtoMinor, m.proto) -} - -// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection -// meets the minimum major ad minor version. -type RequireMinimumProtocol struct { - ProtoMajor int - ProtoMinor int -} - -// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum -// protocol major and minor version. -func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error { - return stack.Deserialize.Insert(&RequireMinimumProtocol{ - ProtoMajor: major, - ProtoMinor: minor, - }, "OperationDeserializer", middleware.Before) -} - -// ID returns the middleware identifier string. -func (r *RequireMinimumProtocol) ID() string { - return "RequireMinimumProtocol" -} - -// HandleDeserialize asserts that the established connection is a HTTP connection with the minimum major and minor -// protocol version. -func (r *RequireMinimumProtocol) HandleDeserialize( - ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, -) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*Response) - if !ok { - return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse) - } - - if !strings.HasPrefix(response.Proto, "HTTP") { - return out, metadata, &MinimumProtocolError{ - proto: response.Proto, - expectedProtoMajor: r.ProtoMajor, - expectedProtoMinor: r.ProtoMinor, - } - } - - if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor { - return out, metadata, &MinimumProtocolError{ - proto: response.Proto, - expectedProtoMajor: r.ProtoMajor, - expectedProtoMinor: r.ProtoMinor, - } - } - - return out, metadata, err -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/properties.go b/vendor/github.com/aws/smithy-go/transport/http/properties.go deleted file mode 100644 index c65aa3932..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/properties.go +++ /dev/null @@ -1,80 +0,0 @@ -package http - -import smithy "github.com/aws/smithy-go" - -type ( - sigV4SigningNameKey struct{} - sigV4SigningRegionKey struct{} - - sigV4ASigningNameKey struct{} - sigV4ASigningRegionsKey struct{} - - isUnsignedPayloadKey struct{} - disableDoubleEncodingKey struct{} -) - -// GetSigV4SigningName gets the signing name from Properties. 
-func GetSigV4SigningName(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4SigningNameKey{}).(string) - return v, ok -} - -// SetSigV4SigningName sets the signing name on Properties. -func SetSigV4SigningName(p *smithy.Properties, name string) { - p.Set(sigV4SigningNameKey{}, name) -} - -// GetSigV4SigningRegion gets the signing region from Properties. -func GetSigV4SigningRegion(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4SigningRegionKey{}).(string) - return v, ok -} - -// SetSigV4SigningRegion sets the signing region on Properties. -func SetSigV4SigningRegion(p *smithy.Properties, region string) { - p.Set(sigV4SigningRegionKey{}, region) -} - -// GetSigV4ASigningName gets the v4a signing name from Properties. -func GetSigV4ASigningName(p *smithy.Properties) (string, bool) { - v, ok := p.Get(sigV4ASigningNameKey{}).(string) - return v, ok -} - -// SetSigV4ASigningName sets the signing name on Properties. -func SetSigV4ASigningName(p *smithy.Properties, name string) { - p.Set(sigV4ASigningNameKey{}, name) -} - -// GetSigV4ASigningRegion gets the v4a signing region set from Properties. -func GetSigV4ASigningRegions(p *smithy.Properties) ([]string, bool) { - v, ok := p.Get(sigV4ASigningRegionsKey{}).([]string) - return v, ok -} - -// SetSigV4ASigningRegions sets the v4a signing region set on Properties. -func SetSigV4ASigningRegions(p *smithy.Properties, regions []string) { - p.Set(sigV4ASigningRegionsKey{}, regions) -} - -// GetIsUnsignedPayload gets whether the payload is unsigned from Properties. -func GetIsUnsignedPayload(p *smithy.Properties) (bool, bool) { - v, ok := p.Get(isUnsignedPayloadKey{}).(bool) - return v, ok -} - -// SetIsUnsignedPayload sets whether the payload is unsigned on Properties. -func SetIsUnsignedPayload(p *smithy.Properties, isUnsignedPayload bool) { - p.Set(isUnsignedPayloadKey{}, isUnsignedPayload) -} - -// GetDisableDoubleEncoding gets whether the payload is unsigned from Properties. -func GetDisableDoubleEncoding(p *smithy.Properties) (bool, bool) { - v, ok := p.Get(disableDoubleEncodingKey{}).(bool) - return v, ok -} - -// SetDisableDoubleEncoding sets whether the payload is unsigned on Properties. -func SetDisableDoubleEncoding(p *smithy.Properties, disableDoubleEncoding bool) { - p.Set(disableDoubleEncodingKey{}, disableDoubleEncoding) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go deleted file mode 100644 index 7177d6f95..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/request.go +++ /dev/null @@ -1,189 +0,0 @@ -package http - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - iointernal "github.com/aws/smithy-go/transport/http/internal/io" -) - -// Request provides the HTTP specific request structure for HTTP specific -// middleware steps to use to serialize input, and send an operation's request. -type Request struct { - *http.Request - stream io.Reader - isStreamSeekable bool - streamStartPos int64 -} - -// NewStackRequest returns an initialized request ready to be populated with the -// HTTP request details. Returns empty interface so the function can be used as -// a parameter to the Smithy middleware Stack constructor. -func NewStackRequest() interface{} { - return &Request{ - Request: &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - ContentLength: -1, // default to unknown length - }, - } -} - -// IsHTTPS returns if the request is HTTPS. 
Returns false if no endpoint URL is set. -func (r *Request) IsHTTPS() bool { - if r.URL == nil { - return false - } - return strings.EqualFold(r.URL.Scheme, "https") -} - -// Clone returns a deep copy of the Request for the new context. A reference to -// the Stream is copied, but the underlying stream is not copied. -func (r *Request) Clone() *Request { - rc := *r - rc.Request = rc.Request.Clone(context.TODO()) - return &rc -} - -// StreamLength returns the number of bytes of the serialized stream attached -// to the request and ok set. If the length cannot be determined, an error will -// be returned. -func (r *Request) StreamLength() (size int64, ok bool, err error) { - return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos) -} - -func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) { - if stream == nil { - return 0, true, nil - } - - if l, ok := stream.(interface{ Len() int }); ok { - return int64(l.Len()), true, nil - } - - if !seekable { - return 0, false, nil - } - - s := stream.(io.Seeker) - endOffset, err := s.Seek(0, io.SeekEnd) - if err != nil { - return 0, false, err - } - - // The reason to seek to streamStartPos instead of 0 is to ensure that the - // SDK only sends the stream from the starting position the user's - // application provided it to the SDK at. For example application opens a - // file, and wants to skip the first N bytes uploading the rest. The - // application would move the file's offset N bytes, then hand it off to - // the SDK to send the remaining. The SDK should respect that initial offset. - _, err = s.Seek(startPos, io.SeekStart) - if err != nil { - return 0, false, err - } - - return endOffset - startPos, true, nil -} - -// RewindStream will rewind the io.Reader to the relative start position if it -// is an io.Seeker. -func (r *Request) RewindStream() error { - // If there is no stream there is nothing to rewind. - if r.stream == nil { - return nil - } - - if !r.isStreamSeekable { - return fmt.Errorf("request stream is not seekable") - } - _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart) - return err -} - -// GetStream returns the request stream io.Reader if a stream is set. If no -// stream is present nil will be returned. -func (r *Request) GetStream() io.Reader { - return r.stream -} - -// IsStreamSeekable returns whether the stream is seekable. -func (r *Request) IsStreamSeekable() bool { - return r.isStreamSeekable -} - -// SetStream returns a clone of the request with the stream set to the provided -// reader. May return an error if the provided reader is seekable but returns -// an error. -func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) { - rc = r.Clone() - - if reader == http.NoBody { - reader = nil - } - - var isStreamSeekable bool - var streamStartPos int64 - switch v := reader.(type) { - case io.Seeker: - n, err := v.Seek(0, io.SeekCurrent) - if err != nil { - return r, err - } - isStreamSeekable = true - streamStartPos = n - default: - // If the stream length can be determined, and is determined to be empty, - // use a nil stream to prevent confusion between empty vs not-empty - // streams. - length, ok, err := streamLength(reader, false, 0) - if err != nil { - return nil, err - } else if ok && length == 0 { - reader = nil - } - } - - rc.stream = reader - rc.isStreamSeekable = isStreamSeekable - rc.streamStartPos = streamStartPos - - return rc, err -} - -// Build returns a build standard HTTP request value from the Smithy request. 
-// The request's stream is wrapped in a safe container that allows it to be -// reused for subsequent attempts. -func (r *Request) Build(ctx context.Context) *http.Request { - req := r.Request.Clone(ctx) - - if r.stream == nil && req.ContentLength == -1 { - req.ContentLength = 0 - } - - switch stream := r.stream.(type) { - case *io.PipeReader: - req.Body = ioutil.NopCloser(stream) - req.ContentLength = -1 - default: - // HTTP Client Request must only have a non-nil body if the - // ContentLength is explicitly unknown (-1) or non-zero. The HTTP - // Client will interpret a non-nil body and ContentLength 0 as - // "unknown". This is unwanted behavior. - if req.ContentLength != 0 && r.stream != nil { - req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream)) - } - } - - return req -} - -// RequestCloner is a function that can take an input request type and clone the request -// for use in a subsequent retry attempt. -func RequestCloner(v interface{}) interface{} { - return v.(*Request).Clone() -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go deleted file mode 100644 index 0c13bfcc8..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/response.go +++ /dev/null @@ -1,34 +0,0 @@ -package http - -import ( - "fmt" - "net/http" -) - -// Response provides the HTTP specific response structure for HTTP specific -// middleware steps to use to deserialize the response from an operation call. -type Response struct { - *http.Response -} - -// ResponseError provides the HTTP centric error type wrapping the underlying -// error with the HTTP response value. -type ResponseError struct { - Response *Response - Err error -} - -// HTTPStatusCode returns the HTTP response status code received from the service. -func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode } - -// HTTPResponse returns the HTTP response received from the service. -func (e *ResponseError) HTTPResponse() *Response { return e.Response } - -// Unwrap returns the nested error if any, or nil. -func (e *ResponseError) Unwrap() error { return e.Err } - -func (e *ResponseError) Error() string { - return fmt.Sprintf( - "http response error StatusCode: %d, %v", - e.Response.StatusCode, e.Err) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go deleted file mode 100644 index 607b196a8..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/time.go +++ /dev/null @@ -1,13 +0,0 @@ -package http - -import ( - "time" - - smithytime "github.com/aws/smithy-go/time" -) - -// ParseTime parses a time string like the HTTP Date header. This uses a more -// relaxed rule set for date parsing compared to the standard library. -func ParseTime(text string) (t time.Time, err error) { - return smithytime.ParseHTTPDate(text) -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go deleted file mode 100644 index 60a5fc100..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/url.go +++ /dev/null @@ -1,44 +0,0 @@ -package http - -import "strings" - -// JoinPath returns an absolute URL path composed of the two paths provided. -// Enforces that the returned path begins with '/'. If added path is empty the -// returned path suffix will match the first parameter suffix. 
-func JoinPath(a, b string) string { - if len(a) == 0 { - a = "/" - } else if a[0] != '/' { - a = "/" + a - } - - if len(b) != 0 && b[0] == '/' { - b = b[1:] - } - - if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' { - a = a + "/" - } - - return a + b -} - -// JoinRawQuery returns an absolute raw query expression. Any duplicate '&' -// will be collapsed to single separator between values. -func JoinRawQuery(a, b string) string { - a = strings.TrimFunc(a, isAmpersand) - b = strings.TrimFunc(b, isAmpersand) - - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - - return a + "&" + b -} - -func isAmpersand(v rune) bool { - return v == '&' -} diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go deleted file mode 100644 index 71a7e0d8a..000000000 --- a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go +++ /dev/null @@ -1,37 +0,0 @@ -package http - -import ( - "strings" -) - -// UserAgentBuilder is a builder for a HTTP User-Agent string. -type UserAgentBuilder struct { - sb strings.Builder -} - -// NewUserAgentBuilder returns a new UserAgentBuilder. -func NewUserAgentBuilder() *UserAgentBuilder { - return &UserAgentBuilder{sb: strings.Builder{}} -} - -// AddKey adds the named component/product to the agent string -func (u *UserAgentBuilder) AddKey(key string) { - u.appendTo(key) -} - -// AddKeyValue adds the named key to the agent string with the given value. -func (u *UserAgentBuilder) AddKeyValue(key, value string) { - u.appendTo(key + "/" + value) -} - -// Build returns the constructed User-Agent string. May be called multiple times. -func (u *UserAgentBuilder) Build() string { - return u.sb.String() -} - -func (u *UserAgentBuilder) appendTo(value string) { - if u.sb.Len() > 0 { - u.sb.WriteRune(' ') - } - u.sb.WriteString(value) -} diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go deleted file mode 100644 index b5eedc1f9..000000000 --- a/vendor/github.com/aws/smithy-go/validation.go +++ /dev/null @@ -1,140 +0,0 @@ -package smithy - -import ( - "bytes" - "fmt" - "strings" -) - -// An InvalidParamsError provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type InvalidParamsError struct { - // Context is the base context of the invalid parameter group. - Context string - errs []InvalidParamError -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *InvalidParamsError) Add(err InvalidParamError) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another InvalidParamsError -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e *InvalidParamsError) Len() int { - return len(e.errs) -} - -// Error returns the string formatted form of the invalid parameters. 
-func (e InvalidParamsError) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs)) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Error()) - } - - return w.String() -} - -// Errs returns a slice of the invalid parameters -func (e InvalidParamsError) Errs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An InvalidParamError represents an invalid parameter error type. -type InvalidParamError interface { - error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type invalidParamError struct { - context string - nestedContext string - field string - reason string -} - -// Error returns the string version of the invalid parameter error. -func (e invalidParamError) Error() string { - return fmt.Sprintf("%s, %s.", e.reason, e.Field()) -} - -// Field Returns the field and context the error occurred. -func (e invalidParamError) Field() string { - sb := &strings.Builder{} - sb.WriteString(e.context) - if sb.Len() > 0 { - if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") { - sb.WriteRune('.') - } - } - if len(e.nestedContext) > 0 { - sb.WriteString(e.nestedContext) - sb.WriteRune('.') - } - sb.WriteString(e.field) - return sb.String() -} - -// SetContext updates the base context of the error. -func (e *invalidParamError) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. -func (e *invalidParamError) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - return - } - // Check if our nested context is an index into a slice or map - if e.nestedContext[:1] != "[" { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - return - } - e.nestedContext = ctx + e.nestedContext -} - -// An ParamRequiredError represents an required parameter error. -type ParamRequiredError struct { - invalidParamError -} - -// NewErrParamRequired creates a new required parameter error. 
-func NewErrParamRequired(field string) *ParamRequiredError { - return &ParamRequiredError{ - invalidParamError{ - field: field, - reason: fmt.Sprintf("missing required field"), - }, - } -} diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go deleted file mode 100644 index 8d70a03ff..000000000 --- a/vendor/github.com/aws/smithy-go/waiter/logger.go +++ /dev/null @@ -1,36 +0,0 @@ -package waiter - -import ( - "context" - "fmt" - - "github.com/aws/smithy-go/logging" - "github.com/aws/smithy-go/middleware" -) - -// Logger is the Logger middleware used by the waiter to log an attempt -type Logger struct { - // Attempt is the current attempt to be logged - Attempt int64 -} - -// ID representing the Logger middleware -func (*Logger) ID() string { - return "WaiterLogger" -} - -// HandleInitialize performs handling of request in initialize stack step -func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( - out middleware.InitializeOutput, metadata middleware.Metadata, err error, -) { - logger := middleware.GetLogger(ctx) - - logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt)) - - return next.HandleInitialize(ctx, in) -} - -// AddLogger is a helper util to add waiter logger after `SetLogger` middleware in -func (m Logger) AddLogger(stack *middleware.Stack) error { - return stack.Initialize.Insert(&m, "SetLogger", middleware.After) -} diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go deleted file mode 100644 index 03e46e2ee..000000000 --- a/vendor/github.com/aws/smithy-go/waiter/waiter.go +++ /dev/null @@ -1,66 +0,0 @@ -package waiter - -import ( - "fmt" - "math" - "time" - - "github.com/aws/smithy-go/rand" -) - -// ComputeDelay computes delay between waiter attempts. The function takes in a current attempt count, -// minimum delay, maximum delay, and remaining wait time for waiter as input. The inputs minDelay and maxDelay -// must always be greater than 0, along with minDelay lesser than or equal to maxDelay. -// -// Returns the computed delay and if next attempt count is possible within the given input time constraints. -// Note that the zeroth attempt results in no delay. -func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) { - // zeroth attempt, no delay - if attempt <= 0 { - return 0, nil - } - - // remainingTime is zero or less, no delay - if remainingTime <= 0 { - return 0, nil - } - - // validate min delay is greater than 0 - if minDelay == 0 { - return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay") - } - - // validate max delay is greater than 0 - if maxDelay == 0 { - return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay") - } - - // Get attempt ceiling to prevent integer overflow. - attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1 - - if attempt > int64(attemptCeiling) { - delay = maxDelay - } else { - // Compute exponential delay based on attempt. 
- ri := 1 << uint64(attempt-1) - // compute delay - delay = minDelay * time.Duration(ri) - } - - if delay != minDelay { - // randomize to get jitter between min delay and delay value - d, err := rand.CryptoRandInt63n(int64(delay - minDelay)) - if err != nil { - return 0, fmt.Errorf("error computing retry jitter, %w", err) - } - - delay = time.Duration(d) + minDelay - } - - // check if this is the last attempt possible and compute delay accordingly - if remainingTime-delay <= minDelay { - delay = remainingTime - minDelay - } - - return delay, nil -} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore deleted file mode 100644 index 50d95c548..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# IDEs -.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md deleted file mode 100644 index 16abdfc08..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. - -Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. - -## Contributing - -* I would like to keep this library as small as possible. 
-* Please don't send a PR without opening an issue and discussing it first. -* If proposed change is not a common use case, I will probably not accept it. - -[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go deleted file mode 100644 index 3676ee405..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/backoff.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Use Retry function for retrying operations that may fail. -// If Retry does not meet your needs, -// copy/paste the function into your project and modify as you wish. -// -// There is also Ticker type similar to time.Ticker. -// You can use it if you need to work with channels. -// -// See Examples section below for usage examples. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. - // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Stop indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. 
-type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go deleted file mode 100644 index 48482330e..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/context.go +++ /dev/null @@ -1,62 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. -type BackOffContext interface { // nolint: golint - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func getContext(b BackOff) context.Context { - if cb, ok := b.(BackOffContext); ok { - return cb.Context() - } - if tb, ok := b.(*backOffTries); ok { - return getContext(tb.delegate) - } - return context.Background() -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - return b.BackOff.NextBackOff() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go deleted file mode 100644 index 2c56c1e71..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ /dev/null @@ -1,161 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. 
-*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff returns Stop. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Stop time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Stop: Stop, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -// Reset must be called before using b. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval * (1 ± RandomizationFactor) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - elapsed := b.GetElapsedTime() - next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) - b.incrementCurrentInterval() - if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { - return b.Stop - } - return next -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - if randomizationFactor == 0 { - return currentInterval // make sure no randomness is used when randomizationFactor is 0. 
- } - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go deleted file mode 100644 index b9c0c51cd..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ /dev/null @@ -1,146 +0,0 @@ -package backoff - -import ( - "errors" - "time" -) - -// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). -// The operation will be retried using a backoff policy if it returns an error. -type OperationWithData[T any] func() (T, error) - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -func (o Operation) withEmptyData() OperationWithData[struct{}] { - return func() (struct{}, error) { - return struct{}{}, o() - } -} - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { - return RetryNotify(o, b, nil) -} - -// RetryWithData is like Retry but returns data in the response too. -func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { - return RetryNotifyWithData(o, b, nil) -} - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - return RetryNotifyWithTimer(operation, b, notify, nil) -} - -// RetryNotifyWithData is like RetryNotify but returns data in the response too. -func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { - return doRetryNotify(operation, b, notify, nil) -} - -// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer -// for each failed attempt before sleep. -// A default timer that uses system timer is used when nil is passed. -func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) - return err -} - -// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. 
-func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - return doRetryNotify(operation, b, notify, t) -} - -func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - var ( - err error - next time.Duration - res T - ) - if t == nil { - t = &defaultTimer{} - } - - defer func() { - t.Stop() - }() - - ctx := getContext(b) - - b.Reset() - for { - res, err = operation() - if err == nil { - return res, nil - } - - var permanent *PermanentError - if errors.As(err, &permanent) { - return res, permanent.Err - } - - if next = b.NextBackOff(); next == Stop { - if cerr := ctx.Err(); cerr != nil { - return res, cerr - } - - return res, err - } - - if notify != nil { - notify(err, next) - } - - t.Start(next) - - select { - case <-ctx.Done(): - return res, ctx.Err() - case <-t.C(): - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -func (e *PermanentError) Unwrap() error { - return e.Err -} - -func (e *PermanentError) Is(target error) bool { - _, ok := target.(*PermanentError) - return ok -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) error { - if err == nil { - return nil - } - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go deleted file mode 100644 index df9d68bce..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/ticker.go +++ /dev/null @@ -1,97 +0,0 @@ -package backoff - -import ( - "context" - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOff - ctx context.Context - timer Timer - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send -// the time at times specified by the BackOff argument. Ticker is -// guaranteed to tick at least once. The channel is closed when Stop -// method is called or BackOff stops. It is not safe to manipulate the -// provided backoff policy (notably calling NextBackOff or Reset) -// while the ticker is running. -func NewTicker(b BackOff) *Ticker { - return NewTickerWithTimer(b, &defaultTimer{}) -} - -// NewTickerWithTimer returns a new Ticker with a custom timer. -// A default timer that uses system timer is used when nil is passed. -func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { - if timer == nil { - timer = &defaultTimer{} - } - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: b, - ctx: getContext(b), - timer: timer, - stop: make(chan struct{}), - } - t.b.Reset() - go t.run() - return t -} - -// Stop turns off a ticker. After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. 
- return - case <-t.ctx.Done(): - return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - t.timer.Start(next) - return t.timer.C() -} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go deleted file mode 100644 index 8120d0213..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/timer.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -type Timer interface { - Start(duration time.Duration) - Stop() - C() <-chan time.Time -} - -// defaultTimer implements Timer interface using time.Timer -type defaultTimer struct { - timer *time.Timer -} - -// C returns the timers channel which receives the current time when the timer fires. -func (t *defaultTimer) C() <-chan time.Time { - return t.timer.C -} - -// Start starts the timer to fire after the given duration -func (t *defaultTimer) Start(duration time.Duration) { - if t.timer == nil { - t.timer = time.NewTimer(duration) - } else { - t.timer.Reset(duration) - } -} - -// Stop is called when the timer is not used anymore and resources may be freed. -func (t *defaultTimer) Stop() { - if t.timer != nil { - t.timer.Stop() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go deleted file mode 100644 index 28d58ca37..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/tries.go +++ /dev/null @@ -1,38 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. -*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries == 0 { - return Stop - } - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE deleted file mode 100644 index 584149b6e..000000000 --- a/vendor/github.com/containerd/containerd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE deleted file mode 100644 index 8915f0277..000000000 --- a/vendor/github.com/containerd/containerd/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go deleted file mode 100644 index 6656465ef..000000000 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package userns - -import ( - "bufio" - "fmt" - "os" - "sync" -) - -var ( - inUserNS bool - nsOnce sync.Once -) - -// RunningInUserNS detects whether we are currently running in a user namespace. -// Originally copied from github.com/lxc/lxd/shared/util.go -func RunningInUserNS() bool { - nsOnce.Do(func() { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return - } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. - */ - if a == 0 && b == 0 && c == 4294967295 { - return - } - inUserNS = true - }) - return inUserNS -} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go deleted file mode 100644 index c67f773d0..000000000 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package userns - -// RunningInUserNS is a stub for non-Linux systems -// Always returns false -func RunningInUserNS() bool { - return false -} diff --git a/vendor/github.com/containerd/log/.golangci.yml b/vendor/github.com/containerd/log/.golangci.yml deleted file mode 100644 index a695775df..000000000 --- a/vendor/github.com/containerd/log/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -linters: - enable: - - exportloopref # Checks for pointers to enclosing loop variables - - gofmt - - goimports - - gosec - - ineffassign - - misspell - - nolintlint - - revive - - staticcheck - - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 - - unconvert - - unused - - vet - - dupword # Checks for duplicate words in the source code - disable: - - errcheck - -run: - timeout: 5m - skip-dirs: - - api - - cluster - - design - - docs - - docs/man - - releases - - reports - - test # e2e scripts diff --git a/vendor/github.com/containerd/log/LICENSE b/vendor/github.com/containerd/log/LICENSE deleted file mode 100644 index 584149b6e..000000000 --- a/vendor/github.com/containerd/log/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/log/README.md b/vendor/github.com/containerd/log/README.md deleted file mode 100644 index 00e084988..000000000 --- a/vendor/github.com/containerd/log/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# log - -A Go package providing a common logging interface across containerd repositories and a way for clients to use and configure logging in containerd packages. - -This package is not intended to be used as a standalone logging package outside of the containerd ecosystem and is intended as an interface wrapper around a logging implementation. -In the future this package may be replaced with a common go logging interface. - -## Project details - -**log** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. - diff --git a/vendor/github.com/containerd/log/context.go b/vendor/github.com/containerd/log/context.go deleted file mode 100644 index 20153066f..000000000 --- a/vendor/github.com/containerd/log/context.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package log provides types and functions related to logging, passing -// loggers through a context, and attaching context to the logger. -// -// # Transitional types -// -// This package contains various types that are aliases for types in [logrus]. -// These aliases are intended for transitioning away from hard-coding logrus -// as logging implementation. Consumers of this package are encouraged to use -// the type-aliases from this package instead of directly using their logrus -// equivalent. -// -// The intent is to replace these aliases with locally defined types and -// interfaces once all consumers are no longer directly importing logrus -// types. -// -// IMPORTANT: due to the transitional purpose of this package, it is not -// guaranteed for the full logrus API to be provided in the future. As -// outlined, these aliases are provided as a step to transition away from -// a specific implementation which, as a result, exposes the full logrus API. -// While no decisions have been made on the ultimate design and interface -// provided by this package, we do not expect carrying "less common" features. -package log - -import ( - "context" - "fmt" - - "github.com/sirupsen/logrus" -) - -// G is a shorthand for [GetLogger]. -// -// We may want to define this locally to a package to get package tagged log -// messages. -var G = GetLogger - -// L is an alias for the standard logger. -var L = &Entry{ - Logger: logrus.StandardLogger(), - // Default is three fields plus a little extra room. - Data: make(Fields, 6), -} - -type loggerKey struct{} - -// Fields type to pass to "WithFields". -type Fields = map[string]any - -// Entry is a logging entry. It contains all the fields passed with -// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn, -// Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -// -// Entry is a transitional type, and currently an alias for [logrus.Entry]. -type Entry = logrus.Entry - -// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using -// zeros to ensure the formatted time is always the same number of -// characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// Level is a logging level. -type Level = logrus.Level - -// Supported log levels. -const ( - // TraceLevel level. Designates finer-grained informational events - // than [DebugLevel]. - TraceLevel Level = logrus.TraceLevel - - // DebugLevel level. Usually only enabled when debugging. Very verbose - // logging. - DebugLevel Level = logrus.DebugLevel - - // InfoLevel level. General operational entries about what's going on - // inside the application. - InfoLevel Level = logrus.InfoLevel - - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel Level = logrus.WarnLevel - - // ErrorLevel level. Logs errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. 
- ErrorLevel Level = logrus.ErrorLevel - - // FatalLevel level. Logs and then calls "logger.Exit(1)". It exits - // even if the logging level is set to Panic. - FatalLevel Level = logrus.FatalLevel - - // PanicLevel level. This is the highest level of severity. Logs and - // then calls panic with the message passed to Debug, Info, ... - PanicLevel Level = logrus.PanicLevel -) - -// SetLevel sets log level globally. It returns an error if the given -// level is not supported. -// -// level can be one of: -// -// - "trace" ([TraceLevel]) -// - "debug" ([DebugLevel]) -// - "info" ([InfoLevel]) -// - "warn" ([WarnLevel]) -// - "error" ([ErrorLevel]) -// - "fatal" ([FatalLevel]) -// - "panic" ([PanicLevel]) -func SetLevel(level string) error { - lvl, err := logrus.ParseLevel(level) - if err != nil { - return err - } - - L.Logger.SetLevel(lvl) - return nil -} - -// GetLevel returns the current log level. -func GetLevel() Level { - return L.Logger.GetLevel() -} - -// OutputFormat specifies a log output format. -type OutputFormat string - -// Supported log output formats. -const ( - // TextFormat represents the text logging format. - TextFormat OutputFormat = "text" - - // JSONFormat represents the JSON logging format. - JSONFormat OutputFormat = "json" -) - -// SetFormat sets the log output format ([TextFormat] or [JSONFormat]). -func SetFormat(format OutputFormat) error { - switch format { - case TextFormat: - L.Logger.SetFormatter(&logrus.TextFormatter{ - TimestampFormat: RFC3339NanoFixed, - FullTimestamp: true, - }) - return nil - case JSONFormat: - L.Logger.SetFormatter(&logrus.JSONFormatter{ - TimestampFormat: RFC3339NanoFixed, - }) - return nil - default: - return fmt.Errorf("unknown log format: %s", format) - } -} - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx)) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. 
-func GetLogger(ctx context.Context) *Entry { - if logger := ctx.Value(loggerKey{}); logger != nil { - return logger.(*Entry) - } - return L.WithContext(ctx) -} diff --git a/vendor/github.com/containerd/platforms/.gitattributes b/vendor/github.com/containerd/platforms/.gitattributes deleted file mode 100644 index a0717e4b3..000000000 --- a/vendor/github.com/containerd/platforms/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf \ No newline at end of file diff --git a/vendor/github.com/containerd/platforms/.golangci.yml b/vendor/github.com/containerd/platforms/.golangci.yml deleted file mode 100644 index a695775df..000000000 --- a/vendor/github.com/containerd/platforms/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -linters: - enable: - - exportloopref # Checks for pointers to enclosing loop variables - - gofmt - - goimports - - gosec - - ineffassign - - misspell - - nolintlint - - revive - - staticcheck - - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 - - unconvert - - unused - - vet - - dupword # Checks for duplicate words in the source code - disable: - - errcheck - -run: - timeout: 5m - skip-dirs: - - api - - cluster - - design - - docs - - docs/man - - releases - - reports - - test # e2e scripts diff --git a/vendor/github.com/containerd/platforms/LICENSE b/vendor/github.com/containerd/platforms/LICENSE deleted file mode 100644 index 584149b6e..000000000 --- a/vendor/github.com/containerd/platforms/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/platforms/README.md b/vendor/github.com/containerd/platforms/README.md deleted file mode 100644 index 2059de771..000000000 --- a/vendor/github.com/containerd/platforms/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# platforms - -A Go package for formatting, normalizing and matching container platforms. - -This package is based on the Open Containers Image Spec definition of a [platform](https://github.com/opencontainers/image-spec/blob/main/specs-go/v1/descriptor.go#L52). - -## Platform Specifier - -While the OCI platform specifications provide a tool for components to -specify structured information, user input typically doesn't need the full -context and much can be inferred. To solve this problem, this package introduces -"specifiers". A specifier has the format -`||/[/]`. The user can provide either the -operating system or the architecture or both. - -An example of a common specifier is `linux/amd64`. If the host has a default -runtime that matches this, the user can simply provide the component that -matters. For example, if an image provides `amd64` and `arm64` support, the -operating system, `linux` can be inferred, so they only have to provide -`arm64` or `amd64`. Similar behavior is implemented for operating systems, -where the architecture may be known but a runtime may support images from -different operating systems. - -## Project details - -**platforms** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). 
-As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. \ No newline at end of file diff --git a/vendor/github.com/containerd/platforms/compare.go b/vendor/github.com/containerd/platforms/compare.go deleted file mode 100644 index 3913ef663..000000000 --- a/vendor/github.com/containerd/platforms/compare.go +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "strconv" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// MatchComparer is able to match and compare platforms to -// filter and sort platforms. -type MatchComparer interface { - Matcher - - Less(specs.Platform, specs.Platform) bool -} - -// platformVector returns an (ordered) vector of appropriate specs.Platform -// objects to try matching for the given platform object (see platforms.Only). -func platformVector(platform specs.Platform) []specs.Platform { - vector := []specs.Platform{platform} - - switch platform.Architecture { - case "amd64": - if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { - for amd64Version--; amd64Version >= 1; amd64Version-- { - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v" + strconv.Itoa(amd64Version), - }) - } - } - vector = append(vector, specs.Platform{ - Architecture: "386", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - }) - case "arm": - if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { - for armVersion--; armVersion >= 5; armVersion-- { - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v" + strconv.Itoa(armVersion), - }) - } - } - case "arm64": - variant := platform.Variant - if variant == "" { - variant = "v8" - } - vector = append(vector, platformVector(specs.Platform{ - Architecture: "arm", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: variant, - })...) - } - - return vector -} - -// Only returns a match comparer for a single platform -// using default resolution logic for the platform. 
-// -// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 -// For arm/v7, will also match arm/v6 and arm/v5 -// For arm/v6, will also match arm/v5 -// For amd64, will also match 386 -func Only(platform specs.Platform) MatchComparer { - return Ordered(platformVector(Normalize(platform))...) -} - -// OnlyStrict returns a match comparer for a single platform. -// -// Unlike Only, OnlyStrict does not match sub platforms. -// So, "arm/vN" will not match "arm/vM" where M < N, -// and "amd64" will not also match "386". -// -// OnlyStrict matches non-canonical forms. -// So, "arm64" matches "arm/64/v8". -func OnlyStrict(platform specs.Platform) MatchComparer { - return Ordered(Normalize(platform)) -} - -// Ordered returns a platform MatchComparer which matches any of the platforms -// but orders them in order they are provided. -func Ordered(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return orderedPlatformComparer{ - matchers: matchers, - } -} - -// Any returns a platform MatchComparer which matches any of the platforms -// with no preference for ordering. -func Any(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return anyPlatformComparer{ - matchers: matchers, - } -} - -// All is a platform MatchComparer which matches all platforms -// with preference for ordering. -var All MatchComparer = allPlatformComparer{} - -type orderedPlatformComparer struct { - matchers []Matcher -} - -func (c orderedPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { - for _, m := range c.matchers { - p1m := m.Match(p1) - p2m := m.Match(p2) - if p1m && !p2m { - return true - } - if p1m || p2m { - return false - } - } - return false -} - -type anyPlatformComparer struct { - matchers []Matcher -} - -func (c anyPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { - var p1m, p2m bool - for _, m := range c.matchers { - if !p1m && m.Match(p1) { - p1m = true - } - if !p2m && m.Match(p2) { - p2m = true - } - if p1m && p2m { - return false - } - } - // If one matches, and the other does, sort match first - return p1m && !p2m -} - -type allPlatformComparer struct{} - -func (allPlatformComparer) Match(specs.Platform) bool { - return true -} - -func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { - return false -} diff --git a/vendor/github.com/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/platforms/cpuinfo.go deleted file mode 100644 index 91f50e8c8..000000000 --- a/vendor/github.com/containerd/platforms/cpuinfo.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - "sync" - - "github.com/containerd/log" -) - -// Present the ARM instruction set architecture, eg: v7, v8 -// Don't use this value directly; call cpuVariant() instead. -var cpuVariantValue string - -var cpuVariantOnce sync.Once - -func cpuVariant() string { - cpuVariantOnce.Do(func() { - if isArmArch(runtime.GOARCH) { - var err error - cpuVariantValue, err = getCPUVariant() - if err != nil { - log.L.Errorf("Error getCPUVariant for OS %s: %v", runtime.GOOS, err) - } - } - }) - return cpuVariantValue -} diff --git a/vendor/github.com/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/platforms/cpuinfo_linux.go deleted file mode 100644 index 98c7001f9..000000000 --- a/vendor/github.com/containerd/platforms/cpuinfo_linux.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "os" - "runtime" - "strings" - - "golang.org/x/sys/unix" -) - -// getMachineArch retrieves the machine architecture through system call -func getMachineArch() (string, error) { - var uname unix.Utsname - err := unix.Uname(&uname) - if err != nil { - return "", err - } - - arch := string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)]) - - return arch, nil -} - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. 
- scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errNotFound) -} - -// getCPUVariantFromArch get CPU variant from arch through a system call -func getCPUVariantFromArch(arch string) (string, error) { - - var variant string - - arch = strings.ToLower(arch) - - if arch == "aarch64" { - variant = "8" - } else if arch[0:4] == "armv" && len(arch) >= 5 { - // Valid arch format is in form of armvXx - switch arch[3:5] { - case "v8": - variant = "8" - case "v7": - variant = "7" - case "v6": - variant = "6" - case "v5": - variant = "5" - case "v4": - variant = "4" - case "v3": - variant = "3" - default: - variant = "unknown" - } - } else { - return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errInvalidArgument) - } - return variant, nil -} - -// getCPUVariant returns cpu variant for ARM -// We first try reading "Cpu architecture" field from /proc/cpuinfo -// If we can't find it, then fall back using a system call -// This is to cover running ARM in emulated environment on x86 host as this field in /proc/cpuinfo -// was not present. -func getCPUVariant() (string, error) { - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - if errors.Is(err, errNotFound) { - // Let's try getting CPU variant from machine architecture - arch, err := getMachineArch() - if err != nil { - return "", fmt.Errorf("failure getting machine architecture: %v", err) - } - - variant, err = getCPUVariantFromArch(arch) - if err != nil { - return "", fmt.Errorf("failure getting CPU variant from machine architecture: %v", err) - } - } else { - return "", fmt.Errorf("failure getting CPU variant: %v", err) - } - } - - // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - if runtime.GOARCH == "arm" && variant == "7" { - model, err := getCPUInfo("model name") - if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - variant = "6" - } - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "unknown" - } - - return variant, nil -} diff --git a/vendor/github.com/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/platforms/cpuinfo_other.go deleted file mode 100644 index 97a1fe8a3..000000000 --- a/vendor/github.com/containerd/platforms/cpuinfo_other.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build !linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "fmt" - "runtime" -) - -func getCPUVariant() (string, error) { - - var variant string - - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { - // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - switch runtime.GOARCH { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "unknown" - } - } else if runtime.GOOS == "freebsd" { - // FreeBSD supports ARMv6 and ARMv7 as well as ARMv4 and ARMv5 (though deprecated) - // detecting those variants is currently unimplemented - switch runtime.GOARCH { - case "arm64": - variant = "v8" - default: - variant = "unknown" - } - } else { - return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errNotImplemented) - } - - return variant, nil -} diff --git a/vendor/github.com/containerd/platforms/database.go b/vendor/github.com/containerd/platforms/database.go deleted file mode 100644 index 2e26fd3b4..000000000 --- a/vendor/github.com/containerd/platforms/database.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - "strings" -) - -// These function are generated from https://golang.org/src/go/build/syslist.go. -// -// We use switch statements because they are slightly faster than map lookups -// and use a little less memory. - -// isKnownOS returns true if we know about the operating system. -// -// The OS value should be normalized before calling this function. -func isKnownOS(os string) bool { - switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": - return true - } - return false -} - -// isArmArch returns true if the architecture is ARM. -// -// The arch value should be normalized before being passed to this function. -func isArmArch(arch string) bool { - switch arch { - case "arm", "arm64": - return true - } - return false -} - -// isKnownArch returns true if we know about the architecture. -// -// The arch value should be normalized before being passed to this function. -func isKnownArch(arch string) bool { - switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": - return true - } - return false -} - -func normalizeOS(os string) string { - if os == "" { - return runtime.GOOS - } - os = strings.ToLower(os) - - switch os { - case "macos": - os = "darwin" - } - return os -} - -// normalizeArch normalizes the architecture. 
-func normalizeArch(arch, variant string) (string, string) { - arch, variant = strings.ToLower(arch), strings.ToLower(variant) - switch arch { - case "i386": - arch = "386" - variant = "" - case "x86_64", "x86-64", "amd64": - arch = "amd64" - if variant == "v1" { - variant = "" - } - case "aarch64", "arm64": - arch = "arm64" - switch variant { - case "8", "v8": - variant = "" - } - case "armhf": - arch = "arm" - variant = "v7" - case "armel": - arch = "arm" - variant = "v6" - case "arm": - switch variant { - case "", "7": - variant = "v7" - case "5", "6", "8": - variant = "v" + variant - } - } - - return arch, variant -} diff --git a/vendor/github.com/containerd/platforms/defaults.go b/vendor/github.com/containerd/platforms/defaults.go deleted file mode 100644 index 9d898d60e..000000000 --- a/vendor/github.com/containerd/platforms/defaults.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -// DefaultString returns the default string specifier for the platform, -// with [PR#6](https://github.com/containerd/platforms/pull/6) the result -// may now also include the OSVersion from the provided platform specification. -func DefaultString() string { - return FormatAll(DefaultSpec()) -} - -// DefaultStrict returns strict form of Default. -func DefaultStrict() MatchComparer { - return OnlyStrict(DefaultSpec()) -} diff --git a/vendor/github.com/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/platforms/defaults_darwin.go deleted file mode 100644 index 72355ca85..000000000 --- a/vendor/github.com/containerd/platforms/defaults_darwin.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// Default returns the default matcher for the platform. 
-func Default() MatchComparer { - return Ordered(DefaultSpec(), specs.Platform{ - // darwin runtime also supports Linux binary via runu/LKL - OS: "linux", - Architecture: runtime.GOARCH, - }) -} diff --git a/vendor/github.com/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/platforms/defaults_freebsd.go deleted file mode 100644 index d3fe89e07..000000000 --- a/vendor/github.com/containerd/platforms/defaults_freebsd.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - }) -} diff --git a/vendor/github.com/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/platforms/defaults_unix.go deleted file mode 100644 index 44acc47eb..000000000 --- a/vendor/github.com/containerd/platforms/defaults_unix.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !windows && !darwin && !freebsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Only(DefaultSpec()) -} diff --git a/vendor/github.com/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/platforms/defaults_windows.go deleted file mode 100644 index 427ed72eb..000000000 --- a/vendor/github.com/containerd/platforms/defaults_windows.go +++ /dev/null @@ -1,118 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "fmt" - "runtime" - "strconv" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sys/windows" -) - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - major, minor, build := windows.RtlGetNtVersionNumbers() - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -type windowsmatcher struct { - specs.Platform - osVersionPrefix string - defaultMatcher Matcher -} - -// Match matches platform with the same windows major, minor -// and build version. -func (m windowsmatcher) Match(p specs.Platform) bool { - match := m.defaultMatcher.Match(p) - - if match && m.OS == "windows" { - // HPC containers do not have OS version filled - if m.OSVersion == "" || p.OSVersion == "" { - return true - } - - hostOsVersion := getOSVersion(m.osVersionPrefix) - ctrOsVersion := getOSVersion(p.OSVersion) - return checkHostAndContainerCompat(hostOsVersion, ctrOsVersion) - } - - return match -} - -func getOSVersion(osVersionPrefix string) osVersion { - parts := strings.Split(osVersionPrefix, ".") - if len(parts) < 3 { - return osVersion{} - } - - majorVersion, _ := strconv.Atoi(parts[0]) - minorVersion, _ := strconv.Atoi(parts[1]) - buildNumber, _ := strconv.Atoi(parts[2]) - - return osVersion{ - MajorVersion: uint8(majorVersion), - MinorVersion: uint8(minorVersion), - Build: uint16(buildNumber), - } -} - -// Less sorts matched platforms in front of other platforms. -// For matched platforms, it puts platforms with larger revision -// number in front. -func (m windowsmatcher) Less(p1, p2 specs.Platform) bool { - m1, m2 := m.Match(p1), m.Match(p2) - if m1 && m2 { - r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) - return r1 > r2 - } - return m1 && !m2 -} - -func revision(v string) int { - parts := strings.Split(v, ".") - if len(parts) < 4 { - return 0 - } - r, err := strconv.Atoi(parts[3]) - if err != nil { - return 0 - } - return r -} - -func prefix(v string) string { - parts := strings.Split(v, ".") - if len(parts) < 4 { - return v - } - return strings.Join(parts[0:3], ".") -} - -// Default returns the current platform's default platform specification. -func Default() MatchComparer { - return Only(DefaultSpec()) -} diff --git a/vendor/github.com/containerd/platforms/errors.go b/vendor/github.com/containerd/platforms/errors.go deleted file mode 100644 index 5ad721e77..000000000 --- a/vendor/github.com/containerd/platforms/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import "errors" - -// These errors mirror the errors defined in [github.com/containerd/containerd/errdefs], -// however, they are not exported as they are not expected to be used as sentinel -// errors by consumers of this package. -// -//nolint:unused // not all errors are used on all platforms. -var ( - errNotFound = errors.New("not found") - errInvalidArgument = errors.New("invalid argument") - errNotImplemented = errors.New("not implemented") -) diff --git a/vendor/github.com/containerd/platforms/platform_compat_windows.go b/vendor/github.com/containerd/platforms/platform_compat_windows.go deleted file mode 100644 index 89e66f0c0..000000000 --- a/vendor/github.com/containerd/platforms/platform_compat_windows.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -// osVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type osVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// Windows Client and Server build numbers. -// -// See: -// https://learn.microsoft.com/en-us/windows/release-health/release-information -// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info -// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information -const ( - // rs5 (version 1809, codename "Redstone 5") corresponds to Windows Server - // 2019 (ltsc2019), and Windows 10 (October 2018 Update). - rs5 = 17763 - - // v21H2Server corresponds to Windows Server 2022 (ltsc2022). - v21H2Server = 20348 - - // v22H2Win11 corresponds to Windows 11 (2022 Update). - v22H2Win11 = 22621 -) - -// List of stable ABI compliant ltsc releases -// Note: List must be sorted in ascending order -var compatLTSCReleases = []uint16{ - v21H2Server, -} - -// CheckHostAndContainerCompat checks if given host and container -// OS versions are compatible. -// It includes support for stable ABI compliant versions as well. -// Every release after WS 2022 will support the previous ltsc -// container image. Stable ABI is in preview mode for windows 11 client. 
-// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility -func checkHostAndContainerCompat(host, ctr osVersion) bool { - // check major minor versions of host and guest - if host.MajorVersion != ctr.MajorVersion || - host.MinorVersion != ctr.MinorVersion { - return false - } - - // If host is < WS 2022, exact version match is required - if host.Build < v21H2Server { - return host.Build == ctr.Build - } - - var supportedLtscRelease uint16 - for i := len(compatLTSCReleases) - 1; i >= 0; i-- { - if host.Build >= compatLTSCReleases[i] { - supportedLtscRelease = compatLTSCReleases[i] - break - } - } - return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build -} diff --git a/vendor/github.com/containerd/platforms/platforms.go b/vendor/github.com/containerd/platforms/platforms.go deleted file mode 100644 index 1bbbdb91d..000000000 --- a/vendor/github.com/containerd/platforms/platforms.go +++ /dev/null @@ -1,308 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package platforms provides a toolkit for normalizing, matching and -// specifying container platforms. -// -// Centered around OCI platform specifications, we define a string-based -// specifier syntax that can be used for user input. With a specifier, users -// only need to specify the parts of the platform that are relevant to their -// context, providing an operating system or architecture or both. -// -// How do I use this package? -// -// The vast majority of use cases should simply use the match function with -// user input. The first step is to parse a specifier into a matcher: -// -// m, err := Parse("linux") -// if err != nil { ... } -// -// Once you have a matcher, use it to match against the platform declared by a -// component, typically from an image or runtime. Since extracting an images -// platform is a little more involved, we'll use an example against the -// platform default: -// -// if ok := m.Match(Default()); !ok { /* doesn't match */ } -// -// This can be composed in loops for resolving runtimes or used as a filter for -// fetch and select images. -// -// More details of the specifier syntax and platform spec follow. -// -// # Declaring Platform Support -// -// Components that have strict platform requirements should use the OCI -// platform specification to declare their support. Typically, this will be -// images and runtimes that should make these declaring which platform they -// support specifically. This looks roughly as follows: -// -// type Platform struct { -// Architecture string -// OS string -// Variant string -// } -// -// Most images and runtimes should at least set Architecture and OS, according -// to their GOARCH and GOOS values, respectively (follow the OCI image -// specification when in doubt). ARM should set variant under certain -// discussions, which are outlined below. 
-// -// # Platform Specifiers -// -// While the OCI platform specifications provide a tool for components to -// specify structured information, user input typically doesn't need the full -// context and much can be inferred. To solve this problem, we introduced -// "specifiers". A specifier has the format -// `||/[/]`. The user can provide either the -// operating system or the architecture or both. -// -// An example of a common specifier is `linux/amd64`. If the host has a default -// of runtime that matches this, the user can simply provide the component that -// matters. For example, if a image provides amd64 and arm64 support, the -// operating system, `linux` can be inferred, so they only have to provide -// `arm64` or `amd64`. Similar behavior is implemented for operating systems, -// where the architecture may be known but a runtime may support images from -// different operating systems. -// -// # Normalization -// -// Because not all users are familiar with the way the Go runtime represents -// platforms, several normalizations have been provided to make this package -// easier to user. -// -// The following are performed for architectures: -// -// Value Normalized -// aarch64 arm64 -// armhf arm -// armel arm/v6 -// i386 386 -// x86_64 amd64 -// x86-64 amd64 -// -// We also normalize the operating system `macos` to `darwin`. -// -// # ARM Support -// -// To qualify ARM architecture, the Variant field is used to qualify the arm -// version. The most common arm version, v7, is represented without the variant -// unless it is explicitly provided. This is treated as equivalent to armhf. A -// previous architecture, armel, will be normalized to arm/v6. -// -// Similarly, the most common arm64 version v8, and most common amd64 version v1 -// are represented without the variant. -// -// While these normalizations are provided, their support on arm platforms has -// not yet been fully implemented and tested. -package platforms - -import ( - "fmt" - "path" - "regexp" - "runtime" - "strconv" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) - osAndVersionRe = regexp.MustCompile(`^([A-Za-z0-9_-]+)(?:\(([A-Za-z0-9_.-]*)\))?$`) -) - -const osAndVersionFormat = "%s(%s)" - -// Platform is a type alias for convenience, so there is no need to import image-spec package everywhere. -type Platform = specs.Platform - -// Matcher matches platforms specifications, provided by an image or runtime. -type Matcher interface { - Match(platform specs.Platform) bool -} - -// NewMatcher returns a simple matcher based on the provided platform -// specification. The returned matcher only looks for equality based on os, -// architecture and variant. -// -// One may implement their own matcher if this doesn't provide the required -// functionality. -// -// Applications should opt to use `Match` over directly parsing specifiers. -func NewMatcher(platform specs.Platform) Matcher { - return newDefaultMatcher(platform) -} - -type matcher struct { - specs.Platform -} - -func (m *matcher) Match(platform specs.Platform) bool { - normalized := Normalize(platform) - return m.OS == normalized.OS && - m.Architecture == normalized.Architecture && - m.Variant == normalized.Variant -} - -func (m *matcher) String() string { - return FormatAll(m.Platform) -} - -// ParseAll parses a list of platform specifiers into a list of platform. 
-func ParseAll(specifiers []string) ([]specs.Platform, error) { - platforms := make([]specs.Platform, len(specifiers)) - for i, s := range specifiers { - p, err := Parse(s) - if err != nil { - return nil, fmt.Errorf("invalid platform %s: %w", s, err) - } - platforms[i] = p - } - return platforms, nil -} - -// Parse parses the platform specifier syntax into a platform declaration. -// -// Platform specifiers are in the format `[()]||[()]/[/]`. -// The minimum required information for a platform specifier is the operating -// system or architecture. The OSVersion can be part of the OS like `windows(10.0.17763)` -// When an OSVersion is specified, then specs.Platform.OSVersion is populated with that value, -// and an empty string otherwise. -// If there is only a single string (no slashes), the -// value will be matched against the known set of operating systems, then fall -// back to the known set of architectures. The missing component will be -// inferred based on the local environment. -func Parse(specifier string) (specs.Platform, error) { - if strings.Contains(specifier, "*") { - // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errInvalidArgument) - } - - // Limit to 4 elements to prevent unbounded split - parts := strings.SplitN(specifier, "/", 4) - - var p specs.Platform - for i, part := range parts { - if i == 0 { - // First element is [()] - osVer := osAndVersionRe.FindStringSubmatch(part) - if osVer == nil { - return specs.Platform{}, fmt.Errorf("%q is an invalid OS component of %q: OSAndVersion specifier component must match %q: %w", part, specifier, osAndVersionRe.String(), errInvalidArgument) - } - - p.OS = normalizeOS(osVer[1]) - p.OSVersion = osVer[2] - } else { - if !specifierRe.MatchString(part) { - return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errInvalidArgument) - } - } - } - - switch len(parts) { - case 1: - // in this case, we will test that the value might be an OS (with or - // without the optional OSVersion specified) and look it up. - // If it is not known, we'll treat it as an architecture. Since - // we have very little information about the platform here, we are - // going to be a little more strict if we don't know about the argument - // value. - if isKnownOS(p.OS) { - // picks a default architecture - p.Architecture = runtime.GOARCH - if p.Architecture == "arm" && cpuVariant() != "v7" { - p.Variant = cpuVariant() - } - - return p, nil - } - - p.Architecture, p.Variant = normalizeArch(parts[0], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - if isKnownArch(p.Architecture) { - p.OS = runtime.GOOS - return p, nil - } - - return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errInvalidArgument) - case 2: - // In this case, we treat as a regular OS[(OSVersion)]/arch pair. We don't care - // about whether or not we know of the platform. 
- p.Architecture, p.Variant = normalizeArch(parts[1], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - - return p, nil - case 3: - // we have a fully specified variant, this is rare - p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) - if p.Architecture == "arm64" && p.Variant == "" { - p.Variant = "v8" - } - - return p, nil - } - - return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errInvalidArgument) -} - -// MustParse is like Parses but panics if the specifier cannot be parsed. -// Simplifies initialization of global variables. -func MustParse(specifier string) specs.Platform { - p, err := Parse(specifier) - if err != nil { - panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) - } - return p -} - -// Format returns a string specifier from the provided platform specification. -func Format(platform specs.Platform) string { - if platform.OS == "" { - return "unknown" - } - - return path.Join(platform.OS, platform.Architecture, platform.Variant) -} - -// FormatAll returns a string specifier that also includes the OSVersion from the -// provided platform specification. -func FormatAll(platform specs.Platform) string { - if platform.OS == "" { - return "unknown" - } - - if platform.OSVersion != "" { - OSAndVersion := fmt.Sprintf(osAndVersionFormat, platform.OS, platform.OSVersion) - return path.Join(OSAndVersion, platform.Architecture, platform.Variant) - } - return path.Join(platform.OS, platform.Architecture, platform.Variant) -} - -// Normalize validates and translate the platform to the canonical value. -// -// For example, if "Aarch64" is encountered, we change it to "arm64" or if -// "x86_64" is encountered, it becomes "amd64". -func Normalize(platform specs.Platform) specs.Platform { - platform.OS = normalizeOS(platform.OS) - platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - - return platform -} diff --git a/vendor/github.com/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/platforms/platforms_other.go deleted file mode 100644 index 03f4dcd99..000000000 --- a/vendor/github.com/containerd/platforms/platforms_other.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// NewMatcher returns the default Matcher for containerd -func newDefaultMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } -} diff --git a/vendor/github.com/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/platforms/platforms_windows.go deleted file mode 100644 index 950e2a2dd..000000000 --- a/vendor/github.com/containerd/platforms/platforms_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// NewMatcher returns a Windows matcher that will match on osVersionPrefix if -// the platform is Windows otherwise use the default matcher -func newDefaultMatcher(platform specs.Platform) Matcher { - prefix := prefix(platform.OSVersion) - return windowsmatcher{ - Platform: platform, - osVersionPrefix: prefix, - defaultMatcher: &matcher{ - Platform: Normalize(platform), - }, - } -} diff --git a/vendor/github.com/cpuguy83/dockercfg/LICENSE b/vendor/github.com/cpuguy83/dockercfg/LICENSE deleted file mode 100644 index 8ed681804..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 Brian Goff - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/cpuguy83/dockercfg/README.md b/vendor/github.com/cpuguy83/dockercfg/README.md deleted file mode 100644 index 880ab8010..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/README.md +++ /dev/null @@ -1,8 +0,0 @@ -### github.com/cpuguy83/dockercfg -Go library to load docker CLI configs, auths, etc. with minimal deps. -So far the only deps are on the stdlib. - -### Usage -See the [godoc](https://godoc.org/github.com/cpuguy83/dockercfg) for API details. - -I'm currently using this in [zapp](https://github.com/cpuguy83/zapp/blob/d25c43d4cd7ccf29fba184aafbc720a753e1a15d/main.go#L58-L83) to handle registry auth instead of always asking the user to enter it. 
\ No newline at end of file diff --git a/vendor/github.com/cpuguy83/dockercfg/auth.go b/vendor/github.com/cpuguy83/dockercfg/auth.go deleted file mode 100644 index 106ab8479..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/auth.go +++ /dev/null @@ -1,215 +0,0 @@ -package dockercfg - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/fs" - "os/exec" - "runtime" - "strings" -) - -// This is used by the docker CLI in cases where an oauth identity token is used. -// In that case the username is stored literally as `` -// When fetching the credentials we check for this value to determine if. -const tokenUsername = "" - -// GetRegistryCredentials gets registry credentials for the passed in registry host. -// -// This will use [LoadDefaultConfig] to read registry auth details from the config. -// If the config doesn't exist, it will attempt to load registry credentials using the default credential helper for the platform. -func GetRegistryCredentials(hostname string) (string, string, error) { - cfg, err := LoadDefaultConfig() - if err != nil { - if !errors.Is(err, fs.ErrNotExist) { - return "", "", fmt.Errorf("load default config: %w", err) - } - - return GetCredentialsFromHelper("", hostname) - } - - return cfg.GetRegistryCredentials(hostname) -} - -// ResolveRegistryHost can be used to transform a docker registry host name into what is used for the docker config/cred helpers -// -// This is useful for using with containerd authorizers. -// Naturally this only transforms docker hub URLs. -func ResolveRegistryHost(host string) string { - switch host { - case "index.docker.io", "docker.io", "https://index.docker.io/v1/", "registry-1.docker.io": - return "https://index.docker.io/v1/" - } - return host -} - -// GetRegistryCredentials gets credentials, if any, for the provided hostname. -// -// Hostnames should already be resolved using [ResolveRegistryHost]. -// -// If the returned username string is empty, the password is an identity token. -func (c *Config) GetRegistryCredentials(hostname string) (string, string, error) { - h, ok := c.CredentialHelpers[hostname] - if ok { - return GetCredentialsFromHelper(h, hostname) - } - - if c.CredentialsStore != "" { - username, password, err := GetCredentialsFromHelper(c.CredentialsStore, hostname) - if err != nil { - return "", "", fmt.Errorf("get credentials from store: %w", err) - } - - if username != "" || password != "" { - return username, password, nil - } - } - - auth, ok := c.AuthConfigs[hostname] - if !ok { - return GetCredentialsFromHelper("", hostname) - } - - if auth.IdentityToken != "" { - return "", auth.IdentityToken, nil - } - - if auth.Username != "" && auth.Password != "" { - return auth.Username, auth.Password, nil - } - - return DecodeBase64Auth(auth) -} - -// DecodeBase64Auth decodes the legacy file-based auth storage from the docker CLI. -// It takes the "Auth" filed from AuthConfig and decodes that into a username and password. -// -// If "Auth" is empty, an empty user/pass will be returned, but not an error. 
-func DecodeBase64Auth(auth AuthConfig) (string, string, error) { - if auth.Auth == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(auth.Auth)) - decoded := make([]byte, decLen) - n, err := base64.StdEncoding.Decode(decoded, []byte(auth.Auth)) - if err != nil { - return "", "", fmt.Errorf("decode auth: %w", err) - } - - decoded = decoded[:n] - - const sep = ":" - user, pass, found := strings.Cut(string(decoded), sep) - if !found { - return "", "", fmt.Errorf("invalid auth: missing %q separator", sep) - } - - return user, pass, nil -} - -// Errors from credential helpers. -var ( - ErrCredentialsNotFound = errors.New("credentials not found in native keychain") - ErrCredentialsMissingServerURL = errors.New("no credentials server URL") -) - -//nolint:gochecknoglobals // These are used to mock exec in tests. -var ( - // execLookPath is a variable that can be used to mock exec.LookPath in tests. - execLookPath = exec.LookPath - // execCommand is a variable that can be used to mock exec.Command in tests. - execCommand = exec.Command -) - -// GetCredentialsFromHelper attempts to lookup credentials from the passed in docker credential helper. -// -// The credential helper should just be the suffix name (no "docker-credential-"). -// If the passed in helper program is empty this will look up the default helper for the platform. -// -// If the credentials are not found, no error is returned, only empty credentials. -// -// Hostnames should already be resolved using [ResolveRegistryHost] -// -// If the username string is empty, the password string is an identity token. -func GetCredentialsFromHelper(helper, hostname string) (string, string, error) { - if helper == "" { - helper, helperErr := getCredentialHelper() - if helperErr != nil { - return "", "", fmt.Errorf("get credential helper: %w", helperErr) - } - - if helper == "" { - return "", "", nil - } - } - - helper = "docker-credential-" + helper - p, err := execLookPath(helper) - if err != nil { - if !errors.Is(err, exec.ErrNotFound) { - return "", "", fmt.Errorf("look up %q: %w", helper, err) - } - - return "", "", nil - } - - var outBuf, errBuf bytes.Buffer - cmd := execCommand(p, "get") - cmd.Stdin = strings.NewReader(hostname) - cmd.Stdout = &outBuf - cmd.Stderr = &errBuf - - if err = cmd.Run(); err != nil { - out := strings.TrimSpace(outBuf.String()) - switch out { - case ErrCredentialsNotFound.Error(): - return "", "", nil - case ErrCredentialsMissingServerURL.Error(): - return "", "", ErrCredentialsMissingServerURL - default: - return "", "", fmt.Errorf("execute %q stdout: %q stderr: %q: %w", - helper, out, strings.TrimSpace(errBuf.String()), err, - ) - } - } - - var creds struct { - Username string `json:"Username"` - Secret string `json:"Secret"` - } - - if err = json.Unmarshal(outBuf.Bytes(), &creds); err != nil { - return "", "", fmt.Errorf("unmarshal credentials from: %q: %w", helper, err) - } - - // When tokenUsername is used, the output is an identity token and the username is garbage. - if creds.Username == tokenUsername { - creds.Username = "" - } - - return creds.Username, creds.Secret, nil -} - -// getCredentialHelper gets the default credential helper name for the current platform. 
-func getCredentialHelper() (string, error) { - switch runtime.GOOS { - case "linux": - if _, err := exec.LookPath("pass"); err != nil { - if errors.Is(err, exec.ErrNotFound) { - return "secretservice", nil - } - return "", fmt.Errorf(`look up "pass": %w`, err) - } - return "pass", nil - case "darwin": - return "osxkeychain", nil - case "windows": - return "wincred", nil - default: - return "", nil - } -} diff --git a/vendor/github.com/cpuguy83/dockercfg/config.go b/vendor/github.com/cpuguy83/dockercfg/config.go deleted file mode 100644 index 5e5390794..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/config.go +++ /dev/null @@ -1,65 +0,0 @@ -package dockercfg - -// Config represents the on disk format of the docker CLI's config file. -type Config struct { - AuthConfigs map[string]AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - NetworksFormat string `json:"networksFormat,omitempty"` - PluginsFormat string `json:"pluginsFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - StatsFormat string `json:"statsFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - CredentialHelpers map[string]string `json:"credHelpers,omitempty"` - Filename string `json:"-"` // Note: for internal use only. - ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` - ServicesFormat string `json:"servicesFormat,omitempty"` - TasksFormat string `json:"tasksFormat,omitempty"` - SecretFormat string `json:"secretFormat,omitempty"` - ConfigFormat string `json:"configFormat,omitempty"` - NodesFormat string `json:"nodesFormat,omitempty"` - PruneFilters []string `json:"pruneFilters,omitempty"` - Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` - Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` - CurrentContext string `json:"currentContext,omitempty"` - CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` - Aliases map[string]string `json:"aliases,omitempty"` -} - -// ProxyConfig contains proxy configuration settings. -type ProxyConfig struct { - HTTPProxy string `json:"httpProxy,omitempty"` - HTTPSProxy string `json:"httpsProxy,omitempty"` - NoProxy string `json:"noProxy,omitempty"` - FTPProxy string `json:"ftpProxy,omitempty"` -} - -// AuthConfig contains authorization information for connecting to a Registry. -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry. - RegistryToken string `json:"registrytoken,omitempty"` -} - -// KubernetesConfig contains Kubernetes orchestrator settings. 
-type KubernetesConfig struct { - AllNamespaces string `json:"allNamespaces,omitempty"` -} diff --git a/vendor/github.com/cpuguy83/dockercfg/load.go b/vendor/github.com/cpuguy83/dockercfg/load.go deleted file mode 100644 index a1c4dca02..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/load.go +++ /dev/null @@ -1,55 +0,0 @@ -package dockercfg - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" -) - -// UserHomeConfigPath returns the path to the docker config in the current user's home dir. -func UserHomeConfigPath() (string, error) { - home, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("user home dir: %w", err) - } - - return filepath.Join(home, ".docker", "config.json"), nil -} - -// ConfigPath returns the path to the docker cli config. -// -// It will either use the DOCKER_CONFIG env var if set, or the value from [UserHomeConfigPath] -// DOCKER_CONFIG would be the dir path where `config.json` is stored, this returns the path to config.json. -func ConfigPath() (string, error) { - if p := os.Getenv("DOCKER_CONFIG"); p != "" { - return filepath.Join(p, "config.json"), nil - } - return UserHomeConfigPath() -} - -// LoadDefaultConfig loads the docker cli config from the path returned from [ConfigPath]. -func LoadDefaultConfig() (Config, error) { - var cfg Config - p, err := ConfigPath() - if err != nil { - return cfg, fmt.Errorf("config path: %w", err) - } - - return cfg, FromFile(p, &cfg) -} - -// FromFile loads config from the specified path into cfg. -func FromFile(configPath string, cfg *Config) error { - f, err := os.Open(configPath) - if err != nil { - return fmt.Errorf("open config: %w", err) - } - defer f.Close() - - if err = json.NewDecoder(f).Decode(&cfg); err != nil { - return fmt.Errorf("decode config: %w", err) - } - - return nil -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96f2..000000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 792994785..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. 
- t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d68..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce945..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. 
- if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
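The sortValues function below is what the SortKeys option ultimately calls into, so its effect is easiest to see from the public API. A short sketch, assuming the ConfigState fields documented later in this package (Indent, SortKeys, SpewKeys):

    package main

    import (
        "os"

        "github.com/davecgh/go-spew/spew"
    )

    func main() {
        m := map[string]int{"b": 2, "a": 1, "c": 3}

        // Default config: keys are printed in Go's randomized map order.
        spew.Dump(m)

        // SortKeys routes the keys through sortValues/valuesSorter above,
        // producing stable, diffable output; SpewKeys is the last-resort
        // fallback of stringifying keys before sorting.
        cs := spew.ConfigState{Indent: " ", SortKeys: true, SpewKeys: true}
        cs.Fdump(os.Stdout, m)
    }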
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f31..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. 
- DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. 
- -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6f1..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. 
- - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89fc1..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. 
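The doc.go comment above walks through Dump, Sdump and the Printf-style wrappers; a compact, runnable distillation of that quick-start (the note type is made up):

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    type note struct {
        Title string
        Tags  []string
        meta  map[string]int // unexported fields are dumped as well
    }

    func main() {
        n := note{Title: "hello", Tags: []string{"a", "b"}, meta: map[string]int{"views": 3}}

        spew.Dump(n)                    // newline-separated, typed, indented output
        s := spew.Sdump(n)              // the same output captured as a string
        fmt.Println(len(s) > 0)

        spew.Printf("inline: %+v\n", n) // compact Formatter-style output
    }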
-func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. 
We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7d7..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
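format.go implements the custom fmt.Formatter that the Printf-style wrappers rely on. A minimal sketch of the four verb combinations it handles, with roughly what they print according to the documentation above (addresses and exact spacing will differ; this is an illustration, not captured output):

    package main

    import "github.com/davecgh/go-spew/spew"

    type box struct{ N int }

    func main() {
        b := &box{N: 7}

        // Unsupported verbs such as %q, plus width/precision, are handed
        // back to the standard fmt package by the helpers defined below.
        spew.Printf("%v\n", b)   // <*>{7}
        spew.Printf("%+v\n", b)  // <*>(0x...){N:7}
        spew.Printf("%#v\n", b)  // (*main.box){N:(int)7}
        spew.Printf("%#+v\n", b) // (*main.box)(0x...){N:(int)7}
    }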
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
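A short, hedged sketch of the NewFormatter usage the doc comment above describes; %v, %+v, %#v, and %#+v are the verb combinations the custom formatter responds to, and the anonymous struct here is made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	type inner struct{ N int }
	v := struct {
		Label string
		Ptr   *inner
	}{Label: "demo", Ptr: &inner{N: 7}}

	// The formatter plugs into the standard fmt verbs:
	//   %v  is the most compact, %+v adds pointer addresses,
	//   %#v adds types, and %#+v adds both.
	fmt.Printf("%v\n", spew.NewFormatter(v))
	fmt.Printf("%+v\n", spew.NewFormatter(v))
	fmt.Printf("%#+v\n", spew.NewFormatter(v))
}
```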
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e3388..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. 
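As the doc comments above spell out, each convenience wrapper is shorthand for the matching fmt call with every argument wrapped by NewFormatter. A small illustrative sketch (the `cfg` map and its keys are invented):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := map[string]int{"pitr": 1, "oplogSpanMin": 10}

	// spew.Printf is shorthand for fmt.Printf with every argument wrapped
	// by spew.NewFormatter, so the two calls below print the same thing.
	spew.Printf("cfg: %+v\n", cfg)
	fmt.Printf("cfg: %+v\n", spew.NewFormatter(cfg))

	// The Sprint* variants return the formatted result instead of printing it.
	line := spew.Sprintln(cfg)
	_ = line
}
```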
-func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/distribution/reference/.gitattributes b/vendor/github.com/distribution/reference/.gitattributes deleted file mode 100644 index d207b1802..000000000 --- a/vendor/github.com/distribution/reference/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf diff --git a/vendor/github.com/distribution/reference/.gitignore b/vendor/github.com/distribution/reference/.gitignore deleted file mode 100644 index dc07e6b04..000000000 --- a/vendor/github.com/distribution/reference/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Cover profiles -*.out diff --git a/vendor/github.com/distribution/reference/.golangci.yml b/vendor/github.com/distribution/reference/.golangci.yml deleted file mode 100644 index 793f0bb7e..000000000 --- a/vendor/github.com/distribution/reference/.golangci.yml +++ /dev/null @@ -1,18 +0,0 @@ -linters: - enable: - - bodyclose - - dupword # Checks for duplicate words in the source code - - gofmt - - goimports - - ineffassign - - misspell - - revive - - staticcheck - - unconvert - - unused - - vet - disable: - - errcheck - -run: - deadline: 2m diff --git a/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md deleted file mode 100644 index 48f6704c6..000000000 --- a/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md +++ /dev/null @@ -1,5 +0,0 @@ -# Code of Conduct - -We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). - -Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct. diff --git a/vendor/github.com/distribution/reference/CONTRIBUTING.md b/vendor/github.com/distribution/reference/CONTRIBUTING.md deleted file mode 100644 index ab2194665..000000000 --- a/vendor/github.com/distribution/reference/CONTRIBUTING.md +++ /dev/null @@ -1,114 +0,0 @@ -# Contributing to the reference library - -## Community help - -If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack. -[Click here for an invite to the CNCF community slack](https://slack.cncf.io/) - -## Reporting security issues - -The maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io). - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. 
If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of (or similar for other container tools): - - `docker version` - - `docker info` - - `docker exec registry --version` - 3. copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing Code - -Contributions should be made via pull requests. Pull requests will be reviewed -by one or more maintainers or reviewers and merged when acceptable. - -You should follow the basic GitHub workflow: - - 1. Use your own [fork](https://help.github.com/en/articles/about-forks) - 2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) - 3. Test your code - 4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) - 5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) - -Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes) -for tips on creating a successful contribution. - -## Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. 
- -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/distribution/reference/GOVERNANCE.md b/vendor/github.com/distribution/reference/GOVERNANCE.md deleted file mode 100644 index 200045b05..000000000 --- a/vendor/github.com/distribution/reference/GOVERNANCE.md +++ /dev/null @@ -1,144 +0,0 @@ -# distribution/reference Project Governance - -Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here. - -For specific guidance on practical contribution steps please -see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide. - -## Maintainership - -There are different types of maintainers, with different responsibilities, but -all maintainers have 3 things in common: - -1) They share responsibility in the project's success. -2) They have made a long-term, recurring time investment to improve the project. -3) They spend that time doing whatever needs to be done, not necessarily what -is the most interesting or fun. - -Maintainers are often under-appreciated, because their work is harder to appreciate. -It's easy to appreciate a really cool and technically advanced feature. It's harder -to appreciate the absence of bugs, the slow but steady improvement in stability, -or the reliability of a release process. But those things distinguish a good -project from a great one. - -## Reviewers - -A reviewer is a core role within the project. -They share in reviewing issues and pull requests and their LGTM counts towards the -required LGTM count to merge a code change into the project. - -Reviewers are part of the organization but do not have write access. -Becoming a reviewer is a core aspect in the journey to becoming a maintainer. - -## Adding maintainers - -Maintainers are first and foremost contributors that have shown they are -committed to the long term success of a project. Contributors wanting to become -maintainers are expected to be deeply involved in contributing code, pull -request review, and triage of issues in the project for more than three months. - -Just contributing does not make you a maintainer, it is about building trust -with the current maintainers of the project and being a person that they can -depend on and trust to make decisions in the best interest of the project. - -Periodically, the existing maintainers curate a list of contributors that have -shown regular activity on the project over the prior months. From this list, -maintainer candidates are selected and proposed in a pull request or a -maintainers communication channel. - -After a candidate has been announced to the maintainers, the existing -maintainers are given five business days to discuss the candidate, raise -objections and cast their vote. Votes may take place on the communication -channel or via pull request comment. Candidates must be approved by at least 66% -of the current maintainers by adding their vote on the mailing list. The -reviewer role has the same process but only requires 33% of current maintainers. 
-Only maintainers of the repository that the candidate is proposed for are -allowed to vote. - -If a candidate is approved, a maintainer will contact the candidate to invite -the candidate to open a pull request that adds the contributor to the -MAINTAINERS file. The voting process may take place inside a pull request if a -maintainer has already discussed the candidacy with the candidate and a -maintainer is willing to be a sponsor by opening the pull request. The candidate -becomes a maintainer once the pull request is merged. - -## Stepping down policy - -Life priorities, interests, and passions can change. If you're a maintainer but -feel you must remove yourself from the list, inform other maintainers that you -intend to step down, and if possible, help find someone to pick up your work. -At the very least, ensure your work can be continued where you left off. - -After you've informed other maintainers, create a pull request to remove -yourself from the MAINTAINERS file. - -## Removal of inactive maintainers - -Similar to the procedure for adding new maintainers, existing maintainers can -be removed from the list if they do not show significant activity on the -project. Periodically, the maintainers review the list of maintainers and their -activity over the last three months. - -If a maintainer has shown insufficient activity over this period, a neutral -person will contact the maintainer to ask if they want to continue being -a maintainer. If the maintainer decides to step down as a maintainer, they -open a pull request to be removed from the MAINTAINERS file. - -If the maintainer wants to remain a maintainer, but is unable to perform the -required duties they can be removed with a vote of at least 66% of the current -maintainers. In this case, maintainers should first propose the change to -maintainers via the maintainers communication channel, then open a pull request -for voting. The voting period is five business days. The voting pull request -should not come as a surpise to any maintainer and any discussion related to -performance must not be discussed on the pull request. - -## How are decisions made? - -Docker distribution is an open-source project with an open design philosophy. -This means that the repository is the source of truth for EVERY aspect of the -project, including its philosophy, design, road map, and APIs. *If it's part of -the project, it's in the repo. If it's in the repo, it's part of the project.* - -As a result, all decisions can be expressed as changes to the repository. An -implementation change is a change to the source code. An API change is a change -to the API specification. A philosophy change is a change to the philosophy -manifesto, and so on. - -All decisions affecting distribution, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Merge or refuse the pull request. Who does this depends on the nature -of the pull request and which areas of the project it affects. - -## Helping contributors with the DCO - -The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work) -requirement is not intended as a roadblock or speed bump. - -Some contributors are not as familiar with `git`, or have used a web -based editor, and thus asking them to `git commit --amend -s` is not the best -way forward. - -In this case, maintainers can update the commits based on clause (c) of the DCO. 
-The most trivial way for a contributor to allow the maintainer to do this, is to -add a DCO signature in a pull requests's comment, or a maintainer can simply -note that the change is sufficiently trivial that it does not substantially -change the existing contribution - i.e., a spelling change. - -When you add someone's DCO, please also add your own to keep a log. - -## I'm a maintainer. Should I make pull requests too? - -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. - -## Conflict Resolution - -If you have a technical dispute that you feel has reached an impasse with a -subset of the community, any contributor may open an issue, specifically -calling for a resolution vote of the current core maintainers to resolve the -dispute. The same voting quorums required (2/3) for adding and removing -maintainers will apply to conflict resolution. diff --git a/vendor/github.com/distribution/reference/LICENSE b/vendor/github.com/distribution/reference/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/github.com/distribution/reference/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/distribution/reference/MAINTAINERS b/vendor/github.com/distribution/reference/MAINTAINERS deleted file mode 100644 index 9e0a60c8b..000000000 --- a/vendor/github.com/distribution/reference/MAINTAINERS +++ /dev/null @@ -1,26 +0,0 @@ -# Distribution project maintainers & reviewers -# -# See GOVERNANCE.md for maintainer versus reviewer roles -# -# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io) -# GitHub ID, Name, Email address -"chrispat","Chris Patterson","chrispat@github.com" -"clarkbw","Bryan Clark","clarkbw@github.com" -"corhere","Cory Snider","csnider@mirantis.com" -"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com" -"heww","He Weiwei","hweiwei@vmware.com" -"joaodrp","João Pereira","jpereira@gitlab.com" -"justincormack","Justin Cormack","justin.cormack@docker.com" -"squizzi","Kyle Squizzato","ksquizzato@mirantis.com" -"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com" -"sargun","Sargun Dhillon","sargun@sargun.me" -"wy65701436","Wang Yan","wangyan@vmware.com" -"stevelasker","Steve Lasker","steve.lasker@microsoft.com" -# -# REVIEWERS -# GitHub ID, Name, Email address -"dmcgowan","Derek McGowan","derek@mcgstyle.net" -"stevvooe","Stephen Day","stevvooe@gmail.com" -"thajeztah","Sebastiaan van Stijn","github@gone.nl" -"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com" -"Jamstah", "James Hewitt", "james.hewitt@gmail.com" diff --git a/vendor/github.com/distribution/reference/Makefile b/vendor/github.com/distribution/reference/Makefile deleted file mode 100644 index c78576b75..000000000 --- a/vendor/github.com/distribution/reference/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -# Project packages. -PACKAGES=$(shell go list ./...) - -# Flags passed to `go test` -BUILDFLAGS ?= -TESTFLAGS ?= - -.PHONY: all build test coverage -.DEFAULT: all - -all: build - -build: ## no binaries to build, so just check compilation suceeds - go build ${BUILDFLAGS} ./... - -test: ## run tests - go test ${TESTFLAGS} ./... - -coverage: ## generate coverprofiles from the unit tests - rm -f coverage.txt - go test ${TESTFLAGS} -cover -coverprofile=cover.out ./... - -.PHONY: help -help: - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md deleted file mode 100644 index 172a02e0b..000000000 --- a/vendor/github.com/distribution/reference/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Distribution reference - -Go library to handle references to container images. 
- - - -[![Build Status](https://github.com/distribution/reference/actions/workflows/test.yml/badge.svg?branch=main&event=push)](https://github.com/distribution/reference/actions?query=workflow%3ACI) -[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/distribution/reference) -[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-blue.svg)](LICENSE) -[![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference) -[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield) - -This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. - -## Contribution - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute -issues, fixes, and patches to this project. - -## Communication - -For async communication and long running discussions please use issues and pull requests on the github repo. -This will be the best place to discuss design and implementation. - -For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/) -that everyone is welcome to join and chat about development. - -## Licenses - -The distribution codebase is released under the [Apache 2.0 license](LICENSE). diff --git a/vendor/github.com/distribution/reference/SECURITY.md b/vendor/github.com/distribution/reference/SECURITY.md deleted file mode 100644 index aaf983c0f..000000000 --- a/vendor/github.com/distribution/reference/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -## Reporting a Vulnerability - -The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away! - -Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io. diff --git a/vendor/github.com/distribution/reference/distribution-logo.svg b/vendor/github.com/distribution/reference/distribution-logo.svg deleted file mode 100644 index cc9f4073b..000000000 --- a/vendor/github.com/distribution/reference/distribution-logo.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/vendor/github.com/distribution/reference/helpers.go b/vendor/github.com/distribution/reference/helpers.go deleted file mode 100644 index d10c7ef83..000000000 --- a/vendor/github.com/distribution/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. 
-func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See [path.Match] for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go deleted file mode 100644 index f4128314c..000000000 --- a/vendor/github.com/distribution/reference/normalize.go +++ /dev/null @@ -1,255 +0,0 @@ -package reference - -import ( - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // legacyDefaultDomain is the legacy domain for Docker Hub (which was - // originally named "the Docker Index"). This domain is still used for - // authentication and image search, which were part of the "v1" Docker - // registry specification. - // - // This domain will continue to be supported, but there are plans to consolidate - // legacy domains to new "canonical" domains. Once those domains are decided - // on, we must update the normalization functions, but preserve compatibility - // with existing installs, clients, and user configuration. - legacyDefaultDomain = "index.docker.io" - - // defaultDomain is the default domain used for images on Docker Hub. - // It is used to normalize "familiar" names to canonical names, for example, - // to convert "ubuntu" to "docker.io/library/ubuntu:latest". - // - // Note that actual domain of Docker Hub's registry is registry-1.docker.io. - // This domain will continue to be supported, but there are plans to consolidate - // legacy domains to new "canonical" domains. Once those domains are decided - // on, we must update the normalization functions, but preserve compatibility - // with existing installs, clients, and user configuration. - defaultDomain = "docker.io" - - // officialRepoPrefix is the namespace used for official images on Docker Hub. - // It is used to normalize "familiar" names to canonical names, for example, - // to convert "ubuntu" to "docker.io/library/ubuntu:latest". - officialRepoPrefix = "library/" - - // defaultTag is the default tag if no tag is provided. - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. 
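An illustrative sketch of the familiar-name helpers removed a little further up in helpers.go (FamiliarName, FamiliarString, FamiliarMatch); the reference string is a hypothetical example:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("docker.io/library/ubuntu:latest")
	if err != nil {
		panic(err)
	}

	// The familiar forms drop the default domain and "library/" prefix.
	fmt.Println(reference.FamiliarName(ref))   // ubuntu
	fmt.Println(reference.FamiliarString(ref)) // ubuntu:latest

	// FamiliarMatch applies a path.Match pattern to the familiar string.
	matched, err := reference.FamiliarMatch("ubuntu*", ref)
	fmt.Println(matched, err) // true <nil>
}
```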
-func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remote string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remote = remainder[:tagSep] - } else { - remote = remainder - } - if strings.ToLower(remote) != remote { - return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote) - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// namedTaggedDigested is a reference that has both a tag and a digest. -type namedTaggedDigested interface { - NamedTagged - Digested -} - -// ParseDockerRef normalizes the image reference following the docker convention, -// which allows for references to contain both a tag and a digest. It returns a -// reference that is either tagged or digested. For references containing both -// a tag and a digest, it returns a digested reference. For example, the following -// reference: -// -// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// Is returned as a digested reference (with the ":latest" tag removed): -// -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// References that are already "tagged" or "digested" are returned unmodified: -// -// // Already a digested reference -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa -// -// // Already a named reference -// docker.io/library/busybox:latest -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if canonical, ok := named.(namedTaggedDigested); ok { - // The reference is both tagged and digested; only return digested. - newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - return WithDigest(newNamed, canonical.Digest()) - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name to domain and remote-name. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remoteName string) { - maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/") - if !ok { - // Fast-path for single element ("familiar" names), such as "ubuntu" - // or "ubuntu:latest". Familiar names must be handled separately, to - // prevent them from being handled as "hostname:port". - // - // Canonicalize them as "docker.io/library/name[:tag]" - - // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain. - return defaultDomain, officialRepoPrefix + name - } - - switch { - case maybeDomain == localhost: - // localhost is a reserved namespace and always considered a domain. - domain, remoteName = maybeDomain, maybeRemoteName - case maybeDomain == legacyDefaultDomain: - // canonicalize the Docker Hub and legacy "Docker Index" domains. - domain, remoteName = defaultDomain, maybeRemoteName - case strings.ContainsAny(maybeDomain, ".:"): - // Likely a domain or IP-address: - // - // - contains a "." 
(e.g., "example.com" or "127.0.0.1") - // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000") - domain, remoteName = maybeDomain, maybeRemoteName - case strings.ToLower(maybeDomain) != maybeDomain: - // Uppercase namespaces are not allowed, so if the first element - // is not lowercase, we assume it to be a domain-name. - domain, remoteName = maybeDomain, maybeRemoteName - default: - // None of the above: it's not a domain, so use the default, and - // use the name input the remote-name. - domain, remoteName = defaultDomain, name - } - - if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') { - // Canonicalize "familiar" names, but only on Docker Hub, not - // on other domains: - // - // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]" - remoteName = officialRepoPrefix + remoteName - } - - return domain, remoteName -} - -// familiarizeName returns a shortened version of the name familiar -// to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if strings.HasPrefix(repo.path, officialRepoPrefix) { - // TODO(thaJeztah): this check may be too strict, as it assumes the - // "library/" namespace does not have nested namespaces. While this - // is true (currently), technically it would be possible for Docker - // Hub to use those (e.g. "library/distros/ubuntu:latest"). - // See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785. - if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') { - repo.path = remainder - } - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. 
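Tying the normalization pieces above together, a hedged sketch of ParseNormalizedNamed, TagNameOnly, and ParseDockerRef; the busybox digest reuses the one from the ParseDockerRef doc comment, the remaining values are made up:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// A familiar name gains the docker.io/library/ prefix, and TagNameOnly
	// adds the default "latest" tag when no tag is present.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/ubuntu:latest

	// When both a tag and a digest are present, ParseDockerRef keeps only the digest.
	ref, err := reference.ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // docker.io/library/busybox@sha256:7cc4...
}
```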
-func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go deleted file mode 100644 index 900398bde..000000000 --- a/vendor/github.com/distribution/reference/reference.go +++ /dev/null @@ -1,432 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] remote-name -// domain := host [':' port-number] -// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A -// domain-name := domain-component ['.' domain-component]* -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// path (or "remote-name") := path-component ['/' path-component]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name. - RepositoryNameTotalLengthMax = 255 - - // NameTotalLengthMax is the maximum total number of characters in a repository name. - // - // Deprecated: use [RepositoryNameTotalLengthMax] instead. - NameTotalLengthMax = RepositoryNameTotalLengthMax -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. 
-type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the [Named] reference. -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the [Named] reference. -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -// splitDomain splits a named reference into a hostname and path string. -// If no valid hostname is found, the hostname is empty and the full value -// is returned as name -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. 
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - if len(repo.path) > RepositoryNameTotalLengthMax { - return nil, ErrNameTooLong - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - - if len(match[2]) > RepositoryNameTotalLengthMax { - return nil, ErrNameTooLong - } - - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
-func TrimNamed(ref Named) Named { - repo := repository{} - if r, ok := ref.(namedRepository); ok { - repo.domain, repo.path = r.Domain(), r.Path() - } else { - repo.domain, repo.path = splitDomain(ref.Name()) - } - return repo -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/distribution/reference/regexp.go b/vendor/github.com/distribution/reference/regexp.go deleted file mode 100644 index 65bc49d79..000000000 --- a/vendor/github.com/distribution/reference/regexp.go +++ /dev/null @@ -1,163 +0,0 @@ -package reference - -import ( - "regexp" - "strings" -) - -// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:"). -var DigestRegexp = regexp.MustCompile(digestPat) - -// DomainRegexp matches hostname or IP-addresses, optionally including a port -// number. It defines the structure of potential domain components that may be -// part of image names. This is purposely a subset of what is allowed by DNS to -// ensure backwards compatibility with Docker image names. It may be a subset of -// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between -// square brackets (excluding zone identifiers as defined by [RFC 6874] or special -// addresses such as IPv4-Mapped). -// -// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874. -var DomainRegexp = regexp.MustCompile(domainAndPort) - -// IdentifierRegexp is the format for string identifier used as a -// content addressable identifier using sha256. These identifiers -// are like digests without the algorithm, since sha256 is used. 
-var IdentifierRegexp = regexp.MustCompile(identifier) - -// NameRegexp is the format for the name component of references, including -// an optional domain and port, but without tag or digest suffix. -var NameRegexp = regexp.MustCompile(namePat) - -// ReferenceRegexp is the full supported format of a reference. The regexp -// is anchored and has capturing groups for name, tag, and digest -// components. -var ReferenceRegexp = regexp.MustCompile(referencePat) - -// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go]. -// -// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28 -var TagRegexp = regexp.MustCompile(tag) - -const ( - // alphanumeric defines the alphanumeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphanumeric = `[a-z0-9]+` - - // separator defines the separators allowed to be embedded in name - // components. This allows one period, one or two underscore and multiple - // dashes. Repeated dashes and underscores are intentionally treated - // differently. In order to support valid hostnames as name components, - // supporting repeated dash was added. Additionally double underscore is - // now allowed as a separator to loosen the restriction for previously - // supported names. - separator = `(?:[._]|__|[-]+)` - - // localhost is treated as a special value for domain-name. Any other - // domain-name without a "." or a ":port" are considered a path component. - localhost = `localhost` - - // domainNameComponent restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp. - domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` - - // optionalPort matches an optional port-number including the port separator - // (e.g. ":80"). - optionalPort = `(?::[0-9]+)?` - - // tag matches valid tag names. From docker/docker:graph/tags.go. - tag = `[\w][\w.-]{0,127}` - - // digestPat matches well-formed digests, including algorithm (e.g. "sha256:"). - // - // TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp - // so that go-digest defines the canonical format. Note that the go-digest is - // more relaxed: - // - it allows multiple algorithms (e.g. "sha256+b64:") to allow - // future expansion of supported algorithms. - // - it allows the "" value to use urlsafe base64 encoding as defined - // in [rfc4648, section 5]. - // - // [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5. - digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` - - // identifier is the format for a content addressable identifier using sha256. - // These identifiers are like digests without the algorithm, since sha256 is used. - identifier = `([a-f0-9]{64})` - - // ipv6address are enclosed between square brackets and may be represented - // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format - // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as - // IPv4-Mapped are deliberately excluded. - ipv6address = `\[(?:[a-fA-F0-9:]+)\]` -) - -var ( - // domainName defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. This includes IPv4 addresses on decimal format. 
- domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent) - - // host defines the structure of potential domains based on the URI - // Host subcomponent on rfc3986. It may be a subset of DNS domain name, - // or an IPv4 address in decimal format, or an IPv6 address between square - // brackets (excluding zone identifiers as defined by rfc6874 or special - // addresses such as IPv4-Mapped). - host = `(?:` + domainName + `|` + ipv6address + `)` - - // allowed by the URI Host subcomponent on rfc3986 to ensure backwards - // compatibility with Docker image names. - domainAndPort = host + optionalPort - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = regexp.MustCompile(anchored(tag)) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat)) - - // pathComponent restricts path-components to start with an alphanumeric - // character, with following parts able to be separated by a separator - // (one period, one or two underscore and multiple dashes). - pathComponent = alphanumeric + anyTimes(separator+alphanumeric) - - // remoteName matches the remote-name of a repository. It consists of one - // or more forward slash (/) delimited path-components: - // - // pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu" - remoteName = pathComponent + anyTimes(`/`+pathComponent) - namePat = optional(domainAndPort+`/`) + remoteName - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName))) - - referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat))) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier)) -) - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...string) string { - return `(?:` + strings.Join(res, "") + `)?` -} - -// anyTimes wraps the expression in a non-capturing group that can occur -// any number of times. -func anyTimes(res ...string) string { - return `(?:` + strings.Join(res, "") + `)*` -} - -// capture wraps the expression in a capturing group. -func capture(res ...string) string { - return `(` + strings.Join(res, "") + `)` -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...string) string { - return `^` + strings.Join(res, "") + `$` -} diff --git a/vendor/github.com/distribution/reference/sort.go b/vendor/github.com/distribution/reference/sort.go deleted file mode 100644 index 416c37b07..000000000 --- a/vendor/github.com/distribution/reference/sort.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package reference - -import ( - "sort" -) - -// Sort sorts string references preferring higher information references. -// -// The precedence is as follows: -// -// 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:") -// 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest") -// 3. [Named] + [Digested] (e.g., "docker.io/library/busybo@sha256:") -// 4. [Named] (e.g., "docker.io/library/busybox") -// 5. [Digested] (e.g., "docker.io@sha256:") -// 6. Parse error -func Sort(references []string) []string { - var prefs []Reference - var bad []string - - for _, ref := range references { - pref, err := ParseAnyReference(ref) - if err != nil { - bad = append(bad, ref) - } else { - prefs = append(prefs, pref) - } - } - sort.Slice(prefs, func(a, b int) bool { - ar := refRank(prefs[a]) - br := refRank(prefs[b]) - if ar == br { - return prefs[a].String() < prefs[b].String() - } - return ar < br - }) - sort.Strings(bad) - var refs []string - for _, pref := range prefs { - refs = append(refs, pref.String()) - } - return append(refs, bad...) -} - -func refRank(ref Reference) uint8 { - if _, ok := ref.(Named); ok { - if _, ok = ref.(Tagged); ok { - if _, ok = ref.(Digested); ok { - return 1 - } - return 2 - } - if _, ok = ref.(Digested); ok { - return 3 - } - return 4 - } - return 5 -} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index 5f93eeb4e..000000000 --- a/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,2456 +0,0 @@ -# File @generated by hack/generate-authors.sh. DO NOT EDIT. -# This file lists all contributors to the repository. -# See hack/generate-authors.sh to make modifications. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L. 
Xu -Aaron Lehmann -Aaron Welch -Aaron Yoshitake -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Abirdcfly -Ada Mancini -Adam Avilla -Adam Dobrawy -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Thornton -Adam Walz -Adam Williams -AdamKorcz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akhil Mohan -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Akshay Moghe -Al Tobey -alambike -Alan Hoyle -Alan Scherger -Alan Thompson -Alano Terblanche -Albert Callarisa -Albert Zhang -Albin Kerouanton -Alec Benson -Alejandro González Hevia -Aleksa Sarai -Aleksandr Chebotov -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Nordlund -Alex Olshansky -Alex Samorukov -Alex Stockinger -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Polakov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis Ries -Alexis Thomas -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Ameya Gawde -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anca Iordache -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Denisse Gómez -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Ushakov -Andrei Vagin -Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kim -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Lindeman -Andy Rothfusz -Andy Smith -Andy Wilson -Andy Zhang -Aneesh Kulkarni -Anes Hasicic -Angel Velazquez -Anil Belur -Anil Madhavapeddy -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Aguilar -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anuj Varma -Anusha Ragunathan -Anyu Wang -apocas -Arash Deshmeh -arcosx -ArikaChen -Arko Dasgupta -Arnaud Lefebvre -Arnaud Porterie -Arnaud Rebillout -Artem Khramov -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -Austin Vazquez -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -Azat Khuyiyakhmetov -Bao Yonglei -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -BartÅ‚omiej Piotrowski -Bastiaan Bakker -Bastien Pascard -bdevloed -Bearice Ren -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Langfeld -Ben Lovy -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Böhmke -Benjamin Wang -Benjamin Yolken -Benny Ng -Benoit Chesneau -Bernerd Schaefer -Bernhard M. Wiedemann -Bert Goethals -Bertrand Roussel -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Billy Ridgway -Bily Zhang -Bin Liu -Bingshen Wang -Bjorn Neergaard -Blake Geno -Boaz Shuster -bobby abbott -Bojun Zhu -Boqin Qin -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brennan Kinney <5098581+polarathene@users.noreply.github.com> -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Milford -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Calvin Liu -Cameron Boehmer -Cameron Sparr -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. 
Su -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chee Hau Lim -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -Chentianze -Chenyang Yan -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -Chiranjeevi Tirunagari -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris Kreussling (Flatbush Gardener) -Chris McKinnel -Chris McKinnel -Chris Price -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Becker -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christoph Ziebuhr -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Petito -Christopher Rigor -Christy Norman -Chun Chen -Ciro S. Costa -Clayton Coleman -Clint Armstrong -Clinton Kitson -clubby789 -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Conor Evans -Corbin Coleman -Corey Farrell -Cory Forsyth -Cory Snider -cressie176 -Cristian Ariza -Cristian Staretu -cristiano balducci -Cristina Yenyxe Gonzalez Garcia -Cruceru Calin-Cristian -cui fliter -CUI Wei -Cuong Manh Le -Cyprian Gracz -Cyril F -Da McGrady -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Plamadeala -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Black -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel P. Berrangé -Daniel Robinson -Daniel S -Daniel Sweet -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Daniele Rondina -Danny Berger -Danny Milosavljevic -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Bellotti -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Karlsson <35727626+dvdksn@users.noreply.github.com> -David Lawrence -David Lechner -David M. Karr -David Mackey -David Manouchehri -David Mat -David Mcanulty -David McKay -David O'Rourke -David P Hilton -David Pelaez -David R. 
Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -Devon Estes -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Dhilip Kumars -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -dingwei -Diogo Monica -DiuDiugirl -Djibril Koné -Djordje Lukic -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. Krivenok -Dmitry Vorobev -Dmytro Iakovliev -docker-unir[bot] -Dolph Mathews -Dominic Tubach -Dominic Yin -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Dorin Geman -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen LuÄanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Koromilas -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Eng Zer Jun -Enguerran -Enrico Weigelt, metux IT consult -Eohyung Lee -epeterso -er0k -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Mountain -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erich Cordoba -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Sipsma -Erik Sjölund -Erik St. Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Espen Suenson -Ethan Bell -Ethan Mosbaugh -Euan Harris -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Lezar -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeniy Makhrov -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein MÃ¥løy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felipe Ruhland -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Feroz Salam -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Filipe Pina -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Schmaus -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Degrassi -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Frank Villaro-Dixon -Frank Yang -Fred Lifton -Frederick F. Kautz IV -Frederico F. 
de Oliveira -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -frobnicaty <92033765+frobnicaty@users.noreply.github.com> -Frédéric Dalleau -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Adrian Samfira -Gabriel Goller -Gabriel L. Somlo -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gabriel Tomitsuka -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -Gaurav Singh -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoff Levand -Geoffrey Bachelet -Geon Kim -George Kontridze -George Ma -George MacRorie -George Xie -Georgi Hristozov -Georgy Yakovlev -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Giuseppe Scrivano -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Goldwyn Rodrigues -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz JaÅ›kiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -Gunadhya S. <6939749+gunadhya@users.noreply.github.com> -Guoqiang QI -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -Haichao Yang -haikuoliu -haining.cao -Hakan Özler -Hamish Hutchings -Hannes Ljungberg -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harald Niesche -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -Hongxu Jia -Honza Pokorny -Hsing-Hui Hsu -Hsing-Yu (David) Chen -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huajin Tong -huang-jl <1046678590@qq.com> -HuanHuan Ye -Huanzhong Zhang -Huayi Zhang -Hugo Barrera -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hui Kang -Hunter Blanks -huqun -Huu Nguyen -Hyeongkyu Lee -Hyzhou Zhy -Iago López Galeiras -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Illia Antypenko -Illo Abdulrahim -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imalasong <2879499479@qq.com> -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isaiah Grace -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. Nunn -Jack Danger Canty -Jack Laxson -Jack Walker <90711509+j2walker@users.noreply.github.com> -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaime Cepeda -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -Jakub Drahos -Jakub Guzik -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Sanders -James Turnbull -James Watkins-Harvey -Jamie Hannaford -Jamshid Afshar -Jan Breig -Jan Chren -Jan Garcia -Jan Götte -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslav Jindrak -Jaroslaw Zabiello -Jasmine Hegman -Jason A. 
Donenfeld -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -Jasper Siepkes -Javier Bassi -jaxgeller -Jay -Jay Kamat -Jay Lim -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Michel Rouet -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeff Zvier -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeongseok Kang -Jeremy Chambers -Jeremy Grosser -Jeremy Huntwork -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jeyanthinath Muthuram -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zeng -Jian Zhang -Jiang Jinyang -Jianyong Wu -Jie Luo -Jie Ma -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Carroll -Jim Ehrismann -Jim Galasyn -Jim Lin -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -Joakim Roubert -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Dohse -Jonas Geiler -Jonas Heinrich -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jordi Massaguer Pla -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julien Pivotto -Julio Guerra -Julio Montes -Jun Du -Jun-Ru Chang -junxu -Jussi Nummelin -Justas Brazauskas -Justen Martin -Justin Chadwell -Justin Cormack -Justin Force -Justin Keller <85903732+jk-vb@users.noreply.github.com> -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérémy Leherpeur -Jérôme Petazzoni -Jörg Thalheim -K. 
Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kaijie Chen -Kamil DomaÅ„ski -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Kazuyoshi Kato -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Bannister -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kenta Tada -Kevin "qwazerty" Houdebert -Kevin Alvarez -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Parsons -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -Kirk Easterson -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konrad Ponichtera -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Kostadin Plachkov -kpcyrd -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Krystian Wojcicki -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Squizzato -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Andringa -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Brehm -Laura Frank -Laurent Bernaille -Laurent Erignoux -Laurie Voss -Leandro Motta Barros -Leandro Siqueira -Lee Calcote -Lee Chao <932819864@qq.com> -Lee, Meng-Han -Lei Gong -Lei Jitang -Leiiwang -Len Weincier -Lennie -Leo Gallucci -Leonardo Nodari -Leonardo Taccari -Leszek Kowalski -Levi Blackstone -Levi Gross -Levi Harrison -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liangwei -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limeidan -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Delossantos -Louis Opter -Luboslav Pivarc -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Henrique Mulinari -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Heeren -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marat Radchenko -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Feit -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark Vainomaa -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Braun -Martin Dojcak -Martin Honermeyer -Martin Jirku 
-Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Maru Newby -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Mathieu Paturel -Matt Apperson -Matt Bachmann -Matt Bajor -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Morrison <3maven@gmail.com> -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Fronton -Matthieu Hauglustaine -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Harmathy -Max Shytikov -Max Timchenko -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Menghui Chen -Mert YazıcıoÄŸlu -mgniu -Micah Zoltu -Michael A. Smith -Michael Beskin -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Kebe -Michael Kuehn -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael Weidmann -Michael West -Michael Zhao -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Kostrzewa -Michal Minář -Michal Rostecki -Michal Wieczorek -Michaël Pailloncy -MichaÅ‚ Czeraszkiewicz -MichaÅ‚ Gryko -MichaÅ‚ Kosek -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Miguel Perez -Mihai Borobocea -Mihuleacc Sergiu -Mikael Davranche -Mike Brown -Mike Bush -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -Mike Sul -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milas Bowman -Milind Chawre -Miloslav TrmaÄ -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohd Sadiq -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Muhammad Zohaib Aslam -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Carlson -Nathan Herald -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. 
Rabenau -Nick Adcock -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Santos -Nick Stenning -Nick Stinemates -Nick Wood -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Niel Drummond -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Edigaryev -Nikolay Milovanov -ningmingxiao -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Nolan Miles -Noriki Nakamura -nponeccop -Nurahmadie -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Odin Ugedal -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Olly Pomeroy -Omri Shiv -Onur Filiz -Oriol Francès -Oscar Bonilla <6f6231@gmail.com> -oscar.chen <2972789494@qq.com> -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. Giarrusso -Pascal -Pascal Bach -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Haas -Patrick Hemmer -Patrick St. laurent -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul "TBBle" Hampson -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Seiffert -Paul Weaver -Paulo Gomes -Paulo Ribeiro -Pavel Lobashov -Pavel MatÄ›ja -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -PaweÅ‚ Gronowski -payall4u -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Pete Woods -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Å vihlík -Petros Angelatos -Phil -Phil Estes -Phil Sphicas -Phil Spitler -Philip Alexander Etling -Philip K. Warren -Philip Monroe -Philipp Fruck -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -Piotr Karbowski -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Pradipta Kr. 
Banerjee -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Puneet Pruthi -Pure White -pysqz -Qiang Huang -Qin TianHuan -Qinglan Peng -Quan Tian -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rachit Sharma -Radostin Stoyanov -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -realityone -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Horwood -Rich Moyse -Rich Seymour -Richard Burnison -Richard Hansen -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Cowsill <42620235+rcowsill@users.noreply.github.com> -Rob Gulewich -Rob Murray -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Shade -Robert Stern -Robert Terhaar -Robert Wallis -Robert Wang -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -Robin Thoni -robpc -Rodolfo Carvalho -Rodrigo Campos -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Mazur -Roman Strashkin -Roman Volosatovs -Roman Zabaluev -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -Rony Weng -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Roy Reznik -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui JingAn -Rui Lopes -Ruilin Li -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Barry -Ryan Belgrave -Ryan Campbell -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Shea -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Ryoga Saito -Régis Behmo -Rémy Greinhofer -s. rannou -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sam Thibault -Sam Whited -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -sanchayanghosh -Sandeep Bansal -Sankar சஙà¯à®•ர௠-Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -SataQiu -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Moser -Scott Percival -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. 
Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Sebastian Höffner -Sebastian Radloff -Sebastian Thomschke -Sebastien Goasguen -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -Serhii Nakon -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayan Pooya -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shengjing Zhu -Shev Yan -Shih-Yuan Lee -Shihao Xia -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -Shu-Wai Chow -shuai-z -Shukui Yang -Sian Lerk Lau -Siarhei Rasiukevich -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simon Barendse -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -skanehira -Smark Meng -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Sotiris Salloumis -Soulou -Spencer Brown -Spencer Smith -Spike Curtis -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Staf Wagemakers -Stanislav Bondarenko -Stanislav Levin -Steeve Morin -Stefan Berger -Stefan Gehrig -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Steffen Butzer -Stephan Henningsen -Stephan Spindler -Stephen Benjamin -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -SteÌphane Este-Gracias -Stig Larsson -Su Wang -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sune Keller -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Sören Tempel -Tabakhase -Tadej Janež -Takuto Sato -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -Terry Chu -terryding77 <550147740@qq.com> -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thiago Alves Silva -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Graf -Thomas Grainger -Thomas Hansen -Thomas Ledos -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tiago Seabra -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Till Claassen -Till Wegmüller -Tim -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wagner -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timchenxiaoyu <837829664@qq.com> -timfeirg -Timo Rothenpieler -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Pfandzelter -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Parker -Tom Sweeney -Tom Wilkie -Tom X. 
Tobin -Tom Zhao -Tomas Janousek -Tomas Kral -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tomek MaÅ„ko -Tommaso Visconti -Tomoya Tabuchi -Tomáš HrÄka -Tomáš Virtus -tonic -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Toshiaki Makita -Tõnis Tiigi -Trace Andreason -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tudor Brindus -Ty Alexander -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -Valentin Kulesh -vanderliang -Velko Ivanov -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Toni -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Vikas Choudhary -Vikram bir Singh -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Boulineau -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Anjos -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vladislav Kolesnikov -Vlastimil Zeman -Vojtech Vitek (V-Teq) -voloder <110066198+voloder@users.noreply.github.com> -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -wanghuaiqing -Ward Vandewege -WarheadsSE -Wassim Dhif -Wataru Ishida -Wayne Chang -Wayne Song -weebney -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -wenlxie -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wesley Pettit -Wewang Xiaorenfine -Wiktor Kwapisiewicz -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -Wilson Júnior -Wing-Kam Wong -WiseTrem -Wolfgang Nagele -Wolfgang Powisch -Wonjun Kim -WuLonghui -xamyzhao -Xia Wu -Xian Chaobo -Xianglin Gao -Xianjie -Xianlu Bird -Xiao YongBiao -Xiao Zhang -XiaoBing Jiang -Xiaodong Liu -Xiaodong Zhang -Xiaohua Ding -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -xin.li -Xinbo Weng -Xinfeng Liu -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -yalpul -YAMADA Tsuyoshi -Yamasaki Masahide -Yamazaki Masashi -Yan Feng -Yan Zhu -Yang Bai -Yang Li -Yang Pengfei -yangchenliang -Yann Autissier -Yanqiang Miao -Yao Zaiyong -Yash Murty -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有å‹) -youcai -Youcef YEKHLEF -Youfu Zhang -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yufei Xiong -Yuhao Fang -Yuichiro Kaneko -YujiOshima -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Blusseau <90z7oey02@sneakemail.com> -Yves Junqueira -Zac Dover -Zach Borboa -Zach Gershman -Zachary Jaffee -Zain Memon -Zaiste! 
-Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -zhangguanzhang -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -ZhiPeng Lu -zhipengzuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Ziheng Liu -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álvaro Lázaro -Átila Camurça Alves -吴小白 <296015668@qq.com> -尹吉峰 -屈骏 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 -정재영 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE deleted file mode 100644 index 6d8d58fb6..000000000 --- a/vendor/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner.
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 58b19b6d1..000000000 --- a/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md deleted file mode 100644 index 381f19881..000000000 --- a/vendor/github.com/docker/docker/api/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Working on the Engine API - -The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. - -It consists of various components in this repository: - -- `api/swagger.yaml` A Swagger definition of the API. -- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. -- `cli/` The command-line client. -- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. -- `daemon/` The daemon, which serves the API. - -## Swagger definition - -The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: - -1. Automatically generate documentation. -2. Automatically generate the Go server and client. (A work-in-progress.) -3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. - -## Updating the API documentation - -The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. - -The file is split into two main sections: - -- `definitions`, which defines re-usable objects used in requests and responses -- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) - -To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. 
- -There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919). - -`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. - -## Viewing the API documentation - -When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. - -Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. - -The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go deleted file mode 100644 index f831735f8..000000000 --- a/vendor/github.com/docker/docker/api/common.go +++ /dev/null @@ -1,20 +0,0 @@ -package api // import "github.com/docker/docker/api" - -// Common constants for daemon and client. -const ( - // DefaultVersion of the current REST API. - DefaultVersion = "1.46" - - // MinSupportedAPIVersion is the minimum API version that can be supported - // by the API server, specified as "major.minor". Note that the daemon - // may be configured with a different minimum API version, as returned - // in [github.com/docker/docker/api/types.Version.MinAPIVersion]. - // - // API requests for API versions lower than the configured version produce - // an error. - MinSupportedAPIVersion = "1.24" - - // NoBaseImageSpecifier is the symbol used by the FROM - // command to specify that no base image is to be used. - NoBaseImageSpecifier = "scratch" -) diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml deleted file mode 100644 index f07a02737..000000000 --- a/vendor/github.com/docker/docker/api/swagger-gen.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -layout: - models: - - name: definition - source: asset:model - target: "{{ joinFilePath .Target .ModelPackage }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" - operations: - - name: handler - source: asset:serverOperation - target: "{{ joinFilePath .Target .APIPackage .Package }}" - file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml deleted file mode 100644 index 78f0ce1f2..000000000 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ /dev/null @@ -1,12770 +0,0 @@ -# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. -# -# This is used for generating API documentation and the types used by the -# client/server. See api/README.md for more information. -# -# Some style notes: -# - This file is used by ReDoc, which allows GitHub Flavored Markdown in -# descriptions. -# - There is no maximum line length, for ease of editing and pretty diffs. -# - operationIds are in the format "NounVerb", with a singular noun. 
- -swagger: "2.0" -schemes: - - "http" - - "https" -produces: - - "application/json" - - "text/plain" -consumes: - - "application/json" - - "text/plain" -basePath: "/v1.46" -info: - title: "Docker Engine API" - version: "1.46" - x-logo: - url: "https://docs.docker.com/assets/images/logo-docker-main.png" - description: | - The Engine API is an HTTP API served by Docker Engine. It is the API the - Docker client uses to communicate with the Engine, so everything the Docker - client can do can be done with the API. - - Most of the client's commands map directly to API endpoints (e.g. `docker ps` - is `GET /containers/json`). The notable exception is running containers, - which consists of several API calls. - - # Errors - - The API uses standard HTTP status codes to indicate the success or failure - of the API call. The body of the response will be JSON in the following - format: - - ``` - { - "message": "page not found" - } - ``` - - # Versioning - - The API is usually changed in each release, so API calls are versioned to - ensure that clients don't break. To lock to a specific version of the API, - you prefix the URL with its version, for example, call `/v1.30/info` to use - the v1.30 version of the `/info` endpoint. If the API version specified in - the URL is not supported by the daemon, a HTTP `400 Bad Request` error message - is returned. - - If you omit the version-prefix, the current version of the API (v1.46) is used. - For example, calling `/info` is the same as calling `/v1.46/info`. Using the - API without a version-prefix is deprecated and will be removed in a future release. - - Engine releases in the near future should support this version of the API, - so your client will continue to work even if it is talking to a newer Engine. - - The API uses an open schema model, which means server may add extra properties - to responses. Likewise, the server will ignore any extra query parameters and - request body properties. When you write clients, you need to ignore additional - properties in responses to ensure they do not break when talking to newer - daemons. - - - # Authentication - - Authentication for registries is handled client side. The client has to send - authentication details to various endpoints that need to communicate with - registries, such as `POST /images/(name)/push`. These are sent as - `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) - (JSON) string with the following structure: - - ``` - { - "username": "string", - "password": "string", - "email": "string", - "serveraddress": "string" - } - ``` - - The `serveraddress` is a domain/IP without a protocol. Throughout this - structure, double quotes are required. - - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), - you can just pass this instead of credentials: - - ``` - { - "identitytoken": "9cbaf023786cd7..." - } - ``` - -# The tags on paths define the menu sections in the ReDoc documentation, so -# the usage of tags must make sense for that: -# - They should be singular, not plural. -# - There should not be too many tags, or the menu becomes unwieldy. For -# example, it is preferable to add a path to the "System" tag instead of -# creating a tag with a single path in it. -# - The order of tags in this list defines the order in the menu. -tags: - # Primary objects - - name: "Container" - x-displayName: "Containers" - description: | - Create and manage containers. 
- - name: "Image" - x-displayName: "Images" - - name: "Network" - x-displayName: "Networks" - description: | - Networks are user-defined networks that containers can be attached to. - See the [networking documentation](https://docs.docker.com/network/) - for more information. - - name: "Volume" - x-displayName: "Volumes" - description: | - Create and manage persistent storage that can be attached to containers. - - name: "Exec" - x-displayName: "Exec" - description: | - Run new commands inside running containers. Refer to the - [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) - for more information. - - To exec a command in a container, you first need to create an exec instance, - then start it. These two API endpoints are wrapped up in a single command-line - command, `docker exec`. - - # Swarm things - - name: "Swarm" - x-displayName: "Swarm" - description: | - Engines can be clustered together in a swarm. Refer to the - [swarm mode documentation](https://docs.docker.com/engine/swarm/) - for more information. - - name: "Node" - x-displayName: "Nodes" - description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode - must be enabled for these endpoints to work. - - name: "Service" - x-displayName: "Services" - description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must - be enabled for these endpoints to work. - - name: "Task" - x-displayName: "Tasks" - description: | - A task is a container running on a swarm. It is the atomic scheduling unit - of swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Secret" - x-displayName: "Secrets" - description: | - Secrets are sensitive data that can be used by services. Swarm mode must - be enabled for these endpoints to work. - - name: "Config" - x-displayName: "Configs" - description: | - Configs are application configurations that can be used by services. Swarm - mode must be enabled for these endpoints to work. - # System things - - name: "Plugin" - x-displayName: "Plugins" - - name: "System" - x-displayName: "System" - -definitions: - Port: - type: "object" - description: "An open port on a container" - required: [PrivatePort, Type] - properties: - IP: - type: "string" - format: "ip-address" - description: "Host IP address that the container's port is mapped to" - PrivatePort: - type: "integer" - format: "uint16" - x-nullable: false - description: "Port on the container" - PublicPort: - type: "integer" - format: "uint16" - description: "Port exposed on the host" - Type: - type: "string" - x-nullable: false - enum: ["tcp", "udp", "sctp"] - example: - PrivatePort: 8080 - PublicPort: 80 - Type: "tcp" - - MountPoint: - type: "object" - description: | - MountPoint represents a mount point configuration inside the container. - This is used for reporting the mountpoints in use by a container. - properties: - Type: - description: | - The mount type: - - - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" - example: "volume" - Name: - description: | - Name is the name reference to the underlying data defined by `Source` - e.g., the volume name. - type: "string" - example: "myvolume" - Source: - description: | - Source location of the mount. 
- - For volumes, this contains the storage location of the volume (within - `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - the source (host) part of the bind-mount. For `tmpfs` mount points, this - field is empty. - type: "string" - example: "/var/lib/docker/volumes/myvolume/_data" - Destination: - description: | - Destination is the path relative to the container root (`/`) where - the `Source` is mounted inside the container. - type: "string" - example: "/usr/share/nginx/html/" - Driver: - description: | - Driver is the volume driver used to create the volume (if it is a volume). - type: "string" - example: "local" - Mode: - description: | - Mode is a comma separated list of options supplied by the user when - creating the bind/volume mount. - - The default is platform-specific (`"z"` on Linux, empty on Windows). - type: "string" - example: "z" - RW: - description: | - Whether the mount is mounted writable (read-write). - type: "boolean" - example: true - Propagation: - description: | - Propagation describes how mounts are propagated from the host into the - mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) - for details. This field is not used on Windows. - type: "string" - example: "" - - DeviceMapping: - type: "object" - description: "A device mapping between the host and container" - properties: - PathOnHost: - type: "string" - PathInContainer: - type: "string" - CgroupPermissions: - type: "string" - example: - PathOnHost: "/dev/deviceName" - PathInContainer: "/dev/deviceName" - CgroupPermissions: "mrw" - - DeviceRequest: - type: "object" - description: "A request for devices to be sent to device drivers" - properties: - Driver: - type: "string" - example: "nvidia" - Count: - type: "integer" - example: -1 - DeviceIDs: - type: "array" - items: - type: "string" - example: - - "0" - - "1" - - "GPU-fef8089b-4820-abfc-e83e-94318197576e" - Capabilities: - description: | - A list of capabilities; an OR list of AND lists of capabilities. - type: "array" - items: - type: "array" - items: - type: "string" - example: - # gpu AND nvidia AND compute - - ["gpu", "nvidia", "compute"] - Options: - description: | - Driver-specific options, specified as a key/value pairs. These options - are passed directly to the driver. - type: "object" - additionalProperties: - type: "string" - - ThrottleDevice: - type: "object" - properties: - Path: - description: "Device path" - type: "string" - Rate: - description: "Rate" - type: "integer" - format: "int64" - minimum: 0 - - Mount: - type: "object" - properties: - Target: - description: "Container path." - type: "string" - Source: - description: "Mount source (e.g. a volume name, a host path)." - type: "string" - Type: - description: | - The mount type. Available types: - - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. 
- - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" - ReadOnly: - description: "Whether the mount should be read-only." - type: "boolean" - Consistency: - description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." - type: "string" - BindOptions: - description: "Optional configuration for the `bind` type." - type: "object" - properties: - Propagation: - description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." - type: "string" - enum: - - "private" - - "rprivate" - - "shared" - - "rshared" - - "slave" - - "rslave" - NonRecursive: - description: "Disable recursive bind mount." - type: "boolean" - default: false - CreateMountpoint: - description: "Create mount point on host if missing" - type: "boolean" - default: false - ReadOnlyNonRecursive: - description: | - Make the mount non-recursively read-only, but still leave the mount recursive - (unless NonRecursive is set to `true` in conjunction). - - Addded in v1.44, before that version all read-only mounts were - non-recursive by default. To match the previous behaviour this - will default to `true` for clients on versions prior to v1.44. - type: "boolean" - default: false - ReadOnlyForceRecursive: - description: "Raise an error if the mount cannot be made recursively read-only." - type: "boolean" - default: false - VolumeOptions: - description: "Optional configuration for the `volume` type." - type: "object" - properties: - NoCopy: - description: "Populate volume with data from the target." - type: "boolean" - default: false - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - DriverConfig: - description: "Map of driver specific options" - type: "object" - properties: - Name: - description: "Name of the driver to use to create the volume." - type: "string" - Options: - description: "key/value map of driver specific options." - type: "object" - additionalProperties: - type: "string" - Subpath: - description: "Source path inside the volume. Must be relative without any back traversals." - type: "string" - example: "dir-inside-volume/subdirectory" - TmpfsOptions: - description: "Optional configuration for the `tmpfs` type." - type: "object" - properties: - SizeBytes: - description: "The size for the tmpfs mount in bytes." - type: "integer" - format: "int64" - Mode: - description: "The permission mode for the tmpfs mount in an integer." - type: "integer" - Options: - description: | - The options to be passed to the tmpfs mount. An array of arrays. - Flag options should be provided as 1-length arrays. Other types - should be provided as as 2-length arrays, where the first item is - the key and the second the value. - type: "array" - items: - type: "array" - minItems: 1 - maxItems: 2 - items: - type: "string" - example: - [["noexec"]] - - RestartPolicy: - description: | - The behavior to apply when the container exits. The default is not to - restart. - - An ever increasing delay (double the previous delay, starting at 100ms) is - added before each restart to prevent flooding the server. 
- type: "object" - properties: - Name: - type: "string" - description: | - - Empty string means not to restart - - `no` Do not automatically restart - - `always` Always restart - - `unless-stopped` Restart always except when the user has manually stopped the container - - `on-failure` Restart only when the container exit code is non-zero - enum: - - "" - - "no" - - "always" - - "unless-stopped" - - "on-failure" - MaximumRetryCount: - type: "integer" - description: | - If `on-failure` is used, the number of times to retry before giving up. - - Resources: - description: "A container's resources (cgroups config, ulimits, etc)" - type: "object" - properties: - # Applicable to all platforms - CpuShares: - description: | - An integer value representing this container's relative CPU weight - versus other containers. - type: "integer" - Memory: - description: "Memory limit in bytes." - type: "integer" - format: "int64" - default: 0 - # Applicable to UNIX platforms - CgroupParent: - description: | - Path to `cgroups` under which the container's `cgroup` is created. If - the path is not absolute, the path is considered to be relative to the - `cgroups` path of the init process. Cgroups are created if they do not - already exist. - type: "string" - BlkioWeight: - description: "Block IO weight (relative weight)." - type: "integer" - minimum: 0 - maximum: 1000 - BlkioWeightDevice: - description: | - Block IO weight (relative device weight) in the form: - - ``` - [{"Path": "device_path", "Weight": weight}] - ``` - type: "array" - items: - type: "object" - properties: - Path: - type: "string" - Weight: - type: "integer" - minimum: 0 - BlkioDeviceReadBps: - description: | - Limit read rate (bytes per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteBps: - description: | - Limit write rate (bytes per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceReadIOps: - description: | - Limit read rate (IO per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteIOps: - description: | - Limit write rate (IO per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - CpuPeriod: - description: "The length of a CPU period in microseconds." - type: "integer" - format: "int64" - CpuQuota: - description: | - Microseconds of CPU time that the container can get in a CPU period. - type: "integer" - format: "int64" - CpuRealtimePeriod: - description: | - The length of a CPU real-time period in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. - type: "integer" - format: "int64" - CpuRealtimeRuntime: - description: | - The length of a CPU real-time runtime in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. - type: "integer" - format: "int64" - CpusetCpus: - description: | - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - type: "string" - example: "0-3" - CpusetMems: - description: | - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only - effective on NUMA systems. - type: "string" - Devices: - description: "A list of devices to add to the container." 
- type: "array" - items: - $ref: "#/definitions/DeviceMapping" - DeviceCgroupRules: - description: "a list of cgroup rules to apply to the container" - type: "array" - items: - type: "string" - example: "c 13:* rwm" - DeviceRequests: - description: | - A list of requests for devices to be sent to device drivers. - type: "array" - items: - $ref: "#/definitions/DeviceRequest" - KernelMemoryTCP: - description: | - Hard limit for kernel TCP buffer memory (in bytes). Depending on the - OCI runtime in use, this option may be ignored. It is no longer supported - by the default (runc) runtime. - - This field is omitted when empty. - type: "integer" - format: "int64" - MemoryReservation: - description: "Memory soft limit in bytes." - type: "integer" - format: "int64" - MemorySwap: - description: | - Total memory limit (memory + swap). Set as `-1` to enable unlimited - swap. - type: "integer" - format: "int64" - MemorySwappiness: - description: | - Tune a container's memory swappiness behavior. Accepts an integer - between 0 and 100. - type: "integer" - format: "int64" - minimum: 0 - maximum: 100 - NanoCpus: - description: "CPU quota in units of 10-9 CPUs." - type: "integer" - format: "int64" - OomKillDisable: - description: "Disable OOM Killer for the container." - type: "boolean" - Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. - type: "boolean" - x-nullable: true - PidsLimit: - description: | - Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` - to not change. - type: "integer" - format: "int64" - x-nullable: true - Ulimits: - description: | - A list of resource limits to set in the container. For example: - - ``` - {"Name": "nofile", "Soft": 1024, "Hard": 2048} - ``` - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - # Applicable to Windows - CpuCount: - description: | - The number of usable CPUs (Windows only). - - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - CpuPercent: - description: | - The usable percentage of the available CPUs (Windows only). - - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - IOMaximumIOps: - description: "Maximum IOps for the container system drive (Windows only)" - type: "integer" - format: "int64" - IOMaximumBandwidth: - description: | - Maximum IO in bytes per second for the container system drive - (Windows only). - type: "integer" - format: "int64" - - Limit: - description: | - An object describing a limit on resources which can be requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - Pids: - description: | - Limits the maximum number of PIDs in the container. Set `0` for unlimited. 
- type: "integer" - format: "int64" - default: 0 - example: 100 - - ResourceObject: - description: | - An object describing the resources which can be advertised by a node and - requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - GenericResources: - $ref: "#/definitions/GenericResources" - - GenericResources: - description: | - User-defined resources can be either Integer resources (e.g, `SSD=3`) or - String resources (e.g, `GPU=UUID1`). - type: "array" - items: - type: "object" - properties: - NamedResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "string" - DiscreteResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "integer" - format: "int64" - example: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - HealthConfig: - description: "A test to perform to check that the container is healthy." - type: "object" - properties: - Test: - description: | - The test to perform. Possible values are: - - - `[]` inherit healthcheck from image or parent image - - `["NONE"]` disable healthcheck - - `["CMD", args...]` exec arguments directly - - `["CMD-SHELL", command]` run command with system's default shell - type: "array" - items: - type: "string" - Interval: - description: | - The time to wait between checks in nanoseconds. It should be 0 or at - least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - Timeout: - description: | - The time to wait before considering the check to have hung. It should - be 0 or at least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - Retries: - description: | - The number of consecutive failures needed to consider a container as - unhealthy. 0 means inherit. - type: "integer" - StartPeriod: - description: | - Start period for the container to initialize before starting - health-retries countdown in nanoseconds. It should be 0 or at least - 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - StartInterval: - description: | - The time to wait between checks in nanoseconds during the start period. - It should be 0 or at least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - - Health: - description: | - Health stores information about the container's healthcheck results. 
- type: "object" - x-nullable: true - properties: - Status: - description: | - Status is one of `none`, `starting`, `healthy` or `unhealthy` - - - "none" Indicates there is no healthcheck - - "starting" Starting indicates that the container is not yet ready - - "healthy" Healthy indicates that the container is running correctly - - "unhealthy" Unhealthy indicates that the container has a problem - type: "string" - enum: - - "none" - - "starting" - - "healthy" - - "unhealthy" - example: "healthy" - FailingStreak: - description: "FailingStreak is the number of consecutive failures" - type: "integer" - example: 0 - Log: - type: "array" - description: | - Log contains the last few results (oldest first) - items: - $ref: "#/definitions/HealthcheckResult" - - HealthcheckResult: - description: | - HealthcheckResult stores information about a single run of a healthcheck probe - type: "object" - x-nullable: true - properties: - Start: - description: | - Date and time at which this check started in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "date-time" - example: "2020-01-04T10:44:24.496525531Z" - End: - description: | - Date and time at which this check ended in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2020-01-04T10:45:21.364524523Z" - ExitCode: - description: | - ExitCode meanings: - - - `0` healthy - - `1` unhealthy - - `2` reserved (considered unhealthy) - - other values: error running probe - type: "integer" - example: 0 - Output: - description: "Output from last check" - type: "string" - - HostConfig: - description: "Container configuration that depends on the host we are running on" - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - # Applicable to all platforms - Binds: - type: "array" - description: | - A list of volume bindings for this container. Each volume binding - is a string in one of these forms: - - - `host-src:container-dest[:options]` to bind-mount a host path - into the container. Both `host-src`, and `container-dest` must - be an _absolute_ path. - - `volume-name:container-dest[:options]` to bind-mount a volume - managed by a volume driver into the container. `container-dest` - must be an _absolute_ path. - - `options` is an optional, comma-delimited list of: - - - `nocopy` disables automatic copying of data from the container - path to the volume. The `nocopy` flag only applies to named volumes. - - `[ro|rw]` mounts a volume read-only or read-write, respectively. - If omitted or set to `rw`, volumes are mounted read-write. - - `[z|Z]` applies SELinux labels to allow or deny multiple containers - to read and write to the same volume. - - `z`: a _shared_ content label is applied to the content. This - label indicates that multiple containers can share the volume - content, for both reading and writing. - - `Z`: a _private unshared_ label is applied to the content. - This label indicates that only the current container can use - a private volume. Labeling systems such as SELinux require - proper labels to be placed on volume content that is mounted - into a container. Without a label, the security system can - prevent a container's processes from using the content. By - default, the labels set by the host operating system are not - modified. - - `[[r]shared|[r]slave|[r]private]` specifies mount - [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). 
- This only applies to bind-mounted volumes, not internal volumes - or named volumes. Mount propagation requires the source mount - point (the location where the source directory is mounted in the - host operating system) to have the correct propagation properties. - For shared volumes, the source mount point must be set to `shared`. - For slave volumes, the mount must be set to either `shared` or - `slave`. - items: - type: "string" - ContainerIDFile: - type: "string" - description: "Path to a file where the container ID is written" - LogConfig: - type: "object" - description: "The logging configuration for this container" - properties: - Type: - type: "string" - enum: - - "json-file" - - "syslog" - - "journald" - - "gelf" - - "fluentd" - - "awslogs" - - "splunk" - - "etwlogs" - - "none" - Config: - type: "object" - additionalProperties: - type: "string" - NetworkMode: - type: "string" - description: | - Network mode to use for this container. Supported standard values - are: `bridge`, `host`, `none`, and `container:`. Any - other value is taken as a custom network's name to which this - container should connect to. - PortBindings: - $ref: "#/definitions/PortMap" - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - AutoRemove: - type: "boolean" - description: | - Automatically remove the container when the container's process - exits. This has no effect if `RestartPolicy` is set. - VolumeDriver: - type: "string" - description: "Driver that this container uses to mount volumes." - VolumesFrom: - type: "array" - description: | - A list of volumes to inherit from another container, specified in - the form `[:]`. - items: - type: "string" - Mounts: - description: | - Specification for mounts to be added to the container. - type: "array" - items: - $ref: "#/definitions/Mount" - ConsoleSize: - type: "array" - description: | - Initial console size, as an `[height, width]` array. - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - Annotations: - type: "object" - description: | - Arbitrary non-identifying metadata attached to container and - provided to the runtime when the container is started. - additionalProperties: - type: "string" - - # Applicable to UNIX platforms - CapAdd: - type: "array" - description: | - A list of kernel capabilities to add to the container. Conflicts - with option 'Capabilities'. - items: - type: "string" - CapDrop: - type: "array" - description: | - A list of kernel capabilities to drop from the container. Conflicts - with option 'Capabilities'. - items: - type: "string" - CgroupnsMode: - type: "string" - enum: - - "private" - - "host" - description: | - cgroup namespace mode for the container. Possible values are: - - - `"private"`: the container runs in its own private cgroup namespace - - `"host"`: use the host system's cgroup namespace - - If not specified, the daemon default is used, which can either be `"private"` - or `"host"`, depending on daemon version, kernel support and configuration. - Dns: - type: "array" - description: "A list of DNS servers for the container to use." - items: - type: "string" - DnsOptions: - type: "array" - description: "A list of DNS options." - items: - type: "string" - DnsSearch: - type: "array" - description: "A list of DNS search domains." - items: - type: "string" - ExtraHosts: - type: "array" - description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` - file. Specified in the form `["hostname:IP"]`. 
- items: - type: "string" - GroupAdd: - type: "array" - description: | - A list of additional groups that the container process will run as. - items: - type: "string" - IpcMode: - type: "string" - description: | - IPC sharing mode for the container. Possible values are: - - - `"none"`: own private IPC namespace, with /dev/shm not mounted - - `"private"`: own private IPC namespace - - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - - `"container:"`: join another (shareable) container's IPC namespace - - `"host"`: use the host system's IPC namespace - - If not specified, daemon default is used, which can either be `"private"` - or `"shareable"`, depending on daemon version and configuration. - Cgroup: - type: "string" - description: "Cgroup to use for the container." - Links: - type: "array" - description: | - A list of links for the container in the form `container_name:alias`. - items: - type: "string" - OomScoreAdj: - type: "integer" - description: | - An integer value containing the score given to the container in - order to tune OOM killer preferences. - example: 500 - PidMode: - type: "string" - description: | - Set the PID (Process) Namespace mode for the container. It can be - either: - - - `"container:"`: joins another container's PID namespace - - `"host"`: use the host's PID namespace inside the container - Privileged: - type: "boolean" - description: "Gives the container full access to the host." - PublishAllPorts: - type: "boolean" - description: | - Allocates an ephemeral host port for all of a container's - exposed ports. - - Ports are de-allocated when the container stops and allocated when - the container starts. The allocated port might be changed when - restarting the container. - - The port is selected from the ephemeral port range that depends on - the kernel. For example, on Linux the range is defined by - `/proc/sys/net/ipv4/ip_local_port_range`. - ReadonlyRootfs: - type: "boolean" - description: "Mount the container's root filesystem as read only." - SecurityOpt: - type: "array" - description: | - A list of string values to customize labels for MLS systems, such - as SELinux. - items: - type: "string" - StorageOpt: - type: "object" - description: | - Storage driver options for this container, in the form `{"size": "120G"}`. - additionalProperties: - type: "string" - Tmpfs: - type: "object" - description: | - A map of container directories which should be replaced by tmpfs - mounts, and their corresponding mount options. For example: - - ``` - { "/run": "rw,noexec,nosuid,size=65536k" } - ``` - additionalProperties: - type: "string" - UTSMode: - type: "string" - description: "UTS namespace to use for the container." - UsernsMode: - type: "string" - description: | - Sets the usernamespace mode for the container when usernamespace - remapping option is enabled. - ShmSize: - type: "integer" - format: "int64" - description: | - Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. - minimum: 0 - Sysctls: - type: "object" - description: | - A list of kernel parameters (sysctls) to set in the container. - For example: - - ``` - {"net.ipv4.ip_forward": "1"} - ``` - additionalProperties: - type: "string" - Runtime: - type: "string" - description: "Runtime to use with this container." - # Applicable to Windows - Isolation: - type: "string" - description: | - Isolation technology of the container. 
(Windows only) - enum: - - "default" - - "process" - - "hyperv" - MaskedPaths: - type: "array" - description: | - The list of paths to be masked inside the container (this overrides - the default set of paths). - items: - type: "string" - ReadonlyPaths: - type: "array" - description: | - The list of paths to be set as read-only inside the container - (this overrides the default set of paths). - items: - type: "string" - - ContainerConfig: - description: | - Configuration for a container that is portable between hosts. - type: "object" - properties: - Hostname: - description: | - The hostname to use for the container, as a valid RFC 1123 hostname. - type: "string" - example: "439f4e91bd1d" - Domainname: - description: | - The domain name to use for the container. - type: "string" - User: - description: "The user that commands are run as inside the container." - type: "string" - AttachStdin: - description: "Whether to attach to `stdin`." - type: "boolean" - default: false - AttachStdout: - description: "Whether to attach to `stdout`." - type: "boolean" - default: true - AttachStderr: - description: "Whether to attach to `stderr`." - type: "boolean" - default: true - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - x-nullable: true - additionalProperties: - type: "object" - enum: - - {} - default: {} - example: { - "80/tcp": {}, - "443/tcp": {} - } - Tty: - description: | - Attach standard streams to a TTY, including `stdin` if it is not closed. - type: "boolean" - default: false - OpenStdin: - description: "Open `stdin`" - type: "boolean" - default: false - StdinOnce: - description: "Close `stdin` after one attached client disconnects" - type: "boolean" - default: false - Env: - description: | - A list of environment variables to set inside the container in the - form `["VAR=value", ...]`. A variable without `=` is removed from the - environment, rather than to have an empty value. - type: "array" - items: - type: "string" - example: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - description: | - Command to run specified as a string or an array of strings. - type: "array" - items: - type: "string" - example: ["/bin/sh"] - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - default: false - example: false - x-nullable: true - Image: - description: | - The name (or reference) of the image to use when creating the container, - or which was used when the container was created. - type: "string" - example: "example-image:1.0" - Volumes: - description: | - An object mapping mount point paths inside the container to empty - objects. - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - example: "/public/" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the - entry point is reset to system default (i.e., the entry point used by - docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: "array" - items: - type: "string" - example: [] - NetworkDisabled: - description: "Disable networking for the container." - type: "boolean" - x-nullable: true - MacAddress: - description: | - MAC address of the container. 
- - Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. - type: "string" - x-nullable: true - OnBuild: - description: | - `ONBUILD` metadata that were defined in the image's `Dockerfile`. - type: "array" - x-nullable: true - items: - type: "string" - example: [] - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - StopSignal: - description: | - Signal to stop a container as a string or unsigned integer. - type: "string" - example: "SIGTERM" - x-nullable: true - StopTimeout: - description: "Timeout to stop a container in seconds." - type: "integer" - default: 10 - x-nullable: true - Shell: - description: | - Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. - type: "array" - x-nullable: true - items: - type: "string" - example: ["/bin/sh", "-c"] - - ImageConfig: - description: | - Configuration of the image. These fields are used as defaults - when starting a container from the image. - type: "object" - properties: - Hostname: - description: | - The hostname to use for the container, as a valid RFC 1123 hostname. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. - type: "string" - example: "" - Domainname: - description: | - The domain name to use for the container. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. - type: "string" - example: "" - User: - description: "The user that commands are run as inside the container." - type: "string" - example: "web:web" - AttachStdin: - description: | - Whether to attach to `stdin`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. - type: "boolean" - default: false - example: false - AttachStdout: - description: | - Whether to attach to `stdout`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. - type: "boolean" - default: false - example: false - AttachStderr: - description: | - Whether to attach to `stderr`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. - type: "boolean" - default: false - example: false - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - x-nullable: true - additionalProperties: - type: "object" - enum: - - {} - default: {} - example: { - "80/tcp": {}, - "443/tcp": {} - } - Tty: - description: | - Attach standard streams to a TTY, including `stdin` if it is not closed. - -
- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. - type: "boolean" - default: false - example: false - OpenStdin: - description: | - Open `stdin` - -
- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. - type: "boolean" - default: false - example: false - StdinOnce: - description: | - Close `stdin` after one attached client disconnects. - -
- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. - type: "boolean" - default: false - example: false - Env: - description: | - A list of environment variables to set inside the container in the - form `["VAR=value", ...]`. A variable without `=` is removed from the - environment, rather than to have an empty value. - type: "array" - items: - type: "string" - example: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - description: | - Command to run specified as a string or an array of strings. - type: "array" - items: - type: "string" - example: ["/bin/sh"] - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - default: false - example: false - x-nullable: true - Image: - description: | - The name (or reference) of the image to use when creating the container, - or which was used when the container was created. - -
- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. - type: "string" - default: "" - example: "" - Volumes: - description: | - An object mapping mount point paths inside the container to empty - objects. - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - example: - "/app/data": {} - "/app/config": {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - example: "/public/" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the - entry point is reset to system default (i.e., the entry point used by - docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: "array" - items: - type: "string" - example: [] - NetworkDisabled: - description: | - Disable networking for the container. - -
- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. - type: "boolean" - default: false - example: false - x-nullable: true - MacAddress: - description: | - MAC address of the container. - -
- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. - type: "string" - default: "" - example: "" - x-nullable: true - OnBuild: - description: | - `ONBUILD` metadata that were defined in the image's `Dockerfile`. - type: "array" - x-nullable: true - items: - type: "string" - example: [] - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - StopSignal: - description: | - Signal to stop a container as a string or unsigned integer. - type: "string" - example: "SIGTERM" - x-nullable: true - StopTimeout: - description: | - Timeout to stop a container in seconds. - -
- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. - type: "integer" - default: 10 - x-nullable: true - Shell: - description: | - Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. - type: "array" - x-nullable: true - items: - type: "string" - example: ["/bin/sh", "-c"] - # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. - example: - "Hostname": "" - "Domainname": "" - "User": "web:web" - "AttachStdin": false - "AttachStdout": false - "AttachStderr": false - "ExposedPorts": { - "80/tcp": {}, - "443/tcp": {} - } - "Tty": false - "OpenStdin": false - "StdinOnce": false - "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] - "Cmd": ["/bin/sh"] - "Healthcheck": { - "Test": ["string"], - "Interval": 0, - "Timeout": 0, - "Retries": 0, - "StartPeriod": 0, - "StartInterval": 0 - } - "ArgsEscaped": true - "Image": "" - "Volumes": { - "/app/data": {}, - "/app/config": {} - } - "WorkingDir": "/public/" - "Entrypoint": [] - "OnBuild": [] - "Labels": { - "com.example.some-label": "some-value", - "com.example.some-other-label": "some-other-value" - } - "StopSignal": "SIGTERM" - "Shell": ["/bin/sh", "-c"] - - NetworkingConfig: - description: | - NetworkingConfig represents the container's networking configuration for - each of its interfaces. - It is used for the networking configs specified in the `docker create` - and `docker network connect` commands. - type: "object" - properties: - EndpointsConfig: - description: | - A mapping of network name to endpoint configuration for that network. - The endpoint configuration can be left empty to connect to that - network with no particular endpoint configuration. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - example: - # putting an example here, instead of using the example values from - # /definitions/EndpointSettings, because EndpointSettings contains - # operational data returned when inspecting a container that we don't - # accept here. - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - MacAddress: "02:42:ac:12:05:02" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - database_nw: {} - - NetworkSettings: - description: "NetworkSettings exposes the network settings in the API" - type: "object" - properties: - Bridge: - description: | - Name of the default bridge interface when dockerd's --bridge flag is set. - type: "string" - example: "docker0" - SandboxID: - description: SandboxID uniquely represents a container's network stack. - type: "string" - example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" - HairpinMode: - description: | - Indicates if hairpin NAT should be enabled on the virtual interface. - - Deprecated: This field is never set and will be removed in a future release. - type: "boolean" - example: false - LinkLocalIPv6Address: - description: | - IPv6 unicast address using the link-local prefix. - - Deprecated: This field is never set and will be removed in a future release. - type: "string" - example: "" - LinkLocalIPv6PrefixLen: - description: | - Prefix length of the IPv6 unicast address. - - Deprecated: This field is never set and will be removed in a future release. 
- type: "integer" - example: "" - Ports: - $ref: "#/definitions/PortMap" - SandboxKey: - description: SandboxKey is the full path of the netns handle - type: "string" - example: "/var/run/docker/netns/8ab54b426c38" - - SecondaryIPAddresses: - description: "Deprecated: This field is never set and will be removed in a future release." - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - SecondaryIPv6Addresses: - description: "Deprecated: This field is never set and will be removed in a future release." - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO properties below are part of DefaultNetworkSettings, which is - # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 - EndpointID: - description: | - EndpointID uniquely represents a service endpoint in a Sandbox. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.1" - GlobalIPv6Address: - description: | - Global IPv6 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 64 - IPAddress: - description: | - IPv4 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address for this network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8:2::100" - MacAddress: - description: | - MAC address for the container on the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "02:42:ac:11:00:04" - Networks: - description: | - Information about all networks that the container is connected to. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - - Address: - description: Address represents an IPv4 or IPv6 IP address. - type: "object" - properties: - Addr: - description: IP address. - type: "string" - PrefixLen: - description: Mask length of the IP address. - type: "integer" - - PortMap: - description: | - PortMap describes the mapping of container ports to host ports, using the - container's port-number and protocol as key in the format `/`, - for example, `80/udp`. - - If a container's port is mapped for multiple protocols, separate entries - are added to the mapping table. - type: "object" - additionalProperties: - type: "array" - x-nullable: true - items: - $ref: "#/definitions/PortBinding" - example: - "443/tcp": - - HostIp: "127.0.0.1" - HostPort: "4443" - "80/tcp": - - HostIp: "0.0.0.0" - HostPort: "80" - - HostIp: "0.0.0.0" - HostPort: "8080" - "80/udp": - - HostIp: "0.0.0.0" - HostPort: "80" - "53/udp": - - HostIp: "0.0.0.0" - HostPort: "53" - "2377/tcp": null - - PortBinding: - description: | - PortBinding represents a binding between a host IP address and a host - port. - type: "object" - properties: - HostIp: - description: "Host IP address that the container's port is mapped to." - type: "string" - example: "127.0.0.1" - HostPort: - description: "Host port number that the container's port is mapped to." - type: "string" - example: "4443" - - GraphDriverData: - description: | - Information about the storage driver used to store the container's and - image's filesystem. - type: "object" - required: [Name, Data] - properties: - Name: - description: "Name of the storage driver." - type: "string" - x-nullable: false - example: "overlay2" - Data: - description: | - Low-level storage metadata, provided as key/value pairs. - - This information is driver-specific, and depends on the storage-driver - in use, and should be used for informational purposes only. - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: { - "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", - "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", - "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" - } - - FilesystemChange: - description: | - Change in the container's filesystem. - type: "object" - required: [Path, Kind] - properties: - Path: - description: | - Path to file or directory that has changed. - type: "string" - x-nullable: false - Kind: - $ref: "#/definitions/ChangeType" - - ChangeType: - description: | - Kind of change - - Can be one of: - - - `0`: Modified ("C") - - `1`: Added ("A") - - `2`: Deleted ("D") - type: "integer" - format: "uint8" - enum: [0, 1, 2] - x-nullable: false - - ImageInspect: - description: | - Information about an image in the local image cache. - type: "object" - properties: - Id: - description: | - ID is the content-addressable ID of an image. 
- - This identifier is a content-addressable digest calculated from the - image's configuration (which includes the digests of layers used by - the image). - - Note that this digest differs from the `RepoDigests` below, which - holds digests of image manifests that reference the image. - type: "string" - x-nullable: false - example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" - RepoTags: - description: | - List of image names/tags in the local image cache that reference this - image. - - Multiple image tags can refer to the same image, and this list may be - empty if no tags reference the image, in which case the image is - "untagged", in which case it can still be referenced by its ID. - type: "array" - items: - type: "string" - example: - - "example:1.0" - - "example:latest" - - "example:stable" - - "internal.registry.example.com:5000/example:1.0" - RepoDigests: - description: | - List of content-addressable digests of locally available image manifests - that the image is referenced from. Multiple manifests can refer to the - same image. - - These digests are usually only available if the image was either pulled - from a registry, or if the image was pushed to a registry, which is when - the manifest is generated and its digest calculated. - type: "array" - items: - type: "string" - example: - - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - Parent: - description: | - ID of the parent image. - - Depending on how the image was created, this field may be empty and - is only set for images that were built/created locally. This field - is empty if the image was pulled from an image registry. - type: "string" - x-nullable: false - example: "" - Comment: - description: | - Optional message that was set when committing or importing the image. - type: "string" - x-nullable: false - example: "" - Created: - description: | - Date and time at which the image was created, formatted in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - - This information is only available if present in the image, - and omitted otherwise. - type: "string" - format: "dateTime" - x-nullable: true - example: "2022-02-04T21:20:12.497794809Z" - DockerVersion: - description: | - The version of Docker that was used to build the image. - - Depending on how the image was created, this field may be empty. - type: "string" - x-nullable: false - example: "27.0.1" - Author: - description: | - Name of the author that was specified when committing the image, or as - specified through MAINTAINER (deprecated) in the Dockerfile. - type: "string" - x-nullable: false - example: "" - Config: - $ref: "#/definitions/ImageConfig" - Architecture: - description: | - Hardware CPU architecture that the image runs on. - type: "string" - x-nullable: false - example: "arm" - Variant: - description: | - CPU architecture variant (presently ARM-only). - type: "string" - x-nullable: true - example: "v7" - Os: - description: | - Operating System the image is built to run on. - type: "string" - x-nullable: false - example: "linux" - OsVersion: - description: | - Operating System version the image is built to run on (especially - for Windows). - type: "string" - example: "" - x-nullable: true - Size: - description: | - Total size of the image including all layers it is composed of. 
- type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 1239828 - GraphDriver: - $ref: "#/definitions/GraphDriverData" - RootFS: - description: | - Information about the image's RootFS, including the layer IDs. - type: "object" - required: [Type] - properties: - Type: - type: "string" - x-nullable: false - example: "layers" - Layers: - type: "array" - items: - type: "string" - example: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - Metadata: - description: | - Additional metadata of the image in the local cache. This information - is local to the daemon, and not part of the image itself. - type: "object" - properties: - LastTagTime: - description: | - Date and time at which the image was last tagged in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - - This information is only available if the image was tagged locally, - and omitted otherwise. - type: "string" - format: "dateTime" - example: "2022-02-28T14:40:02.623929178Z" - x-nullable: true - - ImageSummary: - type: "object" - x-go-name: "Summary" - required: - - Id - - ParentId - - RepoTags - - RepoDigests - - Created - - Size - - SharedSize - - Labels - - Containers - properties: - Id: - description: | - ID is the content-addressable ID of an image. - - This identifier is a content-addressable digest calculated from the - image's configuration (which includes the digests of layers used by - the image). - - Note that this digest differs from the `RepoDigests` below, which - holds digests of image manifests that reference the image. - type: "string" - x-nullable: false - example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" - ParentId: - description: | - ID of the parent image. - - Depending on how the image was created, this field may be empty and - is only set for images that were built/created locally. This field - is empty if the image was pulled from an image registry. - type: "string" - x-nullable: false - example: "" - RepoTags: - description: | - List of image names/tags in the local image cache that reference this - image. - - Multiple image tags can refer to the same image, and this list may be - empty if no tags reference the image, in which case the image is - "untagged", in which case it can still be referenced by its ID. - type: "array" - x-nullable: false - items: - type: "string" - example: - - "example:1.0" - - "example:latest" - - "example:stable" - - "internal.registry.example.com:5000/example:1.0" - RepoDigests: - description: | - List of content-addressable digests of locally available image manifests - that the image is referenced from. Multiple manifests can refer to the - same image. - - These digests are usually only available if the image was either pulled - from a registry, or if the image was pushed to a registry, which is when - the manifest is generated and its digest calculated. 
- type: "array" - x-nullable: false - items: - type: "string" - example: - - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - Created: - description: | - Date and time at which the image was created as a Unix timestamp - (number of seconds sinds EPOCH). - type: "integer" - x-nullable: false - example: "1644009612" - Size: - description: | - Total size of the image including all layers it is composed of. - type: "integer" - format: "int64" - x-nullable: false - example: 172064416 - SharedSize: - description: | - Total size of image layers that are shared between this image and other - images. - - This size is not calculated by default. `-1` indicates that the value - has not been set / calculated. - type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - VirtualSize: - description: |- - Total size of the image including all layers it is composed of. - - Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - type: "integer" - format: "int64" - example: 172064416 - Labels: - description: "User-defined key/value metadata." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Containers: - description: | - Number of containers using this image. Includes both stopped and running - containers. - - This size is not calculated by default, and depends on which API endpoint - is used. `-1` indicates that the value has not been set / calculated. - x-nullable: false - type: "integer" - example: 2 - - AuthConfig: - type: "object" - properties: - username: - type: "string" - password: - type: "string" - email: - type: "string" - serveraddress: - type: "string" - example: - username: "hannibal" - password: "xxxx" - serveraddress: "https://index.docker.io/v1/" - - ProcessConfig: - type: "object" - properties: - privileged: - type: "boolean" - user: - type: "string" - tty: - type: "boolean" - entrypoint: - type: "string" - arguments: - type: "array" - items: - type: "string" - - Volume: - type: "object" - required: [Name, Driver, Mountpoint, Labels, Scope, Options] - properties: - Name: - type: "string" - description: "Name of the volume." - x-nullable: false - example: "tardis" - Driver: - type: "string" - description: "Name of the volume driver used by the volume." - x-nullable: false - example: "custom" - Mountpoint: - type: "string" - description: "Mount path of the volume on the host." - x-nullable: false - example: "/var/lib/docker/volumes/tardis" - CreatedAt: - type: "string" - format: "dateTime" - description: "Date/Time the volume was created." - example: "2016-06-07T20:31:11.853781916Z" - Status: - type: "object" - description: | - Low-level details about the volume, provided by the volume driver. - Details are returned as a map with key/value pairs: - `{"key":"value","key2":"value2"}`. - - The `Status` field is optional, and is omitted if the volume driver - does not support this feature. - additionalProperties: - type: "object" - example: - hello: "world" - Labels: - type: "object" - description: "User-defined key/value metadata." 
- x-nullable: false - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: - type: "string" - description: | - The level at which the volume exists. Either `global` for cluster-wide, - or `local` for machine level. - default: "local" - x-nullable: false - enum: ["local", "global"] - example: "local" - ClusterVolume: - $ref: "#/definitions/ClusterVolume" - Options: - type: "object" - description: | - The driver specific options used when creating the volume. - additionalProperties: - type: "string" - example: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - UsageData: - type: "object" - x-nullable: true - x-go-name: "UsageData" - required: [Size, RefCount] - description: | - Usage details about the volume. This information is used by the - `GET /system/df` endpoint, and omitted in other endpoints. - properties: - Size: - type: "integer" - format: "int64" - default: -1 - description: | - Amount of disk space used by the volume (in bytes). This information - is only available for volumes created with the `"local"` volume - driver. For volumes created with other volume drivers, this field - is set to `-1` ("not available") - x-nullable: false - RefCount: - type: "integer" - format: "int64" - default: -1 - description: | - The number of containers referencing this volume. This field - is set to `-1` if the reference-count is not available. - x-nullable: false - - VolumeCreateOptions: - description: "Volume configuration" - type: "object" - title: "VolumeConfig" - x-go-name: "CreateOptions" - properties: - Name: - description: | - The new volume's name. If not specified, Docker generates a name. - type: "string" - x-nullable: false - example: "tardis" - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - example: "custom" - DriverOpts: - description: | - A mapping of driver options and values. These options are - passed directly to the driver and are driver specific. - type: "object" - additionalProperties: - type: "string" - example: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - ClusterVolumeSpec: - $ref: "#/definitions/ClusterVolumeSpec" - - VolumeListResponse: - type: "object" - title: "VolumeListResponse" - x-go-name: "ListResponse" - description: "Volume list response" - properties: - Volumes: - type: "array" - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - description: | - Warnings that occurred when fetching the list of volumes. - items: - type: "string" - example: [] - - Network: - type: "object" - properties: - Name: - description: | - Name of the network. - type: "string" - example: "my_network" - Id: - description: | - ID that uniquely identifies a network on a single machine. - type: "string" - example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: - description: | - Date and time at which the network was created in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-10-19T04:33:30.360899459Z" - Scope: - description: | - The level at which the network exists (e.g. 
`swarm` for cluster-wide - or `local` for machine level) - type: "string" - example: "local" - Driver: - description: | - The name of the driver used to create the network (e.g. `bridge`, - `overlay`). - type: "string" - example: "overlay" - EnableIPv6: - description: | - Whether the network was created with IPv6 enabled. - type: "boolean" - example: false - IPAM: - $ref: "#/definitions/IPAM" - Internal: - description: | - Whether the network is created to only allow internal networking - connectivity. - type: "boolean" - default: false - example: false - Attachable: - description: | - Wheter a global / swarm scope network is manually attachable by regular - containers from workers in swarm mode. - type: "boolean" - default: false - example: false - Ingress: - description: | - Whether the network is providing the routing-mesh for the swarm cluster. - type: "boolean" - default: false - example: false - ConfigFrom: - $ref: "#/definitions/ConfigReference" - ConfigOnly: - description: | - Whether the network is a config-only network. Config-only networks are - placeholder networks for network configurations to be used by other - networks. Config-only networks cannot be used directly to run containers - or services. - type: "boolean" - default: false - Containers: - description: | - Contains endpoints attached to the network. - type: "object" - additionalProperties: - $ref: "#/definitions/NetworkContainer" - example: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - description: | - Network-specific options uses when creating the network. - type: "object" - additionalProperties: - type: "string" - example: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Peers: - description: | - List of peer nodes for an overlay network. This field is only present - for overlay networks, and omitted for other network types. - type: "array" - items: - $ref: "#/definitions/PeerInfo" - x-nullable: true - # TODO: Add Services (only present when "verbose" is set). - - ConfigReference: - description: | - The config-only network source to provide the configuration for - this network. - type: "object" - properties: - Network: - description: | - The name of the config-only network that provides the network's - configuration. The specified network must be an existing config-only - network. Only network names are allowed, not network IDs. - type: "string" - example: "config_only_network_01" - - IPAM: - type: "object" - properties: - Driver: - description: "Name of the IPAM driver to use." - type: "string" - default: "default" - example: "default" - Config: - description: | - List of IPAM configuration options, specified as a map: - - ``` - {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } - ``` - type: "array" - items: - $ref: "#/definitions/IPAMConfig" - Options: - description: "Driver-specific options, specified as a map." 
- type: "object" - additionalProperties: - type: "string" - example: - foo: "bar" - - IPAMConfig: - type: "object" - properties: - Subnet: - type: "string" - example: "172.20.0.0/16" - IPRange: - type: "string" - example: "172.20.10.0/24" - Gateway: - type: "string" - example: "172.20.10.11" - AuxiliaryAddresses: - type: "object" - additionalProperties: - type: "string" - - NetworkContainer: - type: "object" - properties: - Name: - type: "string" - example: "container_1" - EndpointID: - type: "string" - example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: - type: "string" - example: "02:42:ac:13:00:02" - IPv4Address: - type: "string" - example: "172.19.0.2/16" - IPv6Address: - type: "string" - example: "" - - PeerInfo: - description: | - PeerInfo represents one peer of an overlay network. - type: "object" - properties: - Name: - description: - ID of the peer-node in the Swarm cluster. - type: "string" - example: "6869d7c1732b" - IP: - description: - IP-address of the peer-node in the Swarm cluster. - type: "string" - example: "10.133.77.91" - - NetworkCreateResponse: - description: "OK response to NetworkCreate operation" - type: "object" - title: "NetworkCreateResponse" - x-go-name: "CreateResponse" - required: [Id, Warning] - properties: - Id: - description: "The ID of the created network." - type: "string" - x-nullable: false - example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" - Warning: - description: "Warnings encountered when creating the container" - type: "string" - x-nullable: false - example: "" - - BuildInfo: - type: "object" - properties: - id: - type: "string" - stream: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - aux: - $ref: "#/definitions/ImageID" - - BuildCache: - type: "object" - description: | - BuildCache contains information about a build cache record. - properties: - ID: - type: "string" - description: | - Unique ID of the build cache record. - example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" - Parents: - description: | - List of parent build cache record IDs. - type: "array" - items: - type: "string" - x-nullable: true - example: ["hw53o5aio51xtltp5xjp8v7fx"] - Type: - type: "string" - description: | - Cache record type. - example: "regular" - # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 - enum: - - "internal" - - "frontend" - - "source.local" - - "source.git.checkout" - - "exec.cachemount" - - "regular" - Description: - type: "string" - description: | - Description of the build-step that produced the build cache. - example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: - type: "boolean" - description: | - Indicates if the build cache is in use. - example: false - Shared: - type: "boolean" - description: | - Indicates if the build cache is shared. - example: true - Size: - description: | - Amount of disk space used by the build cache (in bytes). 
- type: "integer" - example: 51 - CreatedAt: - description: | - Date and time at which the build cache was created in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - LastUsedAt: - description: | - Date and time at which the build cache was last used in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - x-nullable: true - example: "2017-08-09T07:09:37.632105588Z" - UsageCount: - type: "integer" - example: 26 - - ImageID: - type: "object" - description: "Image ID or Digest" - properties: - ID: - type: "string" - example: - ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - - CreateImageInfo: - type: "object" - properties: - id: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - PushImageInfo: - type: "object" - properties: - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - ErrorDetail: - type: "object" - properties: - code: - type: "integer" - message: - type: "string" - - ProgressDetail: - type: "object" - properties: - current: - type: "integer" - total: - type: "integer" - - ErrorResponse: - description: "Represents an error." - type: "object" - required: ["message"] - properties: - message: - description: "The error message." - type: "string" - x-nullable: false - example: - message: "Something went wrong." - - IdResponse: - description: "Response to an API call that returns just an Id" - type: "object" - required: ["Id"] - properties: - Id: - description: "The id of the newly created object." - type: "string" - x-nullable: false - - EndpointSettings: - description: "Configuration for a network endpoint." - type: "object" - properties: - # Configurations - IPAMConfig: - $ref: "#/definitions/EndpointIPAMConfig" - Links: - type: "array" - items: - type: "string" - example: - - "container_1" - - "container_2" - MacAddress: - description: | - MAC address for the endpoint on this network. The network driver might ignore this parameter. - type: "string" - example: "02:42:ac:11:00:04" - Aliases: - type: "array" - items: - type: "string" - example: - - "server_x" - - "server_y" - DriverOpts: - description: | - DriverOpts is a mapping of driver options and values. These options - are passed directly to the driver and are driver specific. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - - # Operational data - NetworkID: - description: | - Unique ID of the network. - type: "string" - example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" - EndpointID: - description: | - Unique ID for the service endpoint in a Sandbox. - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for this network. - type: "string" - example: "172.17.0.1" - IPAddress: - description: | - IPv4 address. - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address. 
- type: "string" - example: "2001:db8:2::100" - GlobalIPv6Address: - description: | - Global IPv6 address. - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - type: "integer" - format: "int64" - example: 64 - DNSNames: - description: | - List of all DNS names an endpoint has on a specific network. This - list is based on the container name, network aliases, container short - ID, and hostname. - - These DNS names are non-fully qualified but can contain several dots. - You can get fully qualified DNS names by appending `.`. - For instance, if container name is `my.ctr` and the network is named - `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be - `my.ctr.testnet`. - type: array - items: - type: string - example: ["foobar", "server_x", "server_y", "my.ctr"] - - EndpointIPAMConfig: - description: | - EndpointIPAMConfig represents an endpoint's IPAM configuration. - type: "object" - x-nullable: true - properties: - IPv4Address: - type: "string" - example: "172.20.30.33" - IPv6Address: - type: "string" - example: "2001:db8:abcd::3033" - LinkLocalIPs: - type: "array" - items: - type: "string" - example: - - "169.254.34.68" - - "fe80::3468" - - PluginMount: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Source, Destination, Type, Options] - properties: - Name: - type: "string" - x-nullable: false - example: "some-mount" - Description: - type: "string" - x-nullable: false - example: "This is a mount that's used by the plugin." - Settable: - type: "array" - items: - type: "string" - Source: - type: "string" - example: "/var/lib/docker/plugins/" - Destination: - type: "string" - x-nullable: false - example: "/mnt/state" - Type: - type: "string" - x-nullable: false - example: "bind" - Options: - type: "array" - items: - type: "string" - example: - - "rbind" - - "rw" - - PluginDevice: - type: "object" - required: [Name, Description, Settable, Path] - x-nullable: false - properties: - Name: - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Settable: - type: "array" - items: - type: "string" - Path: - type: "string" - example: "/dev/fuse" - - PluginEnv: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - Description: - x-nullable: false - type: "string" - Settable: - type: "array" - items: - type: "string" - Value: - type: "string" - - PluginInterfaceType: - type: "object" - x-nullable: false - required: [Prefix, Capability, Version] - properties: - Prefix: - type: "string" - x-nullable: false - Capability: - type: "string" - x-nullable: false - Version: - type: "string" - x-nullable: false - - PluginPrivilege: - description: | - Describes a permission the user has to accept upon installing - the plugin. - type: "object" - x-go-name: "PluginPrivilege" - properties: - Name: - type: "string" - example: "network" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - "host" - - Plugin: - description: "A plugin for the Engine API" - type: "object" - required: [Settings, Enabled, Config, Name] - properties: - Id: - type: "string" - example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - Name: - type: "string" - x-nullable: false - example: "tiborvass/sample-volume-plugin" - Enabled: - description: - True if the plugin is running. False if the plugin is not running, - only installed. 
- type: "boolean" - x-nullable: false - example: true - Settings: - description: "Settings that can be modified by users." - type: "object" - x-nullable: false - required: [Args, Devices, Env, Mounts] - properties: - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - type: "string" - example: - - "DEBUG=0" - Args: - type: "array" - items: - type: "string" - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PluginReference: - description: "plugin remote reference used to push/pull the plugin" - type: "string" - x-nullable: false - example: "localhost:5000/tiborvass/sample-volume-plugin:latest" - Config: - description: "The config of a plugin." - type: "object" - x-nullable: false - required: - - Description - - Documentation - - Interface - - Entrypoint - - WorkDir - - Network - - Linux - - PidHost - - PropagatedMount - - IpcHost - - Mounts - - Env - - Args - properties: - DockerVersion: - description: "Docker Version used to create the plugin" - type: "string" - x-nullable: false - example: "17.06.0-ce" - Description: - type: "string" - x-nullable: false - example: "A sample volume plugin for Docker" - Documentation: - type: "string" - x-nullable: false - example: "https://docs.docker.com/engine/extend/plugins/" - Interface: - description: "The interface between Docker and the plugin" - x-nullable: false - type: "object" - required: [Types, Socket] - properties: - Types: - type: "array" - items: - $ref: "#/definitions/PluginInterfaceType" - example: - - "docker.volumedriver/1.0" - Socket: - type: "string" - x-nullable: false - example: "plugins.sock" - ProtocolScheme: - type: "string" - example: "some.protocol/v1.0" - description: "Protocol to use for clients connecting to the plugin." 
- enum: - - "" - - "moby.plugins.http/v1" - Entrypoint: - type: "array" - items: - type: "string" - example: - - "/usr/bin/sample-volume-plugin" - - "/data" - WorkDir: - type: "string" - x-nullable: false - example: "/bin/" - User: - type: "object" - x-nullable: false - properties: - UID: - type: "integer" - format: "uint32" - example: 1000 - GID: - type: "integer" - format: "uint32" - example: 1000 - Network: - type: "object" - x-nullable: false - required: [Type] - properties: - Type: - x-nullable: false - type: "string" - example: "host" - Linux: - type: "object" - x-nullable: false - required: [Capabilities, AllowAllDevices, Devices] - properties: - Capabilities: - type: "array" - items: - type: "string" - example: - - "CAP_SYS_ADMIN" - - "CAP_SYSLOG" - AllowAllDevices: - type: "boolean" - x-nullable: false - example: false - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PropagatedMount: - type: "string" - x-nullable: false - example: "/mnt/volumes" - IpcHost: - type: "boolean" - x-nullable: false - example: false - PidHost: - type: "boolean" - x-nullable: false - example: false - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - $ref: "#/definitions/PluginEnv" - example: - - Name: "DEBUG" - Description: "If set, prints debug messages" - Settable: null - Value: "0" - Args: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - example: "args" - Description: - x-nullable: false - type: "string" - example: "command line arguments" - Settable: - type: "array" - items: - type: "string" - Value: - type: "array" - items: - type: "string" - rootfs: - type: "object" - properties: - type: - type: "string" - example: "layers" - diff_ids: - type: "array" - items: - type: "string" - example: - - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - - ObjectVersion: - description: | - The version number of the object such as node, service, etc. This is needed - to avoid conflicting writes. The client must send the version number along - with the modified specification when updating these objects. - - This approach ensures safe concurrency and determinism in that the change - on the object may not be applied if the version number has changed from the - last read. In other words, if two update requests specify the same base - version, only one of the requests can succeed. As a result, two separate - update requests that happen at the same time will not unintentionally - overwrite each other. - type: "object" - properties: - Index: - type: "integer" - format: "uint64" - example: 373531 - - NodeSpec: - type: "object" - properties: - Name: - description: "Name for the node." - type: "string" - example: "my-node" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Role: - description: "Role of the node." - type: "string" - enum: - - "worker" - - "manager" - example: "manager" - Availability: - description: "Availability of the node." 
- type: "string" - enum: - - "active" - - "pause" - - "drain" - example: "active" - example: - Availability: "active" - Name: "node-name" - Role: "manager" - Labels: - foo: "bar" - - Node: - type: "object" - properties: - ID: - type: "string" - example: "24ifsmvkjbyhk" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the node was added to the swarm in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the node was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/NodeSpec" - Description: - $ref: "#/definitions/NodeDescription" - Status: - $ref: "#/definitions/NodeStatus" - ManagerStatus: - $ref: "#/definitions/ManagerStatus" - - NodeDescription: - description: | - NodeDescription encapsulates the properties of the Node as reported by the - agent. - type: "object" - properties: - Hostname: - type: "string" - example: "bf3067039e47" - Platform: - $ref: "#/definitions/Platform" - Resources: - $ref: "#/definitions/ResourceObject" - Engine: - $ref: "#/definitions/EngineDescription" - TLSInfo: - $ref: "#/definitions/TLSInfo" - - Platform: - description: | - Platform represents the platform (Arch/OS). - type: "object" - properties: - Architecture: - description: | - Architecture represents the hardware architecture (for example, - `x86_64`). - type: "string" - example: "x86_64" - OS: - description: | - OS represents the Operating System (for example, `linux` or `windows`). - type: "string" - example: "linux" - - EngineDescription: - description: "EngineDescription provides information about an engine." - type: "object" - properties: - EngineVersion: - type: "string" - example: "17.06.0" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - foo: "bar" - Plugins: - type: "array" - items: - type: "object" - properties: - Type: - type: "string" - Name: - type: "string" - example: - - Type: "Log" - Name: "awslogs" - - Type: "Log" - Name: "fluentd" - - Type: "Log" - Name: "gcplogs" - - Type: "Log" - Name: "gelf" - - Type: "Log" - Name: "journald" - - Type: "Log" - Name: "json-file" - - Type: "Log" - Name: "splunk" - - Type: "Log" - Name: "syslog" - - Type: "Network" - Name: "bridge" - - Type: "Network" - Name: "host" - - Type: "Network" - Name: "ipvlan" - - Type: "Network" - Name: "macvlan" - - Type: "Network" - Name: "null" - - Type: "Network" - Name: "overlay" - - Type: "Volume" - Name: "local" - - Type: "Volume" - Name: "localhost:5000/vieux/sshfs:latest" - - Type: "Volume" - Name: "vieux/sshfs:latest" - - TLSInfo: - description: | - Information about the issuer of leaf TLS certificates and the trusted root - CA certificate. - type: "object" - properties: - TrustRoot: - description: | - The root CA certificate(s) that are used to validate leaf TLS - certificates. - type: "string" - CertIssuerSubject: - description: - The base64-url-safe-encoded raw subject bytes of the issuer. - type: "string" - CertIssuerPublicKey: - description: | - The base64-url-safe-encoded raw public key bytes of the issuer. 
- type: "string" - example: - TrustRoot: | - -----BEGIN CERTIFICATE----- - MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw - EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 - MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH - A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf - 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB - Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO - PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz - pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H - -----END CERTIFICATE----- - CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" - CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" - - NodeStatus: - description: | - NodeStatus represents the status of a node. - - It provides the current status of the node, as seen by the manager. - type: "object" - properties: - State: - $ref: "#/definitions/NodeState" - Message: - type: "string" - example: "" - Addr: - description: "IP address of the node." - type: "string" - example: "172.17.0.2" - - NodeState: - description: "NodeState represents the state of a node." - type: "string" - enum: - - "unknown" - - "down" - - "ready" - - "disconnected" - example: "ready" - - ManagerStatus: - description: | - ManagerStatus represents the status of a manager. - - It provides the current status of a node's manager component, if the node - is a manager. - x-nullable: true - type: "object" - properties: - Leader: - type: "boolean" - default: false - example: true - Reachability: - $ref: "#/definitions/Reachability" - Addr: - description: | - The IP address and port at which the manager is reachable. - type: "string" - example: "10.0.0.46:2377" - - Reachability: - description: "Reachability represents the reachability of a node." - type: "string" - enum: - - "unknown" - - "unreachable" - - "reachable" - example: "reachable" - - SwarmSpec: - description: "User modifiable swarm configuration." - type: "object" - properties: - Name: - description: "Name of the swarm." - type: "string" - example: "default" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.corp.type: "production" - com.example.corp.department: "engineering" - Orchestration: - description: "Orchestration configuration." - type: "object" - x-nullable: true - properties: - TaskHistoryRetentionLimit: - description: | - The number of historic tasks to keep per instance or node. If - negative, never remove completed or failed tasks. - type: "integer" - format: "int64" - example: 10 - Raft: - description: "Raft configuration." - type: "object" - properties: - SnapshotInterval: - description: "The number of log entries between snapshots." - type: "integer" - format: "uint64" - example: 10000 - KeepOldSnapshots: - description: | - The number of snapshots to keep beyond the current snapshot. - type: "integer" - format: "uint64" - LogEntriesForSlowFollowers: - description: | - The number of log entries to keep around to sync up slow followers - after a snapshot is created. - type: "integer" - format: "uint64" - example: 500 - ElectionTick: - description: | - The number of ticks that a follower will wait for a message from - the leader before becoming a candidate and starting an election. - `ElectionTick` must be greater than `HeartbeatTick`. 
- - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 3 - HeartbeatTick: - description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, - the leader will send a heartbeat to the followers. - - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 1 - Dispatcher: - description: "Dispatcher configuration." - type: "object" - x-nullable: true - properties: - HeartbeatPeriod: - description: | - The delay for an agent to send a heartbeat to the dispatcher. - type: "integer" - format: "int64" - example: 5000000000 - CAConfig: - description: "CA configuration." - type: "object" - x-nullable: true - properties: - NodeCertExpiry: - description: "The duration node certificates are issued for." - type: "integer" - format: "int64" - example: 7776000000000000 - ExternalCAs: - description: | - Configuration for forwarding signing requests to an external - certificate authority. - type: "array" - items: - type: "object" - properties: - Protocol: - description: | - Protocol for communication with the external CA (currently - only `cfssl` is supported). - type: "string" - enum: - - "cfssl" - default: "cfssl" - URL: - description: | - URL where certificate signing requests should be sent. - type: "string" - Options: - description: | - An object with key/value pairs that are interpreted as - protocol-specific options for the external CA driver. - type: "object" - additionalProperties: - type: "string" - CACert: - description: | - The root CA certificate (in PEM format) this external CA uses - to issue TLS certificates (assumed to be to the current swarm - root CA certificate if not provided). - type: "string" - SigningCACert: - description: | - The desired signing CA certificate for all swarm node TLS leaf - certificates, in PEM format. - type: "string" - SigningCAKey: - description: | - The desired signing CA key for all swarm node TLS leaf certificates, - in PEM format. - type: "string" - ForceRotate: - description: | - An integer whose purpose is to force swarm to generate a new - signing CA certificate and key, if none have been specified in - `SigningCACert` and `SigningCAKey` - format: "uint64" - type: "integer" - EncryptionConfig: - description: "Parameters related to encryption-at-rest." - type: "object" - properties: - AutoLockManagers: - description: | - If set, generate a key and use it to lock data stored on the - managers. - type: "boolean" - example: false - TaskDefaults: - description: "Defaults for creating tasks in this cluster." - type: "object" - properties: - LogDriver: - description: | - The log driver to use for tasks created in the orchestrator if - unspecified by a service. - - Updating this value only affects new tasks. Existing tasks continue - to use their previously configured log driver until recreated. - type: "object" - properties: - Name: - description: | - The log driver to use as a default for new tasks. - type: "string" - example: "json-file" - Options: - description: | - Driver-specific options for the selectd log driver, specified - as key/value pairs. - type: "object" - additionalProperties: - type: "string" - example: - "max-file": "10" - "max-size": "100m" - - # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but - # without `JoinTokens`. 
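For orientation, the comment above can be checked against a live daemon: `GET /info` returns the same swarm information as `GET /swarm`, minus `JoinTokens`. Below is a minimal sketch using only the Go standard library; the Unix-socket path `/var/run/docker.sock` and the unversioned request paths are assumptions for illustration, not part of this specification.

```go
// Minimal sketch (assumed socket path, not part of this spec): fetch /info and
// /swarm from a local Docker Engine and compare the responses. Per the comment
// above, /info carries the same swarm information as /swarm without JoinTokens.
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// Route every request to the engine's Unix socket; the URL host is a dummy.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	for _, path := range []string{"/info", "/swarm"} {
		resp, err := client.Get("http://docker" + path)
		if err != nil {
			fmt.Printf("%s: %v\n", path, err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("GET %s -> %d (%d bytes)\n", path, resp.StatusCode, len(body))
	}
}
```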
- ClusterInfo: - description: | - ClusterInfo represents information about the swarm as is returned by the - "/info" endpoint. Join-tokens are not included. - x-nullable: true - type: "object" - properties: - ID: - description: "The ID of the swarm." - type: "string" - example: "abajmipo7b4xz5ip2nrla6b11" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the swarm was initialised in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the swarm was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/SwarmSpec" - TLSInfo: - $ref: "#/definitions/TLSInfo" - RootRotationInProgress: - description: | - Whether there is currently a root CA rotation in progress for the swarm - type: "boolean" - example: false - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - If no port is set or is set to 0, the default port (4789) is used. - type: "integer" - format: "uint32" - default: 4789 - example: 4789 - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global scope - networks. - type: "array" - items: - type: "string" - format: "CIDR" - example: ["10.10.0.0/16", "20.20.0.0/16"] - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created from the - default subnet pool. - type: "integer" - format: "uint32" - maximum: 29 - default: 24 - example: 24 - - JoinTokens: - description: | - JoinTokens contains the tokens workers and managers need to join the swarm. - type: "object" - properties: - Worker: - description: | - The token workers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" - Manager: - description: | - The token managers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - - Swarm: - type: "object" - allOf: - - $ref: "#/definitions/ClusterInfo" - - type: "object" - properties: - JoinTokens: - $ref: "#/definitions/JoinTokens" - - TaskSpec: - description: "User modifiable task configuration." - type: "object" - properties: - PluginSpec: - type: "object" - description: | - Plugin spec for the service. *(Experimental release only.)* - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Name: - description: "The name or 'alias' to use for the plugin." - type: "string" - Remote: - description: "The plugin image reference to use." - type: "string" - Disabled: - description: "Disable the plugin once scheduled." - type: "boolean" - PluginPrivilege: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - ContainerSpec: - type: "object" - description: | - Container spec for the service. - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Image: - description: "The image name to use for the container" - type: "string" - Labels: - description: "User-defined key/value data." - type: "object" - additionalProperties: - type: "string" - Command: - description: "The command to be run in the image." - type: "array" - items: - type: "string" - Args: - description: "Arguments to the command." - type: "array" - items: - type: "string" - Hostname: - description: | - The hostname to use for the container, as a valid - [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. - type: "string" - Env: - description: | - A list of environment variables in the form `VAR=value`. - type: "array" - items: - type: "string" - Dir: - description: "The working directory for commands to run in." - type: "string" - User: - description: "The user inside the container." - type: "string" - Groups: - type: "array" - description: | - A list of additional groups that the container process will run as. - items: - type: "string" - Privileges: - type: "object" - description: "Security options for the container" - properties: - CredentialSpec: - type: "object" - description: "CredentialSpec for managed service account (Windows only)" - properties: - Config: - type: "string" - example: "0bt9dmxjvjiqermk6xrop3ekq" - description: | - Load credential spec from a Swarm Config with the given ID. - The specified config must also be present in the Configs - field with the Runtime property set. - -
- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - File: - type: "string" - example: "spec.json" - description: | - Load credential spec from this file. The file is read by - the daemon, and must be present in the `CredentialSpecs` - subdirectory in the docker data directory, which defaults - to `C:\ProgramData\Docker\` on Windows. - - For example, specifying `spec.json` loads - `C:\ProgramData\Docker\CredentialSpecs\spec.json`. - -
- - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - Registry: - type: "string" - description: | - Load credential spec from this value in the Windows - registry. The specified registry value must be located in: - - `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` - -
- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - SELinuxContext: - type: "object" - description: "SELinux labels of the container" - properties: - Disable: - type: "boolean" - description: "Disable SELinux" - User: - type: "string" - description: "SELinux user label" - Role: - type: "string" - description: "SELinux role label" - Type: - type: "string" - description: "SELinux type label" - Level: - type: "string" - description: "SELinux level label" - Seccomp: - type: "object" - description: "Options for configuring seccomp on the container" - properties: - Mode: - type: "string" - enum: - - "default" - - "unconfined" - - "custom" - Profile: - description: "The custom seccomp profile as a json object" - type: "string" - AppArmor: - type: "object" - description: "Options for configuring AppArmor on the container" - properties: - Mode: - type: "string" - enum: - - "default" - - "disabled" - NoNewPrivileges: - type: "boolean" - description: "Configuration of the no_new_privs bit in the container" - - TTY: - description: "Whether a pseudo-TTY should be allocated." - type: "boolean" - OpenStdin: - description: "Open `stdin`" - type: "boolean" - ReadOnly: - description: "Mount the container's root filesystem as read only." - type: "boolean" - Mounts: - description: | - Specification for mounts to be added to containers created as part - of the service. - type: "array" - items: - $ref: "#/definitions/Mount" - StopSignal: - description: "Signal to stop the container." - type: "string" - StopGracePeriod: - description: | - Amount of time to wait for the container to terminate before - forcefully killing it. - type: "integer" - format: "int64" - HealthCheck: - $ref: "#/definitions/HealthConfig" - Hosts: - type: "array" - description: | - A list of hostname/IP mappings to add to the container's `hosts` - file. The format of extra hosts is specified in the - [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) - man page: - - IP_address canonical_hostname [aliases...] - items: - type: "string" - DNSConfig: - description: | - Specification for DNS related configurations in resolver configuration - file (`resolv.conf`). - type: "object" - properties: - Nameservers: - description: "The IP addresses of the name servers." - type: "array" - items: - type: "string" - Search: - description: "A search list for host-name lookup." - type: "array" - items: - type: "string" - Options: - description: | - A list of internal resolver variables to be modified (e.g., - `debug`, `ndots:3`, etc.). - type: "array" - items: - type: "string" - Secrets: - description: | - Secrets contains references to zero or more secrets that will be - exposed to the service. - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - type: "object" - properties: - Name: - description: | - Name represents the final filename in the filesystem. - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - SecretID: - description: | - SecretID represents the ID of the specific secret that we're - referencing. - type: "string" - SecretName: - description: | - SecretName is the name of the secret that this references, - but this is just provided for lookup/display purposes. 
The - secret in the reference will be identified by its ID. - type: "string" - OomScoreAdj: - type: "integer" - format: "int64" - description: | - An integer value containing the score given to the container in - order to tune OOM killer preferences. - example: 0 - Configs: - description: | - Configs contains references to zero or more configs that will be - exposed to the service. - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - -
- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive - type: "object" - properties: - Name: - description: | - Name represents the final filename in the filesystem. - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - Runtime: - description: | - Runtime represents a target that is not mounted into the - container but is used by the task - -
- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually - > exclusive - type: "object" - ConfigID: - description: | - ConfigID represents the ID of the specific config that we're - referencing. - type: "string" - ConfigName: - description: | - ConfigName is the name of the config that this references, - but this is just provided for lookup/display purposes. The - config in the reference will be identified by its ID. - type: "string" - Isolation: - type: "string" - description: | - Isolation technology of the containers running the service. - (Windows only) - enum: - - "default" - - "process" - - "hyperv" - Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. - type: "boolean" - x-nullable: true - Sysctls: - description: | - Set kernel namedspaced parameters (sysctls) in the container. - The Sysctls option on services accepts the same sysctls as the - are supported on containers. Note that while the same sysctls are - supported, no guarantees or checks are made about their - suitability for a clustered environment, and it's up to the user - to determine whether a given sysctl will work properly in a - Service. - type: "object" - additionalProperties: - type: "string" - # This option is not used by Windows containers - CapabilityAdd: - type: "array" - description: | - A list of kernel capabilities to add to the default set - for the container. - items: - type: "string" - example: - - "CAP_NET_RAW" - - "CAP_SYS_ADMIN" - - "CAP_SYS_CHROOT" - - "CAP_SYSLOG" - CapabilityDrop: - type: "array" - description: | - A list of kernel capabilities to drop from the default set - for the container. - items: - type: "string" - example: - - "CAP_NET_RAW" - Ulimits: - description: | - A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - NetworkAttachmentSpec: - description: | - Read-only spec type for non-swarm containers attached to swarm overlay - networks. - -
- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - type: "object" - properties: - ContainerID: - description: "ID of the container represented by this task" - type: "string" - Resources: - description: | - Resource requirements which apply to each individual container created - as part of the service. - type: "object" - properties: - Limits: - description: "Define resources limits." - $ref: "#/definitions/Limit" - Reservations: - description: "Define resources reservation." - $ref: "#/definitions/ResourceObject" - RestartPolicy: - description: | - Specification for the restart policy which applies to containers - created as part of this service. - type: "object" - properties: - Condition: - description: "Condition for restart." - type: "string" - enum: - - "none" - - "on-failure" - - "any" - Delay: - description: "Delay between restart attempts." - type: "integer" - format: "int64" - MaxAttempts: - description: | - Maximum attempts to restart a given container before giving up - (default value is 0, which is ignored). - type: "integer" - format: "int64" - default: 0 - Window: - description: | - Windows is the time window used to evaluate the restart policy - (default value is 0, which is unbounded). - type: "integer" - format: "int64" - default: 0 - Placement: - type: "object" - properties: - Constraints: - description: | - An array of constraint expressions to limit the set of nodes where - a task can be scheduled. Constraint expressions can either use a - _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find - nodes that satisfy every expression (AND match). Constraints can - match node or Docker Engine labels as follows: - - node attribute | matches | example - ---------------------|--------------------------------|----------------------------------------------- - `node.id` | Node ID | `node.id==2ivku8v2gvtg4` - `node.hostname` | Node hostname | `node.hostname!=node-2` - `node.role` | Node role (`manager`/`worker`) | `node.role==manager` - `node.platform.os` | Node operating system | `node.platform.os==windows` - `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` - `node.labels` | User-defined node labels | `node.labels.security==high` - `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` - - `engine.labels` apply to Docker Engine labels like operating system, - drivers, etc. Swarm administrators add `node.labels` for operational - purposes by using the [`node update endpoint`](#operation/NodeUpdate). - - type: "array" - items: - type: "string" - example: - - "node.hostname!=node3.corp.example.com" - - "node.role!=manager" - - "node.labels.type==production" - - "node.platform.os==linux" - - "node.platform.arch==x86_64" - Preferences: - description: | - Preferences provide a way to make the scheduler aware of factors - such as topology. They are provided in order from highest to - lowest precedence. - type: "array" - items: - type: "object" - properties: - Spread: - type: "object" - properties: - SpreadDescriptor: - description: | - label descriptor, such as `engine.labels.az`. 
- type: "string" - example: - - Spread: - SpreadDescriptor: "node.labels.datacenter" - - Spread: - SpreadDescriptor: "node.labels.rack" - MaxReplicas: - description: | - Maximum number of replicas for per node (default value is 0, which - is unlimited) - type: "integer" - format: "int64" - default: 0 - Platforms: - description: | - Platforms stores all the platforms that the service's image can - run on. This field is used in the platform filter for scheduling. - If empty, then the platform filter is off, meaning there are no - scheduling restrictions. - type: "array" - items: - $ref: "#/definitions/Platform" - ForceUpdate: - description: | - A counter that triggers an update even if no relevant parameters have - been changed. - type: "integer" - Runtime: - description: | - Runtime is the type of runtime specified for the task executor. - type: "string" - Networks: - description: "Specifies which networks the service should attach to." - type: "array" - items: - $ref: "#/definitions/NetworkAttachmentConfig" - LogDriver: - description: | - Specifies the log driver to use for tasks created from this spec. If - not present, the default one for the swarm will be used, finally - falling back to the engine default if not specified. - type: "object" - properties: - Name: - type: "string" - Options: - type: "object" - additionalProperties: - type: "string" - - TaskState: - type: "string" - enum: - - "new" - - "allocated" - - "pending" - - "assigned" - - "accepted" - - "preparing" - - "ready" - - "starting" - - "running" - - "complete" - - "shutdown" - - "failed" - - "rejected" - - "remove" - - "orphaned" - - ContainerStatus: - type: "object" - description: "represents the status of a container." - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" - - PortStatus: - type: "object" - description: "represents the port status of a task's host ports whose service has published host ports" - properties: - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - - TaskStatus: - type: "object" - description: "represents the status of a task." - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - $ref: "#/definitions/ContainerStatus" - PortStatus: - $ref: "#/definitions/PortStatus" - - Task: - type: "object" - properties: - ID: - description: "The ID of the task." - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Name: - description: "Name of the task." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Spec: - $ref: "#/definitions/TaskSpec" - ServiceID: - description: "The ID of the service this task is part of." - type: "string" - Slot: - type: "integer" - NodeID: - description: "The ID of the node that this task is on." - type: "string" - AssignedGenericResources: - $ref: "#/definitions/GenericResources" - Status: - $ref: "#/definitions/TaskStatus" - DesiredState: - $ref: "#/definitions/TaskState" - JobIteration: - description: | - If the Service this Task belongs to is a job-mode service, contains - the JobIteration of the Service this Task was created for. Absent if - the Task was created for a Replicated or Global Service. 
- $ref: "#/definitions/ObjectVersion" - example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - AssignedGenericResources: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - ServiceSpec: - description: "User modifiable configuration for a service." - type: object - properties: - Name: - description: "Name of the service." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - TaskTemplate: - $ref: "#/definitions/TaskSpec" - Mode: - description: "Scheduling mode for the service." - type: "object" - properties: - Replicated: - type: "object" - properties: - Replicas: - type: "integer" - format: "int64" - Global: - type: "object" - ReplicatedJob: - description: | - The mode used for services with a finite number of tasks that run - to a completed state. - type: "object" - properties: - MaxConcurrent: - description: | - The maximum number of replicas to run simultaneously. - type: "integer" - format: "int64" - default: 1 - TotalCompletions: - description: | - The total number of replicas desired to reach the Completed - state. If unset, will default to the value of `MaxConcurrent` - type: "integer" - format: "int64" - GlobalJob: - description: | - The mode used for services which run a task to the completed state - on each valid node. - type: "object" - UpdateConfig: - description: "Specification for the update strategy of the service." - type: "object" - properties: - Parallelism: - description: | - Maximum number of tasks to be updated in one iteration (0 means - unlimited parallelism). - type: "integer" - format: "int64" - Delay: - description: "Amount of time between updates, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: | - Action to take if an updated task fails to run, or stops running - during the update. - type: "string" - enum: - - "continue" - - "pause" - - "rollback" - Monitor: - description: | - Amount of time to monitor each updated task for failures, in - nanoseconds. 
- type: "integer" - format: "int64" - MaxFailureRatio: - description: | - The fraction of tasks that may fail during an update before the - failure action is invoked, specified as a floating point number - between 0 and 1. - type: "number" - default: 0 - Order: - description: | - The order of operations when rolling out an updated task. Either - the old task is shut down before the new task is started, or the - new task is started before the old task is shut down. - type: "string" - enum: - - "stop-first" - - "start-first" - RollbackConfig: - description: "Specification for the rollback strategy of the service." - type: "object" - properties: - Parallelism: - description: | - Maximum number of tasks to be rolled back in one iteration (0 means - unlimited parallelism). - type: "integer" - format: "int64" - Delay: - description: | - Amount of time between rollback iterations, in nanoseconds. - type: "integer" - format: "int64" - FailureAction: - description: | - Action to take if an rolled back task fails to run, or stops - running during the rollback. - type: "string" - enum: - - "continue" - - "pause" - Monitor: - description: | - Amount of time to monitor each rolled back task for failures, in - nanoseconds. - type: "integer" - format: "int64" - MaxFailureRatio: - description: | - The fraction of tasks that may fail during a rollback before the - failure action is invoked, specified as a floating point number - between 0 and 1. - type: "number" - default: 0 - Order: - description: | - The order of operations when rolling back a task. Either the old - task is shut down before the new task is started, or the new task - is started before the old task is shut down. - type: "string" - enum: - - "stop-first" - - "start-first" - Networks: - description: | - Specifies which networks the service should attach to. - - Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. - type: "array" - items: - $ref: "#/definitions/NetworkAttachmentConfig" - - EndpointSpec: - $ref: "#/definitions/EndpointSpec" - - EndpointPortConfig: - type: "object" - properties: - Name: - type: "string" - Protocol: - type: "string" - enum: - - "tcp" - - "udp" - - "sctp" - TargetPort: - description: "The port inside the container." - type: "integer" - PublishedPort: - description: "The port on the swarm hosts." - type: "integer" - PublishMode: - description: | - The mode in which port is published. - -
- - - "ingress" makes the target port accessible on every node, - regardless of whether there is a task for the service running on - that node or not. - - "host" bypasses the routing mesh and publish the port directly on - the swarm node where that service is running. - - type: "string" - enum: - - "ingress" - - "host" - default: "ingress" - example: "ingress" - - EndpointSpec: - description: "Properties that can be configured to access and load balance a service." - type: "object" - properties: - Mode: - description: | - The mode of resolution to use for internal load balancing between tasks. - type: "string" - enum: - - "vip" - - "dnsrr" - default: "vip" - Ports: - description: | - List of exposed ports that this service is accessible on from the - outside. Ports can only be provided if `vip` resolution mode is used. - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - - Service: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ServiceSpec" - Endpoint: - type: "object" - properties: - Spec: - $ref: "#/definitions/EndpointSpec" - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - VirtualIPs: - type: "array" - items: - type: "object" - properties: - NetworkID: - type: "string" - Addr: - type: "string" - UpdateStatus: - description: "The status of a service update." - type: "object" - properties: - State: - type: "string" - enum: - - "updating" - - "paused" - - "completed" - StartedAt: - type: "string" - format: "dateTime" - CompletedAt: - type: "string" - format: "dateTime" - Message: - type: "string" - ServiceStatus: - description: | - The status of the service's tasks. Provided only when requested as - part of a ServiceList operation. - type: "object" - properties: - RunningTasks: - description: | - The number of tasks for the service currently in the Running state. - type: "integer" - format: "uint64" - example: 7 - DesiredTasks: - description: | - The number of tasks for the service desired to be running. - For replicated services, this is the replica count from the - service spec. For global services, this is computed by taking - count of all tasks for the service with a Desired State other - than Shutdown. - type: "integer" - format: "uint64" - example: 10 - CompletedTasks: - description: | - The number of tasks for a job that are in the Completed state. - This field must be cross-referenced with the service type, as the - value of 0 may mean the service is not in a job mode, or it may - mean the job-mode service has no tasks yet Completed. - type: "integer" - format: "uint64" - JobStatus: - description: | - The status of the service when it is in one of ReplicatedJob or - GlobalJob modes. Absent on Replicated and Global mode services. The - JobIteration is an ObjectVersion, but unlike the Service's version, - does not need to be sent with an update request. - type: "object" - properties: - JobIteration: - description: | - JobIteration is a value increased each time a Job is executed, - successfully or otherwise. "Executed", in this case, means the - job as a whole has been started, not that an individual Task has - been launched. A job is "Executed" when its ServiceSpec is - updated. JobIteration can be used to disambiguate Tasks belonging - to different executions of a job. 
Though JobIteration will - increase with each subsequent execution, it may not necessarily - increase by 1, and so JobIteration should not be used to - $ref: "#/definitions/ObjectVersion" - LastExecution: - description: | - The last time, as observed by the server, that this job was - started. - type: "string" - format: "dateTime" - example: - ID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Version: - Index: 19 - CreatedAt: "2016-06-07T21:05:51.880065305Z" - UpdatedAt: "2016-06-07T21:07:29.962229872Z" - Spec: - Name: "hopeful_cori" - TaskTemplate: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Endpoint: - Spec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - VirtualIPs: - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.2/16" - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.3/16" - - ImageDeleteResponseItem: - type: "object" - x-go-name: "DeleteResponse" - properties: - Untagged: - description: "The image ID of an image that was untagged" - type: "string" - Deleted: - description: "The image ID of an image that was deleted" - type: "string" - - ServiceCreateResponse: - type: "object" - description: | - contains the information returned to a client on the - creation of a new service. - properties: - ID: - description: "The ID of the created service." - type: "string" - x-nullable: false - example: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warnings: - description: | - Optional warning message. - - FIXME(thaJeztah): this should have "omitempty" in the generated type. 
- type: "array" - x-nullable: true - items: - type: "string" - example: - - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - - ServiceUpdateResponse: - type: "object" - properties: - Warnings: - description: "Optional warning messages" - type: "array" - items: - type: "string" - example: - Warnings: - - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - - ContainerSummary: - type: "object" - properties: - Id: - description: "The ID of this container" - type: "string" - x-go-name: "ID" - Names: - description: "The names that this container has been given" - type: "array" - items: - type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" - type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. `Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: - type: "string" - Annotations: - description: "Arbitrary key-value metadata attached to container" - type: "object" - x-nullable: true - additionalProperties: - type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - - Driver: - description: "Driver represents a driver (network, logging, secrets)." - type: "object" - required: [Name] - properties: - Name: - description: "Name of the driver." - type: "string" - x-nullable: false - example: "some-driver" - Options: - description: "Key/value map of driver-specific options." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - OptionA: "value for driver-specific option A" - OptionB: "value for driver-specific option B" - - SecretSpec: - type: "object" - properties: - Name: - description: "User-defined name of the secret." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. - - This field is only used to _create_ a secret, and is not returned by - other endpoints. - type: "string" - example: "" - Driver: - description: | - Name of the secrets driver used to fetch the secret's value from an - external secret store. 
- $ref: "#/definitions/Driver" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Secret: - type: "object" - properties: - ID: - type: "string" - example: "blt1owaxmitz71s9v5zh81zun" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - Spec: - $ref: "#/definitions/SecretSpec" - - ConfigSpec: - type: "object" - properties: - Name: - description: "User-defined name of the config." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. - type: "string" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Config: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ConfigSpec" - - ContainerState: - description: | - ContainerState stores container's running state. It's part of ContainerJSONBase - and will be returned by the "inspect" command. - type: "object" - x-nullable: true - properties: - Status: - description: | - String representation of the container state. Can be one of "created", - "running", "paused", "restarting", "removing", "exited", or "dead". - type: "string" - enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] - example: "running" - Running: - description: | - Whether this container is running. - - Note that a running container can be _paused_. The `Running` and `Paused` - booleans are not mutually exclusive: - - When pausing a container (on Linux), the freezer cgroup is used to suspend - all processes in the container. Freezing the process requires the process to - be running. As a result, paused containers are both `Running` _and_ `Paused`. - - Use the `Status` field instead to determine if a container's state is "running". - type: "boolean" - example: true - Paused: - description: "Whether this container is paused." - type: "boolean" - example: false - Restarting: - description: "Whether this container is restarting." - type: "boolean" - example: false - OOMKilled: - description: | - Whether a process within this container has been killed because it ran - out of memory since the container was last started. - type: "boolean" - example: false - Dead: - type: "boolean" - example: false - Pid: - description: "The process ID of this container" - type: "integer" - example: 1234 - ExitCode: - description: "The last exit code of this container" - type: "integer" - example: 0 - Error: - type: "string" - StartedAt: - description: "The time when this container was last started." - type: "string" - example: "2020-01-06T09:06:59.461876391Z" - FinishedAt: - description: "The time when this container last exited." 
- type: "string" - example: "2020-01-06T09:07:59.461876391Z" - Health: - $ref: "#/definitions/Health" - - ContainerCreateResponse: - description: "OK response to ContainerCreate operation" - type: "object" - title: "ContainerCreateResponse" - x-go-name: "CreateResponse" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - example: [] - - ContainerWaitResponse: - description: "OK response to ContainerWait operation" - type: "object" - x-go-name: "WaitResponse" - title: "ContainerWaitResponse" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - format: "int64" - x-nullable: false - Error: - $ref: "#/definitions/ContainerWaitExitError" - - ContainerWaitExitError: - description: "container waiting error, if any" - type: "object" - x-go-name: "WaitExitError" - properties: - Message: - description: "Details of an error" - type: "string" - - SystemVersion: - type: "object" - description: | - Response of Engine API: GET "/version" - properties: - Platform: - type: "object" - required: [Name] - properties: - Name: - type: "string" - Components: - type: "array" - description: | - Information about system components - items: - type: "object" - x-go-name: ComponentVersion - required: [Name, Version] - properties: - Name: - description: | - Name of the component - type: "string" - example: "Engine" - Version: - description: | - Version of the component - type: "string" - x-nullable: false - example: "27.0.1" - Details: - description: | - Key/value pairs of strings with additional information about the - component. These values are intended for informational purposes - only, and their content is not defined, and not part of the API - specification. - - These messages can be printed by the client as information to the user. - type: "object" - x-nullable: true - Version: - description: "The version of the daemon" - type: "string" - example: "27.0.1" - ApiVersion: - description: | - The default (and highest) API version that is supported by the daemon - type: "string" - example: "1.46" - MinAPIVersion: - description: | - The minimum API version that is supported by the daemon - type: "string" - example: "1.24" - GitCommit: - description: | - The Git commit of the source code that was used to build the daemon - type: "string" - example: "48a66213fe" - GoVersion: - description: | - The version Go used to compile the daemon, and the version of the Go - runtime in use. - type: "string" - example: "go1.21.12" - Os: - description: | - The operating system that the daemon is running on ("linux" or "windows") - type: "string" - example: "linux" - Arch: - description: | - The architecture that the daemon is running on - type: "string" - example: "amd64" - KernelVersion: - description: | - The kernel version (`uname -r`) that the daemon is running on. - - This field is omitted when empty. - type: "string" - example: "6.8.0-31-generic" - Experimental: - description: | - Indicates if the daemon is started with experimental features enabled. - - This field is omitted when empty / false. - type: "boolean" - example: true - BuildTime: - description: | - The date and time that the daemon was compiled. 
- type: "string" - example: "2020-06-22T15:49:27.000000000+00:00" - - SystemInfo: - type: "object" - properties: - ID: - description: | - Unique identifier of the daemon. - -
- - > **Note**: The format of the ID itself is not part of the API, and - > should not be considered stable. - type: "string" - example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" - Containers: - description: "Total number of containers on the host." - type: "integer" - example: 14 - ContainersRunning: - description: | - Number of containers with status `"running"`. - type: "integer" - example: 3 - ContainersPaused: - description: | - Number of containers with status `"paused"`. - type: "integer" - example: 1 - ContainersStopped: - description: | - Number of containers with status `"stopped"`. - type: "integer" - example: 10 - Images: - description: | - Total number of images on the host. - - Both _tagged_ and _untagged_ (dangling) images are counted. - type: "integer" - example: 508 - Driver: - description: "Name of the storage driver in use." - type: "string" - example: "overlay2" - DriverStatus: - description: | - Information specific to the storage driver, provided as - "label" / "value" pairs. - - This information is provided by the storage driver, and formatted - in a way consistent with the output of `docker info` on the command - line. - -
- - > **Note**: The information returned in this field, including the - > formatting of values and labels, should not be considered stable, - > and may change without notice. - type: "array" - items: - type: "array" - items: - type: "string" - example: - - ["Backing Filesystem", "extfs"] - - ["Supports d_type", "true"] - - ["Native Overlay Diff", "true"] - DockerRootDir: - description: | - Root directory of persistent Docker state. - - Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` - on Windows. - type: "string" - example: "/var/lib/docker" - Plugins: - $ref: "#/definitions/PluginsInfo" - MemoryLimit: - description: "Indicates if the host has memory limit support enabled." - type: "boolean" - example: true - SwapLimit: - description: "Indicates if the host has memory swap limit support enabled." - type: "boolean" - example: true - KernelMemoryTCP: - description: | - Indicates if the host has kernel memory TCP limit support enabled. This - field is omitted if not supported. - - Kernel memory TCP limits are not supported when using cgroups v2, which - does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. - type: "boolean" - example: true - CpuCfsPeriod: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) period is supported by - the host. - type: "boolean" - example: true - CpuCfsQuota: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by - the host. - type: "boolean" - example: true - CPUShares: - description: | - Indicates if CPU Shares limiting is supported by the host. - type: "boolean" - example: true - CPUSet: - description: | - Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. - - See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) - type: "boolean" - example: true - PidsLimit: - description: "Indicates if the host kernel has PID limit support enabled." - type: "boolean" - example: true - OomKillDisable: - description: "Indicates if OOM killer disable is supported on the host." - type: "boolean" - IPv4Forwarding: - description: "Indicates IPv4 forwarding is enabled." - type: "boolean" - example: true - BridgeNfIptables: - description: "Indicates if `bridge-nf-call-iptables` is available on the host." - type: "boolean" - example: true - BridgeNfIp6tables: - description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." - type: "boolean" - example: true - Debug: - description: | - Indicates if the daemon is running in debug-mode / with debug-level - logging enabled. - type: "boolean" - example: true - NFd: - description: | - The total number of file Descriptors in use by the daemon process. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 64 - NGoroutines: - description: | - The number of goroutines that currently exist. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 174 - SystemTime: - description: | - Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) - format with nano-seconds. - type: "string" - example: "2017-08-08T20:28:29.06202363Z" - LoggingDriver: - description: | - The logging driver to use as a default for new containers. - type: "string" - CgroupDriver: - description: | - The driver to use for managing cgroups. - type: "string" - enum: ["cgroupfs", "systemd", "none"] - default: "cgroupfs" - example: "cgroupfs" - CgroupVersion: - description: | - The version of the cgroup. 
- type: "string" - enum: ["1", "2"] - default: "1" - example: "1" - NEventsListener: - description: "Number of event listeners subscribed." - type: "integer" - example: 30 - KernelVersion: - description: | - Kernel version of the host. - - On Linux, this information obtained from `uname`. On Windows this - information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ - registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. - type: "string" - example: "6.8.0-31-generic" - OperatingSystem: - description: | - Name of the host's operating system, for example: "Ubuntu 24.04 LTS" - or "Windows Server 2016 Datacenter" - type: "string" - example: "Ubuntu 24.04 LTS" - OSVersion: - description: | - Version of the host's operating system - -
- - > **Note**: The information returned in this field, including its - > very existence, and the formatting of values, should not be considered - > stable, and may change without notice. - type: "string" - example: "24.04" - OSType: - description: | - Generic type of the operating system of the host, as returned by the - Go runtime (`GOOS`). - - Currently returned values are "linux" and "windows". A full list of - possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). - type: "string" - example: "linux" - Architecture: - description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). - - A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). - type: "string" - example: "x86_64" - NCPU: - description: | - The number of logical CPUs usable by the daemon. - - The number of available CPUs is checked by querying the operating - system when the daemon starts. Changes to operating system CPU - allocation after the daemon is started are not reflected. - type: "integer" - example: 4 - MemTotal: - description: | - Total amount of physical memory available on the host, in bytes. - type: "integer" - format: "int64" - example: 2095882240 - - IndexServerAddress: - description: | - Address / URL of the index server that is used for image search, - and as a default for user authentication for Docker Hub and Docker Cloud. - default: "https://index.docker.io/v1/" - type: "string" - example: "https://index.docker.io/v1/" - RegistryConfig: - $ref: "#/definitions/RegistryServiceConfig" - GenericResources: - $ref: "#/definitions/GenericResources" - HttpProxy: - description: | - HTTP-proxy configured for the daemon. This value is obtained from the - [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" - HttpsProxy: - description: | - HTTPS-proxy configured for the daemon. This value is obtained from the - [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" - NoProxy: - description: | - Comma-separated list of domain extensions for which no proxy should be - used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) - environment variable. - - Containers do not automatically inherit this configuration. - type: "string" - example: "*.local, 169.254/16" - Name: - description: "Hostname of the host." - type: "string" - example: "node5.corp.example.com" - Labels: - description: | - User-defined labels (key/value metadata) as set on the daemon. - -
- - > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, - > set through the daemon configuration, and _node_ labels, set from a - > manager node in the Swarm. Node labels are not included in this - > field. Node labels can be retrieved using the `/nodes/(id)` endpoint - > on a manager node in the Swarm. - type: "array" - items: - type: "string" - example: ["storage=ssd", "production"] - ExperimentalBuild: - description: | - Indicates if experimental features are enabled on the daemon. - type: "boolean" - example: true - ServerVersion: - description: | - Version string of the daemon. - type: "string" - example: "27.0.1" - Runtimes: - description: | - List of [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtimes configured on the daemon. Keys hold the "name" used to - reference the runtime. - - The Docker daemon relies on an OCI compliant runtime (invoked via the - `containerd` daemon) as its interface to the Linux kernel namespaces, - cgroups, and SELinux. - - The default runtime is `runc`, and automatically configured. Additional - runtimes can be configured by the user and will be listed here. - type: "object" - additionalProperties: - $ref: "#/definitions/Runtime" - default: - runc: - path: "runc" - example: - runc: - path: "runc" - runc-master: - path: "/go/bin/runc" - custom: - path: "/usr/local/bin/my-oci-runtime" - runtimeArgs: ["--debug", "--systemd-cgroup=false"] - DefaultRuntime: - description: | - Name of the default OCI runtime that is used when starting containers. - - The default can be overridden per-container at create time. - type: "string" - default: "runc" - example: "runc" - Swarm: - $ref: "#/definitions/SwarmInfo" - LiveRestoreEnabled: - description: | - Indicates if live restore is enabled. - - If enabled, containers are kept running when the daemon is shutdown - or upon daemon start if running containers are detected. - type: "boolean" - default: false - example: false - Isolation: - description: | - Represents the isolation technology to use as a default for containers. - The supported values are platform-specific. - - If no isolation value is specified on daemon start, on Windows client, - the default is `hyperv`, and on Windows server, the default is `process`. - - This option is currently not used on other platforms. - default: "default" - type: "string" - enum: - - "default" - - "hyperv" - - "process" - InitBinary: - description: | - Name and, optional, path of the `docker-init` binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "docker-init" - ContainerdCommit: - $ref: "#/definitions/Commit" - RuncCommit: - $ref: "#/definitions/Commit" - InitCommit: - $ref: "#/definitions/Commit" - SecurityOptions: - description: | - List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, user-namespaces (userns), rootless and - no-new-privileges. - - Additional configuration options for each security feature may - be present, and are included as a comma-separated list of key/value - pairs. - type: "array" - items: - type: "string" - example: - - "name=apparmor" - - "name=seccomp,profile=default" - - "name=selinux" - - "name=userns" - - "name=rootless" - ProductLicense: - description: | - Reports a summary of the product license on the daemon. - - If a commercial license has been applied to the daemon, information - such as number of nodes, and expiration are included. 
- type: "string" - example: "Community Engine" - DefaultAddressPools: - description: | - List of custom default address pools for local networks, which can be - specified in the daemon.json file or dockerd option. - - Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 - 10.10.[0-255].0/24 address pools. - type: "array" - items: - type: "object" - properties: - Base: - description: "The network address in CIDR format" - type: "string" - example: "10.10.0.0/16" - Size: - description: "The network pool size" - type: "integer" - example: "24" - Warnings: - description: | - List of warnings / informational messages about missing features, or - issues related to the daemon configuration. - - These messages can be printed by the client as information to the user. - type: "array" - items: - type: "string" - example: - - "WARNING: No memory limit support" - - "WARNING: bridge-nf-call-iptables is disabled" - - "WARNING: bridge-nf-call-ip6tables is disabled" - CDISpecDirs: - description: | - List of directories where (Container Device Interface) CDI - specifications are located. - - These specifications define vendor-specific modifications to an OCI - runtime specification for a container being created. - - An empty list indicates that CDI device injection is disabled. - - Note that since using CDI device injection requires the daemon to have - experimental enabled. For non-experimental daemons an empty list will - always be returned. - type: "array" - items: - type: "string" - example: - - "/etc/cdi" - - "/var/run/cdi" - Containerd: - $ref: "#/definitions/ContainerdInfo" - x-nullable: true - - ContainerdInfo: - description: | - Information for connecting to the containerd instance that is used by the daemon. - This is included for debugging purposes only. - type: "object" - properties: - Address: - description: "The address of the containerd socket." - type: "string" - example: "/run/containerd/containerd.sock" - Namespaces: - description: | - The namespaces that the daemon uses for running containers and - plugins in containerd. These namespaces can be configured in the - daemon configuration, and are considered to be used exclusively - by the daemon, Tampering with the containerd instance may cause - unexpected behavior. - - As these namespaces are considered to be exclusively accessed - by the daemon, it is not recommended to change these values, - or to change them to a value that is used by other systems, - such as cri-containerd. - type: "object" - properties: - Containers: - description: | - The default containerd namespace used for containers managed - by the daemon. - - The default namespace for containers is "moby", but will be - suffixed with the `.` of the remapped `root` if - user-namespaces are enabled and the containerd image-store - is used. - type: "string" - default: "moby" - example: "moby" - Plugins: - description: | - The default containerd namespace used for plugins managed by - the daemon. - - The default namespace for plugins is "plugins.moby", but will be - suffixed with the `.` of the remapped `root` if - user-namespaces are enabled and the containerd image-store - is used. - type: "string" - default: "plugins.moby" - example: "plugins.moby" - - # PluginsInfo is a temp struct holding Plugins name - # registered with docker daemon. It is used by Info struct - PluginsInfo: - description: | - Available plugins per type. - -
- - > **Note**: Only unmanaged (V1) plugins are included in this list. - > V1 plugins are "lazily" loaded, and are not returned in this list - > if there is no resource using the plugin. - type: "object" - properties: - Volume: - description: "Names of available volume-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["local"] - Network: - description: "Names of available network-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] - Authorization: - description: "Names of available authorization plugins." - type: "array" - items: - type: "string" - example: ["img-authz-plugin", "hbm"] - Log: - description: "Names of available logging-drivers, and logging-driver plugins." - type: "array" - items: - type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] - - - RegistryServiceConfig: - description: | - RegistryServiceConfig stores daemon registry services configuration. - type: "object" - x-nullable: true - properties: - AllowNondistributableArtifactsCIDRs: - description: | - List of IP ranges to which nondistributable artifacts can be pushed, - using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. - - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - AllowNondistributableArtifactsHostnames: - description: | - List of registry hostnames to which nondistributable artifacts can be - pushed, using the format `[:]` or `[:]`. - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. 
- type: "array" - items: - type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] - InsecureRegistryCIDRs: - description: | - List of IP ranges of insecure registries, using the CIDR syntax - ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries - accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates - from unknown CAs) communication. - - By default, local registries (`127.0.0.0/8`) are configured as - insecure. All other registries are secure. Communicating with an - insecure registry is not possible if the daemon assumes that registry - is secure. - - This configuration override this behavior, insecure communication with - registries whose resolved IP address is within the subnet described by - the CIDR syntax. - - Registries can also be marked insecure by hostname. Those registries - are listed under `IndexConfigs` and have their `Secure` field set to - `false`. - - > **Warning**: Using this option can be useful when running a local - > registry, but introduces security vulnerabilities. This option - > should therefore ONLY be used for testing purposes. For increased - > security, users should add their CA to their system's list of trusted - > CAs instead of enabling this option. - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - IndexConfigs: - type: "object" - additionalProperties: - $ref: "#/definitions/IndexInfo" - example: - "127.0.0.1:5000": - "Name": "127.0.0.1:5000" - "Mirrors": [] - "Secure": false - "Official": false - "[2001:db8:a0b:12f0::1]:80": - "Name": "[2001:db8:a0b:12f0::1]:80" - "Mirrors": [] - "Secure": false - "Official": false - "docker.io": - Name: "docker.io" - Mirrors: ["https://hub-mirror.corp.example.com:5000/"] - Secure: true - Official: true - "registry.internal.corp.example.com:3000": - Name: "registry.internal.corp.example.com:3000" - Mirrors: [] - Secure: false - Official: false - Mirrors: - description: | - List of registry URLs that act as a mirror for the official - (`docker.io`) registry. - - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://[2001:db8:a0b:12f0::1]/" - - IndexInfo: - description: - IndexInfo contains information about a registry. - type: "object" - x-nullable: true - properties: - Name: - description: | - Name of the registry, such as "docker.io". - type: "string" - example: "docker.io" - Mirrors: - description: | - List of mirrors, expressed as URIs. - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://registry-2.docker.io/" - - "https://registry-3.docker.io/" - Secure: - description: | - Indicates if the registry is part of the list of insecure - registries. - - If `false`, the registry is insecure. Insecure registries accept - un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from - unknown CAs) communication. - - > **Warning**: Insecure registries can be useful when running a local - > registry. However, because its use creates security vulnerabilities - > it should ONLY be enabled for testing purposes. For increased - > security, users should add their CA to their system's list of - > trusted CAs instead of enabling this option. 
- type: "boolean" - example: true - Official: - description: | - Indicates whether this is an official registry (i.e., Docker Hub / docker.io) - type: "boolean" - example: true - - Runtime: - description: | - Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtime. - - The runtime is invoked by the daemon via the `containerd` daemon. OCI - runtimes act as an interface to the Linux kernel namespaces, cgroups, - and SELinux. - type: "object" - properties: - path: - description: | - Name and, optional, path, of the OCI executable binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "/usr/local/bin/my-oci-runtime" - runtimeArgs: - description: | - List of command-line arguments to pass to the runtime when invoked. - type: "array" - x-nullable: true - items: - type: "string" - example: ["--debug", "--systemd-cgroup=false"] - status: - description: | - Information specific to the runtime. - - While this API specification does not define data provided by runtimes, - the following well-known properties may be provided by runtimes: - - `org.opencontainers.runtime-spec.features`: features structure as defined - in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), - in a JSON string representation. - -
- - > **Note**: The information returned in this field, including the - > formatting of values and labels, should not be considered stable, - > and may change without notice. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" - - Commit: - description: | - Commit holds the Git-commit (SHA1) that a binary was built from, as - reported in the version-string of external tools, such as `containerd`, - or `runC`. - type: "object" - properties: - ID: - description: "Actual commit ID of external tool." - type: "string" - example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" - Expected: - description: | - Commit ID of external tool expected by dockerd as set at build time. - type: "string" - example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" - - SwarmInfo: - description: | - Represents generic information about swarm. - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - default: "" - example: "k67qz4598weg5unwwffg6z1m1" - NodeAddr: - description: | - IP address at which this node can be reached by other nodes in the - swarm. - type: "string" - default: "" - example: "10.0.0.46" - LocalNodeState: - $ref: "#/definitions/LocalNodeState" - ControlAvailable: - type: "boolean" - default: false - example: true - Error: - type: "string" - default: "" - RemoteManagers: - description: | - List of ID's and addresses of other managers in the swarm. - type: "array" - default: null - x-nullable: true - items: - $ref: "#/definitions/PeerNode" - example: - - NodeID: "71izy0goik036k48jg985xnds" - Addr: "10.0.0.158:2377" - - NodeID: "79y6h1o4gv8n120drcprv5nmc" - Addr: "10.0.0.159:2377" - - NodeID: "k67qz4598weg5unwwffg6z1m1" - Addr: "10.0.0.46:2377" - Nodes: - description: "Total number of nodes in the swarm." - type: "integer" - x-nullable: true - example: 4 - Managers: - description: "Total number of managers in the swarm." - type: "integer" - x-nullable: true - example: 3 - Cluster: - $ref: "#/definitions/ClusterInfo" - - LocalNodeState: - description: "Current local status of this node." - type: "string" - default: "" - enum: - - "" - - "inactive" - - "pending" - - "active" - - "error" - - "locked" - example: "active" - - PeerNode: - description: "Represents a peer-node in the swarm" - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - Addr: - description: | - IP address and ports at which this node can be reached. - type: "string" - - NetworkAttachmentConfig: - description: | - Specifies how a service should be attached to a particular network. - type: "object" - properties: - Target: - description: | - The target network for attachment. Must be a network name or ID. - type: "string" - Aliases: - description: | - Discoverable alternate names for the service on this network. - type: "array" - items: - type: "string" - DriverOpts: - description: | - Driver attachment options for the network target. - type: "object" - additionalProperties: - type: "string" - - EventActor: - description: | - Actor describes something that generates events, like a container, network, - or a volume. 
- type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - description: | - Various key/value attributes of the object, depending on its type. - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-label-value" - image: "alpine:latest" - name: "my-container" - - EventMessage: - description: | - EventMessage represents the information an event contains. - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] - example: "container" - Action: - description: "The type of event" - type: "string" - example: "create" - Actor: - $ref: "#/definitions/EventActor" - scope: - description: | - Scope of the event. Engine events are `local` scope. Cluster (Swarm) - events are `swarm` scope. - type: "string" - enum: ["local", "swarm"] - time: - description: "Timestamp of event" - type: "integer" - format: "int64" - example: 1629574695 - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - example: 1629574695515050031 - - OCIDescriptor: - type: "object" - x-go-name: Descriptor - description: | - A descriptor struct containing digest, media type, and size, as defined in - the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). - properties: - mediaType: - description: | - The media type of the object this schema refers to. - type: "string" - example: "application/vnd.docker.distribution.manifest.v2+json" - digest: - description: | - The digest of the targeted content. - type: "string" - example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - size: - description: | - The size in bytes of the blob. - type: "integer" - format: "int64" - example: 3987495 - # TODO Not yet including these fields for now, as they are nil / omitted in our response. - # urls: - # description: | - # List of URLs from which this object MAY be downloaded. - # type: "array" - # items: - # type: "string" - # format: "uri" - # annotations: - # description: | - # Arbitrary metadata relating to the targeted content. - # type: "object" - # additionalProperties: - # type: "string" - # platform: - # $ref: "#/definitions/OCIPlatform" - - OCIPlatform: - type: "object" - x-go-name: Platform - description: | - Describes the platform which the image in the manifest runs on, as defined - in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). - properties: - architecture: - description: | - The CPU architecture, for example `amd64` or `ppc64`. - type: "string" - example: "arm" - os: - description: | - The operating system, for example `linux` or `windows`. - type: "string" - example: "windows" - os.version: - description: | - Optional field specifying the operating system version, for example on - Windows `10.0.19041.1165`. - type: "string" - example: "10.0.19041.1165" - os.features: - description: | - Optional field specifying an array of strings, each listing a required - OS feature (for example on Windows `win32k`). 
- type: "array" - items: - type: "string" - example: - - "win32k" - variant: - description: | - Optional field specifying a variant of the CPU, for example `v7` to - specify ARMv7 when architecture is `arm`. - type: "string" - example: "v7" - - DistributionInspect: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - description: | - Describes the result obtained from contacting the registry to retrieve - image metadata. - properties: - Descriptor: - $ref: "#/definitions/OCIDescriptor" - Platforms: - type: "array" - description: | - An array containing all platforms supported by the image. - items: - $ref: "#/definitions/OCIPlatform" - - ClusterVolume: - type: "object" - description: | - Options and information specific to, and only present on, Swarm CSI - cluster volumes. - properties: - ID: - type: "string" - description: | - The Swarm ID of this volume. Because cluster volumes are Swarm - objects, they have an ID, unlike non-cluster volumes. This ID can - be used to refer to the Volume instead of the name. - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ClusterVolumeSpec" - Info: - type: "object" - description: | - Information about the global status of the volume. - properties: - CapacityBytes: - type: "integer" - format: "int64" - description: | - The capacity of the volume in bytes. A value of 0 indicates that - the capacity is unknown. - VolumeContext: - type: "object" - description: | - A map of strings to strings returned from the storage plugin when - the volume is created. - additionalProperties: - type: "string" - VolumeID: - type: "string" - description: | - The ID of the volume as returned by the CSI storage plugin. This - is distinct from the volume's ID as provided by Docker. This ID - is never used by the user when communicating with Docker to refer - to this volume. If the ID is blank, then the Volume has not been - successfully created in the plugin yet. - AccessibleTopology: - type: "array" - description: | - The topology this volume is actually accessible from. - items: - $ref: "#/definitions/Topology" - PublishStatus: - type: "array" - description: | - The status of the volume as it pertains to its publishing and use on - specific nodes - items: - type: "object" - properties: - NodeID: - type: "string" - description: | - The ID of the Swarm node the volume is published on. - State: - type: "string" - description: | - The published state of the volume. - * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. - * `published` The volume is published successfully to the node. - * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. - * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. - enum: - - "pending-publish" - - "published" - - "pending-node-unpublish" - - "pending-controller-unpublish" - PublishContext: - type: "object" - description: | - A map of strings to strings returned by the CSI controller - plugin when a volume is published. 
- additionalProperties: - type: "string" - - ClusterVolumeSpec: - type: "object" - description: | - Cluster-specific options used to create the volume. - properties: - Group: - type: "string" - description: | - Group defines the volume group of this volume. Volumes belonging to - the same group can be referred to by group name when creating - Services. Referring to a volume by group instructs Swarm to treat - volumes in that group interchangeably for the purpose of scheduling. - Volumes with an empty string for a group technically all belong to - the same, emptystring group. - AccessMode: - type: "object" - description: | - Defines how the volume is used by tasks. - properties: - Scope: - type: "string" - description: | - The set of nodes this volume can be used on at one time. - - `single` The volume may only be scheduled to one node at a time. - - `multi` the volume may be scheduled to any supported number of nodes at a time. - default: "single" - enum: ["single", "multi"] - x-nullable: false - Sharing: - type: "string" - description: | - The number and way that different tasks can use this volume - at one time. - - `none` The volume may only be used by one task at a time. - - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly - - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. - - `all` The volume may have any number of readers and writers. - default: "none" - enum: ["none", "readonly", "onewriter", "all"] - x-nullable: false - MountVolume: - type: "object" - description: | - Options for using this volume as a Mount-type volume. - - Either MountVolume or BlockVolume, but not both, must be - present. - properties: - FsType: - type: "string" - description: | - Specifies the filesystem type for the mount volume. - Optional. - MountFlags: - type: "array" - description: | - Flags to pass when mounting the volume. Optional. - items: - type: "string" - BlockVolume: - type: "object" - description: | - Options for using this volume as a Block-type volume. - Intentionally empty. - Secrets: - type: "array" - description: | - Swarm Secrets that are passed to the CSI storage plugin when - operating on this volume. - items: - type: "object" - description: | - One cluster volume secret entry. Defines a key-value pair that - is passed to the plugin. - properties: - Key: - type: "string" - description: | - Key is the name of the key of the key-value pair passed to - the plugin. - Secret: - type: "string" - description: | - Secret is the swarm Secret object from which to read data. - This can be a Secret name or ID. The Secret data is - retrieved by swarm and used as the value of the key-value - pair passed to the plugin. - AccessibilityRequirements: - type: "object" - description: | - Requirements for the accessible topology of the volume. These - fields are optional. For an in-depth description of what these - fields mean, see the CSI specification. - properties: - Requisite: - type: "array" - description: | - A list of required topologies, at least one of which the - volume must be accessible from. - items: - $ref: "#/definitions/Topology" - Preferred: - type: "array" - description: | - A list of topologies that the volume should attempt to be - provisioned in. - items: - $ref: "#/definitions/Topology" - CapacityRange: - type: "object" - description: | - The desired capacity that the volume should be created with. If - empty, the plugin will decide the capacity. 
- properties: - RequiredBytes: - type: "integer" - format: "int64" - description: | - The volume must be at least this big. The value of 0 - indicates an unspecified minimum - LimitBytes: - type: "integer" - format: "int64" - description: | - The volume must not be bigger than this. The value of 0 - indicates an unspecified maximum. - Availability: - type: "string" - description: | - The availability of the volume for use in tasks. - - `active` The volume is fully available for scheduling on the cluster - - `pause` No new workloads should use the volume, but existing workloads are not stopped. - - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. - default: "active" - x-nullable: false - enum: - - "active" - - "pause" - - "drain" - - Topology: - description: | - A map of topological domains to topological segments. For in depth - details, see documentation for the Topology object in the CSI - specification. - type: "object" - additionalProperties: - type: "string" - -paths: - /containers/json: - get: - summary: "List containers" - description: | - Returns a list of containers. For details on the format, see the - [inspect endpoint](#operation/ContainerInspect). - - Note that it uses a different, smaller representation of a container - than inspecting a single container. For example, the list of linked - containers is not propagated . - operationId: "ContainerList" - produces: - - "application/json" - parameters: - - name: "all" - in: "query" - description: | - Return all containers. By default, only running containers are shown. - type: "boolean" - default: false - - name: "limit" - in: "query" - description: | - Return this number of most recently created containers, including - non-running ones. - type: "integer" - - name: "size" - in: "query" - description: | - Return the size of container as fields `SizeRw` and `SizeRootFs`. - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - Filters to process on the container list, encoded as JSON (a - `map[string][]string`). For example, `{"status": ["paused"]}` will - only return paused containers. 
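-
- As a non-normative illustration (not part of this specification), the
- `filters` value can be built and URL-encoded from Go roughly as follows;
- the `status` filter matches the example above, everything else is
- illustrative:
-
- ```go
- package main
-
- import (
- 	"encoding/json"
- 	"fmt"
- 	"net/url"
- )
-
- func main() {
- 	// Encode {"status": ["paused"]} as the JSON map[string][]string the API expects.
- 	filters, err := json.Marshal(map[string][]string{"status": {"paused"}})
- 	if err != nil {
- 		panic(err)
- 	}
- 	q := url.Values{}
- 	q.Set("all", "true")
- 	q.Set("filters", string(filters))
- 	// GET this path against the daemon's API socket or TCP address.
- 	fmt.Println("/containers/json?" + q.Encode())
- }
- ```
-
- The set of filter keys the daemon accepts is listed below.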
- - Available filters: - - - `ancestor`=(`[:]`, ``, or ``) - - `before`=(`` or ``) - - `expose`=(`[/]`|`/[]`) - - `exited=` containers with exit code of `` - - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - - `id=` a container's ID - - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - - `is-task=`(`true`|`false`) - - `label=key` or `label="key=value"` of a container label - - `name=` a container's name - - `network`=(`` or ``) - - `publish`=(`[/]`|`/[]`) - - `since`=(`` or ``) - - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - - `volume`=(`` or ``) - type: "string" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.docker.type: "container" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.docker.type: "container" - io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.image.id: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - io.kubernetes.image.name: "ubuntu:latest" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - 
Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.config.source: "api" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/create: - post: - summary: "Create a container" - operationId: "ContainerCreate" - consumes: - - "application/json" - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "name" - in: "query" - description: | - Assign the specified name to the container. Must match - `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. - type: "string" - pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - - name: "platform" - in: "query" - description: | - Platform in the format `os[/arch[/variant]]` used for image lookup. - - When specified, the daemon checks if the requested image is present - in the local image cache with the given OS and Architecture, and - otherwise returns a `404` status. - - If the option is not set, the host's native OS and Architecture are - used to look up the image in the image cache. 
However, if no platform - is passed and the given image does exist in the local image cache, - but its OS or architecture does not match, the container is created - with the available image, and a warning is added to the `Warnings` - field in the response, for example; - - WARNING: The requested image's platform (linux/arm64/v8) does not - match the detected host platform (linux/amd64) and no - specific platform was requested - - type: "string" - default: "" - - name: "body" - in: "body" - description: "Container to create" - schema: - allOf: - - $ref: "#/definitions/ContainerConfig" - - type: "object" - properties: - HostConfig: - $ref: "#/definitions/HostConfig" - NetworkingConfig: - $ref: "#/definitions/NetworkingConfig" - example: - Hostname: "" - Domainname: "" - User: "" - AttachStdin: false - AttachStdout: true - AttachStderr: true - Tty: false - OpenStdin: false - StdinOnce: false - Env: - - "FOO=bar" - - "BAZ=quux" - Cmd: - - "date" - Entrypoint: "" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - Volumes: - /volumes/data: {} - WorkingDir: "" - NetworkDisabled: false - MacAddress: "12:34:56:78:9a:bc" - ExposedPorts: - 22/tcp: {} - StopSignal: "SIGTERM" - StopTimeout: 10 - HostConfig: - Binds: - - "/tmp:/tmp" - Links: - - "redis3:redis" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - NanoCpus: 500000 - CpuPercent: 80 - CpuShares: 512 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpuQuota: 50000 - CpusetCpus: "0,1" - CpusetMems: "0,1" - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 300 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceWriteIOps: - - {} - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - MemorySwappiness: 60 - OomKillDisable: false - OomScoreAdj: 500 - PidMode: "" - PidsLimit: 0 - PortBindings: - 22/tcp: - - HostPort: "11022" - PublishAllPorts: false - Privileged: false - ReadonlyRootfs: false - Dns: - - "8.8.8.8" - DnsOptions: - - "" - DnsSearch: - - "" - VolumesFrom: - - "parent" - - "other:ro" - CapAdd: - - "NET_ADMIN" - CapDrop: - - "MKNOD" - GroupAdd: - - "newgroup" - RestartPolicy: - Name: "" - MaximumRetryCount: 0 - AutoRemove: true - NetworkMode: "bridge" - Devices: [] - Ulimits: - - {} - LogConfig: - Type: "json-file" - Config: {} - SecurityOpt: [] - StorageOpt: {} - CgroupParent: "" - VolumeDriver: "" - ShmSize: 67108864 - NetworkingConfig: - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - database_nw: {} - - required: true - responses: - 201: - description: "Container created successfully" - schema: - $ref: "#/definitions/ContainerCreateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/{id}/json: - 
get: - summary: "Inspect a container" - description: "Return low-level information about a container." - operationId: "ContainerInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerInspectResponse" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - $ref: "#/definitions/ContainerState" - Image: - description: "The container's image ID" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - Platform: - type: "string" - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - description: "IDs of exec instances that are running in the container." - type: "array" - items: - type: "string" - x-nullable: true - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/GraphDriverData" - SizeRw: - description: | - The size of files that have been created or changed by this - container. - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." - type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Healthcheck: - Test: ["CMD-SHELL", "exit 0"] - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "overlay2" - ExecIDs: - - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - IpcMode: "" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - RestartPolicy: - MaximumRetryCount: 2 - 
Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" - RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - Health: - Status: "healthy" - FailingStreak: 0 - Log: - - Start: "2019-12-22T10:59:05.6385933Z" - End: "2019-12-22T10:59:05.8078452Z" - ExitCode: 0 - Output: "" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: "2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "size" - in: "query" - type: "boolean" - default: false - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" - tags: ["Container"] - /containers/{id}/top: - get: - summary: "List processes running inside a container" - description: | - On Unix systems, this is done by running the `ps` command. This endpoint - is not supported on Windows. - operationId: "ContainerTop" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerTopResponse" - description: "OK response to ContainerTop operation" - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - Processes: - description: | - Each process running in the container, where each is process - is an array of values corresponding to the titles. 
- type: "array" - items: - type: "array" - items: - type: "string" - examples: - application/json: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "ps_args" - in: "query" - description: "The arguments to pass to `ps`. For example, `aux`" - type: "string" - default: "-ef" - tags: ["Container"] - /containers/{id}/logs: - get: - summary: "Get container logs" - description: | - Get `stdout` and `stderr` logs from a container. - - Note: This endpoint works only for containers with the `json-file` or - `journald` logging driver. - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - operationId: "ContainerLogs" - responses: - 200: - description: | - logs returned as a stream in response body. - For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - Note that unlike the attach endpoint, the logs endpoint does not - upgrade the connection and does not set Content-Type. - schema: - type: "string" - format: "binary" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "until" - in: "query" - description: "Only return logs before this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Container"] - /containers/{id}/changes: - get: - summary: "Get changes on a container’s filesystem" - description: | - Returns which files in a container's filesystem have been added, deleted, - or modified. 
The `Kind` of modification can be one of: - - - `0`: Modified ("C") - - `1`: Added ("A") - - `2`: Deleted ("D") - operationId: "ContainerChanges" - produces: ["application/json"] - responses: - 200: - description: "The list of changes" - schema: - type: "array" - items: - $ref: "#/definitions/FilesystemChange" - examples: - application/json: - - Path: "/dev" - Kind: 0 - - Path: "/dev/kmsg" - Kind: 1 - - Path: "/test" - Kind: 1 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/export: - get: - summary: "Export a container" - description: "Export the contents of a container as a tarball." - operationId: "ContainerExport" - produces: - - "application/octet-stream" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/stats: - get: - summary: "Get container stats based on resource usage" - description: | - This endpoint returns a live stream of a container’s resource usage - statistics. - - The `precpu_stats` is the CPU statistic of the *previous* read, and is - used to calculate the CPU usage percentage. It is not an exact copy - of the `cpu_stats` field. - - If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is - nil then for compatibility with older daemons the length of the - corresponding `cpu_usage.percpu_usage` array should be used. - - On a cgroup v2 host, the following fields are not set - * `blkio_stats`: all fields other than `io_service_bytes_recursive` - * `cpu_stats`: `cpu_usage.percpu_usage` - * `memory_stats`: `max_usage` and `failcnt` - Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
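-
- The next paragraph lists the formulas used by the `docker stats` CLI. As a
- rough, non-normative sketch, they translate to Go as follows (the struct
- fields mirror the example response further below, and the sample numbers
- are taken from that example):
-
- ```go
- package main
-
- import "fmt"
-
- // Minimal subset of the stats payload needed for the formulas below.
- type cpuUsage struct {
- 	TotalUsage  uint64
- 	PercpuUsage []uint64
- }
-
- type cpuStats struct {
- 	CPUUsage       cpuUsage
- 	SystemCPUUsage uint64
- 	OnlineCPUs     uint64
- }
-
- type memoryStats struct {
- 	Usage uint64
- 	Limit uint64
- 	Cache uint64 // memory_stats.stats.cache (cgroup v1 only; zero on cgroup v2)
- }
-
- // cpuPercent applies (cpu_delta / system_cpu_delta) * number_cpus * 100.0.
- func cpuPercent(cur, prev cpuStats) float64 {
- 	cpuDelta := float64(cur.CPUUsage.TotalUsage) - float64(prev.CPUUsage.TotalUsage)
- 	sysDelta := float64(cur.SystemCPUUsage) - float64(prev.SystemCPUUsage)
- 	numCPUs := float64(cur.OnlineCPUs)
- 	if numCPUs == 0 { // older daemons: fall back to the percpu_usage length
- 		numCPUs = float64(len(cur.CPUUsage.PercpuUsage))
- 	}
- 	if sysDelta <= 0 || cpuDelta < 0 {
- 		return 0
- 	}
- 	return cpuDelta / sysDelta * numCPUs * 100.0
- }
-
- // memPercent applies (memory_stats.usage - cache) / memory_stats.limit * 100.0.
- func memPercent(m memoryStats) float64 {
- 	if m.Limit == 0 {
- 		return 0
- 	}
- 	return float64(m.Usage-m.Cache) / float64(m.Limit) * 100.0
- }
-
- func main() {
- 	cur := cpuStats{CPUUsage: cpuUsage{TotalUsage: 100215355}, SystemCPUUsage: 739306590000000, OnlineCPUs: 4}
- 	prev := cpuStats{CPUUsage: cpuUsage{TotalUsage: 100093996}, SystemCPUUsage: 9492140000000}
- 	fmt.Printf("CPU usage %%: %g\n", cpuPercent(cur, prev))
- 	fmt.Printf("Memory usage %%: %.2f\n", memPercent(memoryStats{Usage: 6537216, Limit: 67108864}))
- }
- ```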
- - To calculate the values shown by the `stats` command of the docker cli tool - the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` - * available_memory = `memory_stats.limit` - * Memory usage % = `(used_memory / available_memory) * 100.0` - * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` - * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` - * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` - * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` - operationId: "ContainerStats" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - examples: - application/json: - read: "2015-01-08T22:57:31.547920715Z" - pids_stats: - current: 3 - networks: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - memory_stats: - stats: - total_pgmajfault: 0 - cache: 0 - mapped_file: 0 - total_inactive_file: 0 - pgpgout: 414 - rss: 6537216 - total_mapped_file: 0 - writeback: 0 - unevictable: 0 - pgpgin: 477 - total_unevictable: 0 - pgmajfault: 0 - total_rss: 6537216 - total_rss_huge: 6291456 - total_writeback: 0 - total_inactive_anon: 0 - rss_huge: 6291456 - hierarchical_memory_limit: 67108864 - total_pgfault: 964 - total_active_file: 0 - active_anon: 6537216 - total_active_anon: 6537216 - total_pgpgout: 414 - total_cache: 0 - inactive_anon: 0 - active_file: 0 - pgfault: 964 - inactive_file: 0 - total_pgpgin: 477 - max_usage: 6651904 - usage: 6537216 - failcnt: 0 - limit: 67108864 - blkio_stats: {} - cpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24472255 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100215355 - usage_in_kernelmode: 30000000 - system_cpu_usage: 739306590000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - precpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24350896 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100093996 - usage_in_kernelmode: 30000000 - system_cpu_usage: 9492140000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "stream" - in: "query" - description: | - Stream the output. If false, the stats will be output once and then - it will disconnect. - type: "boolean" - default: true - - name: "one-shot" - in: "query" - description: | - Only get a single stat instead of waiting for 2 cycles. Must be used - with `stream=false`. - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/resize: - post: - summary: "Resize a container TTY" - description: "Resize the TTY for a container." 
- operationId: "ContainerResize" - consumes: - - "application/octet-stream" - produces: - - "text/plain" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "cannot resize container" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Container"] - /containers/{id}/start: - post: - summary: "Start a container" - operationId: "ContainerStart" - responses: - 204: - description: "no error" - 304: - description: "container already started" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container. Format is a - single character `[a-Z]` or `ctrl-` where `` is one - of: `a-z`, `@`, `^`, `[`, `,` or `_`. - type: "string" - tags: ["Container"] - /containers/{id}/stop: - post: - summary: "Stop a container" - operationId: "ContainerStop" - responses: - 204: - description: "no error" - 304: - description: "container already stopped" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/restart: - post: - summary: "Restart a container" - operationId: "ContainerRestart" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/kill: - post: - summary: "Kill a container" - description: | - Send a POSIX signal to a container, defaulting to killing to the - container. 
- operationId: "ContainerKill" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is not running" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - default: "SIGKILL" - tags: ["Container"] - /containers/{id}/update: - post: - summary: "Update a container" - description: | - Change various configuration options of a container without having to - recreate it. - operationId: "ContainerUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "The container has been updated." - schema: - type: "object" - title: "ContainerUpdateResponse" - description: "OK response to ContainerUpdate operation" - properties: - Warnings: - type: "array" - items: - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "update" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - example: - BlkioWeight: 300 - CpuShares: 512 - CpuPeriod: 100000 - CpuQuota: 50000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpusetCpus: "0,1" - CpusetMems: "0" - Memory: 314572800 - MemorySwap: 514288000 - MemoryReservation: 209715200 - RestartPolicy: - MaximumRetryCount: 4 - Name: "on-failure" - tags: ["Container"] - /containers/{id}/rename: - post: - summary: "Rename a container" - operationId: "ContainerRename" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "name already in use" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "name" - in: "query" - required: true - description: "New name for the container" - type: "string" - tags: ["Container"] - /containers/{id}/pause: - post: - summary: "Pause a container" - description: | - Use the freezer cgroup to suspend all processes in a container. - - Traditionally, when suspending a process the `SIGSTOP` signal is used, - which is observable by the process being suspended. With the freezer - cgroup the process is unaware, and unable to capture, that it is being - suspended, and subsequently resumed. 
- operationId: "ContainerPause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/unpause: - post: - summary: "Unpause a container" - description: "Resume a container which has been paused." - operationId: "ContainerUnpause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/attach: - post: - summary: "Attach to a container" - description: | - Attach to a container to read its output or send it input. You can attach - to the same container multiple times and you can reattach to containers - that have been detached. - - Either the `stream` or `logs` parameter must be `true` for this endpoint - to do anything. - - See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) - for more details. - - ### Hijacking - - This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, - and `stderr` on the same socket. - - This is the response from the daemon for an attach request: - - ``` - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - [STREAM] - ``` - - After the headers and two new lines, the TCP connection can now be used - for raw, bidirectional communication between the client and server. - - To hint potential proxies about connection hijacking, the Docker client - can also optionally send connection upgrade headers. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 - Upgrade: tcp - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response, and will - similarly follow with the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Content-Type: application/vnd.docker.raw-stream - Connection: Upgrade - Upgrade: tcp - - [STREAM] - ``` - - ### Stream format - - When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), - the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream - and the stream over the hijacked connected is multiplexed to separate out - `stdout` and `stderr`. The stream consists of a series of frames, each - containing a header and a payload. - - The header contains the information which the stream writes (`stdout` or - `stderr`). It also contains the size of the associated frame encoded in - the last four bytes (`uint32`). - - It is encoded on the first eight bytes like this: - - ```go - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - ``` - - `STREAM_TYPE` can be: - - - 0: `stdin` (is written on `stdout`) - - 1: `stdout` - - 2: `stderr` - - `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size - encoded as big endian. 
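-
- As a non-normative sketch, reading and splitting a single frame in Go looks
- like this; handling of the payload follows the algorithm described next:
-
- ```go
- package main
-
- import (
- 	"bytes"
- 	"encoding/binary"
- 	"fmt"
- 	"io"
- )
-
- // readFrame reads one multiplexed frame: an 8-byte header followed by a
- // payload whose length is given by the last four header bytes (big endian).
- func readFrame(r io.Reader) (streamType byte, payload []byte, err error) {
- 	var header [8]byte
- 	if _, err = io.ReadFull(r, header[:]); err != nil {
- 		return 0, nil, err
- 	}
- 	streamType = header[0] // 0: stdin, 1: stdout, 2: stderr
- 	size := binary.BigEndian.Uint32(header[4:])
- 	payload = make([]byte, size)
- 	_, err = io.ReadFull(r, payload)
- 	return streamType, payload, err
- }
-
- func main() {
- 	// A hand-built stdout frame carrying "hi\n", standing in for the hijacked connection.
- 	frame := append([]byte{1, 0, 0, 0, 0, 0, 0, 3}, []byte("hi\n")...)
- 	st, p, err := readFrame(bytes.NewReader(frame))
- 	if err != nil {
- 		panic(err)
- 	}
- 	fmt.Printf("stream %d: %q\n", st, p)
- }
- ```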
- - Following the header is the payload, which is the specified number of - bytes of `STREAM_TYPE`. - - The simplest way to implement this protocol is the following: - - 1. Read 8 bytes. - 2. Choose `stdout` or `stderr` depending on the first byte. - 3. Extract the frame size from the last four bytes. - 4. Read the extracted size and output it on the correct output. - 5. Goto 1. - - ### Stream format when using a TTY - - When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream is not multiplexed. The data exchanged over the hijacked - connection is simply the raw data from the process PTY and client's - `stdin`. - - operationId: "ContainerAttach" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,` or `_`. - type: "string" - - name: "logs" - in: "query" - description: | - Replay previous logs from the container. - - This is useful for attaching to a container that has started and you - want to output everything since the container started. - - If `stream` is also enabled, once all the previous output has been - returned, it will seamlessly transition into streaming current - output. - type: "boolean" - default: false - - name: "stream" - in: "query" - description: | - Stream attached streams from the time the request was made onwards. - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/attach/ws: - get: - summary: "Attach to a container via a websocket" - operationId: "ContainerAttachWebsocket" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,`, or `_`. 
- type: "string" - - name: "logs" - in: "query" - description: "Return logs" - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Return stream" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/wait: - post: - summary: "Wait for a container" - description: "Block until a container stops, then returns the exit code." - operationId: "ContainerWait" - produces: ["application/json"] - responses: - 200: - description: "The container has exit." - schema: - $ref: "#/definitions/ContainerWaitResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "condition" - in: "query" - description: | - Wait until a container state reaches the given condition. - - Defaults to `not-running` if omitted or empty. - type: "string" - enum: - - "not-running" - - "next-exit" - - "removed" - default: "not-running" - tags: ["Container"] - /containers/{id}: - delete: - summary: "Remove a container" - operationId: "ContainerDelete" - responses: - 204: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: | - You cannot remove a running container: c2ada9df5af8. Stop the - container before attempting removal or force remove - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "v" - in: "query" - description: "Remove anonymous volumes associated with the container." - type: "boolean" - default: false - - name: "force" - in: "query" - description: "If the container is running, kill it before removing it." - type: "boolean" - default: false - - name: "link" - in: "query" - description: "Remove the specified link associated with the container." - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/archive: - head: - summary: "Get information about files in a container" - description: | - A response header `X-Docker-Container-Path-Stat` is returned, containing - a base64 - encoded JSON object with some filesystem header information - about the path. 
- operationId: "ContainerArchiveInfo" - responses: - 200: - description: "no error" - headers: - X-Docker-Container-Path-Stat: - type: "string" - description: | - A base64 - encoded JSON object with some filesystem header - information about the path - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - get: - summary: "Get an archive of a filesystem resource in a container" - description: "Get a tar archive of a resource in the filesystem of container id." - operationId: "ContainerArchive" - produces: ["application/x-tar"] - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - put: - summary: "Extract an archive of files or folders to a directory in a container" - description: | - Upload a tar archive to be extracted to a path in the filesystem of container id. - `path` parameter is asserted to be a directory. If it exists as a file, 400 error - will be returned with message "not a directory". - operationId: "PutContainerArchive" - consumes: ["application/x-tar", "application/octet-stream"] - responses: - 200: - description: "The content was extracted successfully" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "not a directory" - 403: - description: "Permission denied, the volume or container rootfs is marked as read-only." - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such container or path does not exist inside the container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Path to a directory in the container to extract the archive’s contents into. " - type: "string" - - name: "noOverwriteDirNonDir" - in: "query" - description: | - If `1`, `true`, or `True` then it will be an error if unpacking the - given content would cause an existing directory to be replaced with - a non-directory and vice versa. 
- type: "string" - - name: "copyUIDGID" - in: "query" - description: | - If `1`, `true`, then it will copy UID/GID maps to the dest file or - dir - type: "string" - - name: "inputStream" - in: "body" - required: true - description: | - The input stream must be a tar archive compressed with one of the - following algorithms: `identity` (no compression), `gzip`, `bzip2`, - or `xz`. - schema: - type: "string" - format: "binary" - tags: ["Container"] - /containers/prune: - post: - summary: "Delete stopped containers" - produces: - - "application/json" - operationId: "ContainerPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ContainerPruneResponse" - properties: - ContainersDeleted: - description: "Container IDs that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /images/json: - get: - summary: "List Images" - description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." - operationId: "ImageList" - produces: - - "application/json" - responses: - 200: - description: "Summary image data for the images matching the query" - schema: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "all" - in: "query" - description: "Show all images. Only images from a final layer (no children) are shown by default." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the images list. - - Available filters: - - - `before`=(`[:]`, `` or ``) - - `dangling=true` - - `label=key` or `label="key=value"` of an image label - - `reference`=(`[:]`) - - `since`=(`[:]`, `` or ``) - - `until=` - type: "string" - - name: "shared-size" - in: "query" - description: "Compute and show shared size as a `SharedSize` field on each image." - type: "boolean" - default: false - - name: "digests" - in: "query" - description: "Show digest information as a `RepoDigests` field on each image." - type: "boolean" - default: false - tags: ["Image"] - /build: - post: - summary: "Build an image" - description: | - Build an image from a tar archive with a `Dockerfile` in it. - - The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). - - The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. 
After that, each instruction is run one-by-one until the ID of the new image is output. - - The build is canceled if the client drops the connection by quitting or being killed. - operationId: "ImageBuild" - consumes: - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "inputStream" - in: "body" - description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - format: "binary" - - name: "dockerfile" - in: "query" - description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." - type: "string" - default: "Dockerfile" - - name: "t" - in: "query" - description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." - type: "string" - - name: "extrahosts" - in: "query" - description: "Extra hosts to add to /etc/hosts" - type: "string" - - name: "remote" - in: "query" - description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." - type: "string" - - name: "q" - in: "query" - description: "Suppress verbose build output." - type: "boolean" - default: false - - name: "nocache" - in: "query" - description: "Do not use the cache when building the image." - type: "boolean" - default: false - - name: "cachefrom" - in: "query" - description: "JSON array of images used for build cache resolution." - type: "string" - - name: "pull" - in: "query" - description: "Attempt to pull the image even if an older image exists locally." - type: "string" - - name: "rm" - in: "query" - description: "Remove intermediate containers after a successful build." - type: "boolean" - default: true - - name: "forcerm" - in: "query" - description: "Always remove intermediate containers, even upon failure." - type: "boolean" - default: false - - name: "memory" - in: "query" - description: "Set memory limit for build." - type: "integer" - - name: "memswap" - in: "query" - description: "Total memory (memory + swap). Set as `-1` to disable swap." - type: "integer" - - name: "cpushares" - in: "query" - description: "CPU shares (relative weight)." - type: "integer" - - name: "cpusetcpus" - in: "query" - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." - type: "string" - - name: "cpuperiod" - in: "query" - description: "The length of a CPU period in microseconds." - type: "integer" - - name: "cpuquota" - in: "query" - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - - name: "buildargs" - in: "query" - description: > - JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker - uses the buildargs as the environment context for commands run via the `Dockerfile` RUN - instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for - passing secret values. - - - For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. 
This would result in the - query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. - - - [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) - type: "string" - - name: "shmsize" - in: "query" - description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." - type: "integer" - - name: "squash" - in: "query" - description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" - type: "boolean" - - name: "labels" - in: "query" - description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." - type: "string" - - name: "networkmode" - in: "query" - description: | - Sets the networking mode for the run commands during build. Supported - standard values are: `bridge`, `host`, `none`, and `container:`. - Any other value is taken as a custom network's name or ID to which this - container should connect to. - type: "string" - - name: "Content-type" - in: "header" - type: "string" - enum: - - "application/x-tar" - default: "application/x-tar" - - name: "X-Registry-Config" - in: "header" - description: | - This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. - - The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: - - ``` - { - "docker.example.com": { - "username": "janedoe", - "password": "hunter2" - }, - "https://index.docker.io/v1/": { - "username": "mobydock", - "password": "conta1n3rize14" - } - } - ``` - - Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. - type: "string" - - name: "platform" - in: "query" - description: "Platform in the format os[/arch[/variant]]" - type: "string" - default: "" - - name: "target" - in: "query" - description: "Target build stage" - type: "string" - default: "" - - name: "outputs" - in: "query" - description: "BuildKit output configuration" - type: "string" - default: "" - - name: "version" - in: "query" - type: "string" - default: "1" - enum: ["1", "2"] - description: | - Version of the builder backend to use. - - - `1` is the first generation classic (deprecated) builder in the Docker daemon (default) - - `2` is [BuildKit](https://github.com/moby/buildkit) - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /build/prune: - post: - summary: "Delete builder cache" - produces: - - "application/json" - operationId: "BuildPrune" - parameters: - - name: "keep-storage" - in: "query" - description: "Amount of disk space in bytes to keep for cache" - type: "integer" - format: "int64" - - name: "all" - in: "query" - type: "boolean" - description: "Remove all types of build cache" - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the list of build cache objects. - - Available filters: - - - `until=` remove cache older than ``. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon's local time. - - `id=` - - `parent=` - - `type=` - - `description=` - - `inuse` - - `shared` - - `private` - responses: - 200: - description: "No error" - schema: - type: "object" - title: "BuildPruneResponse" - properties: - CachesDeleted: - type: "array" - items: - description: "ID of build cache object" - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /images/create: - post: - summary: "Create an image" - description: "Pull or import an image." - operationId: "ImageCreate" - consumes: - - "text/plain" - - "application/octet-stream" - produces: - - "application/json" - responses: - 200: - description: "no error" - 404: - description: "repository does not exist or no read access" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "fromImage" - in: "query" - description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." - type: "string" - - name: "fromSrc" - in: "query" - description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." - type: "string" - - name: "repo" - in: "query" - description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." - type: "string" - - name: "tag" - in: "query" - description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." - type: "string" - - name: "message" - in: "query" - description: "Set commit message for imported image." - type: "string" - - name: "inputImage" - in: "body" - description: "Image content if the value `-` has been specified in fromSrc query parameter" - schema: - type: "string" - required: false - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - name: "changes" - in: "query" - description: | - Apply `Dockerfile` instructions to the image that is created, - for example: `changes=ENV DEBUG=true`. - Note that `ENV DEBUG=true` should be URI component encoded. - - Supported `Dockerfile` instructions: - `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - type: "array" - items: - type: "string" - - name: "platform" - in: "query" - description: | - Platform in the format os[/arch[/variant]]. - - When used in combination with the `fromImage` option, the daemon checks - if the given image is present in the local image cache with the given - OS and Architecture, and otherwise attempts to pull the image. If the - option is not set, the host's native OS and Architecture are used. - If the given image does not exist in the local image cache, the daemon - attempts to pull the image with the host's native OS and Architecture. - If the given image does exists in the local image cache, but its OS or - architecture does not match, a warning is produced. 
- - When used with the `fromSrc` option to import an image from an archive, - this option sets the platform information for the imported image. If - the option is not set, the host's native OS and Architecture are used - for the imported image. - type: "string" - default: "" - tags: ["Image"] - /images/{name}/json: - get: - summary: "Inspect an image" - description: "Return low-level information about an image." - operationId: "ImageInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/ImageInspect" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Image"] - /images/{name}/history: - get: - summary: "Get the history of an image" - description: "Return parent layers of an image." - operationId: "ImageHistory" - produces: ["application/json"] - responses: - 200: - description: "List of image layers" - schema: - type: "array" - items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false - examples: - application/json: - - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" - Created: 1398108230 - CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" - Tags: - - "ubuntu:lucid" - - "ubuntu:10.04" - Size: 182964289 - Comment: "" - - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" - Created: 1398108222 - CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" - Tags: [] - Size: 0 - Comment: "" - - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" - Created: 1371157430 - CreatedBy: "" - Tags: - - "scratch12:latest" - - "scratch:latest" - Size: 0 - Comment: "Imported from -" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/{name}/push: - post: - summary: "Push an image" - description: | - Push an image to a registry. - - If you wish to push an image on to a private registry, that image must - already have a tag which references the registry. For example, - `registry.example.com/myimage:latest`. - - The push is cancelled if the HTTP connection is closed. 
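For reference only (not part of the upstream specification): a rough Go sketch of pushing an already-tagged image over the default Unix socket. The auth-config field names (`username`, `password`, `serveraddress`), the socket path `/var/run/docker.sock`, and the unversioned request path are assumptions drawn from the authentication section this API references, not from this file.

```go
// Sketch: push an image that is already tagged for the target registry,
// supplying credentials via the X-Registry-Auth header.
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
)

func main() {
	// Talk to the daemon over its default Unix socket (assumed path).
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	// base64url-encoded JSON auth configuration (field names assumed).
	authJSON, _ := json.Marshal(map[string]string{
		"username":      "janedoe",
		"password":      "hunter2",
		"serveraddress": "registry.example.com",
	})
	auth := base64.URLEncoding.EncodeToString(authJSON)

	// The image must already carry a registry-qualified tag,
	// e.g. created beforehand via the /images/{name}/tag endpoint.
	req, _ := http.NewRequest(http.MethodPost,
		"http://docker/images/registry.example.com/myimage/push?tag=latest", nil)
	req.Header.Set("X-Registry-Auth", auth)

	resp, err := client.Do(req)
	if err != nil {
		fmt.Fprintln(os.Stderr, "push failed:", err)
		return
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // stream the progress messages
}
```

The response body streams progress messages until the push finishes; closing the connection early is also how a client cancels the push, as noted in the description above.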
- operationId: "ImagePush" - consumes: - - "application/octet-stream" - responses: - 200: - description: "No error" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID." - type: "string" - required: true - - name: "tag" - in: "query" - description: "The tag to associate with the image on the registry." - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - required: true - - name: "platform" - in: "query" - description: "Select a platform-specific manifest to be pushed. OCI platform (JSON encoded)" - type: "string" - x-nullable: true - tags: ["Image"] - /images/{name}/tag: - post: - summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." - operationId: "ImageTag" - responses: - 201: - description: "No error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID to tag." - type: "string" - required: true - - name: "repo" - in: "query" - description: "The repository to tag in. For example, `someuser/someimage`." - type: "string" - - name: "tag" - in: "query" - description: "The name of the new tag." - type: "string" - tags: ["Image"] - /images/{name}: - delete: - summary: "Remove an image" - description: | - Remove an image, along with any untagged parent images that were - referenced by that image. - - Images can't be removed if they have descendant images, are being - used by a running container or are being used by a build. - operationId: "ImageDelete" - produces: ["application/json"] - responses: - 200: - description: "The image was deleted successfully" - schema: - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - examples: - application/json: - - Untagged: "3e2f21a89f" - - Deleted: "3e2f21a89f" - - Deleted: "53b4f83ac9" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "force" - in: "query" - description: "Remove the image even if it is being used by stopped containers or has other tags" - type: "boolean" - default: false - - name: "noprune" - in: "query" - description: "Do not delete untagged parent images" - type: "boolean" - default: false - tags: ["Image"] - /images/search: - get: - summary: "Search images" - description: "Search for an image on Docker Hub." 
- operationId: "ImageSearch" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - type: "object" - title: "ImageSearchResponseItem" - properties: - description: - type: "string" - is_official: - type: "boolean" - is_automated: - description: | - Whether this repository has automated builds enabled. - -
- - > **Deprecated**: This field is deprecated and will always be "false". - type: "boolean" - example: false - name: - type: "string" - star_count: - type: "integer" - examples: - application/json: - - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" - is_official: true - is_automated: false - name: "alpine" - star_count: 10093 - - description: "Busybox base image." - is_official: true - is_automated: false - name: "Busybox base image." - star_count: 3037 - - description: "The PostgreSQL object-relational database system provides reliability and data integrity." - is_official: true - is_automated: false - name: "postgres" - star_count: 12408 - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "term" - in: "query" - description: "Term to search" - type: "string" - required: true - - name: "limit" - in: "query" - description: "Maximum number of results to return" - type: "integer" - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - - `is-official=(true|false)` - - `stars=` Matches images that has at least 'number' stars. - type: "string" - tags: ["Image"] - /images/prune: - post: - summary: "Delete unused images" - produces: - - "application/json" - operationId: "ImagePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - - - `dangling=` When set to `true` (or `1`), prune only - unused *and* untagged images. When set to `false` - (or `0`), all unused images are pruned. - - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ImagePruneResponse" - properties: - ImagesDeleted: - description: "Images that were deleted" - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /auth: - post: - summary: "Check auth configuration" - description: | - Validate credentials for a registry and, if available, get an identity - token for accessing the registry without password. - operationId: "SystemAuth" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "An identity token was generated successfully." - schema: - type: "object" - title: "SystemAuthResponse" - required: [Status] - properties: - Status: - description: "The status of the authentication" - type: "string" - x-nullable: false - IdentityToken: - description: "An opaque token used to authenticate a user after a successful login" - type: "string" - x-nullable: false - examples: - application/json: - Status: "Login Succeeded" - IdentityToken: "9cbaf023786cd7..." 
- 204: - description: "No error" - 401: - description: "Auth error" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "authConfig" - in: "body" - description: "Authentication to check" - schema: - $ref: "#/definitions/AuthConfig" - tags: ["System"] - /info: - get: - summary: "Get system information" - operationId: "SystemInfo" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/SystemInfo" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /version: - get: - summary: "Get version" - description: "Returns the version of Docker that is running and various information about the system that Docker is running on." - operationId: "SystemVersion" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/SystemVersion" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /_ping: - get: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." - operationId: "SystemPing" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "OK" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - Builder-Version: - type: "string" - description: | - Default version of docker image builder - - The default on Linux is version "2" (BuildKit), but the daemon - can be configured to recommend version "1" (classic Builder). - Windows does not yet support BuildKit for native Windows images, - and uses "1" (classic builder) as a default. - - This value is a recommendation as advertised by the daemon, and - it is up to the client to choose which builder to use. - default: "2" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Swarm: - type: "string" - enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] - description: | - Contains information about Swarm status of the daemon, - and if the daemon is acting as a manager or worker node. - default: "inactive" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - headers: - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - tags: ["System"] - head: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." - operationId: "SystemPingHead" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "(empty)" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - Builder-Version: - type: "string" - description: "Default version of docker image builder" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Swarm: - type: "string" - enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] - description: | - Contains information about Swarm status of the daemon, - and if the daemon is acting as a manager or worker node. 
- default: "inactive" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /commit: - post: - summary: "Create a new image from a container" - operationId: "ImageCommit" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "containerConfig" - in: "body" - description: "The container configuration" - schema: - $ref: "#/definitions/ContainerConfig" - - name: "container" - in: "query" - description: "The ID or name of the container to commit" - type: "string" - - name: "repo" - in: "query" - description: "Repository name for the created image" - type: "string" - - name: "tag" - in: "query" - description: "Tag name for the create image" - type: "string" - - name: "comment" - in: "query" - description: "Commit message" - type: "string" - - name: "author" - in: "query" - description: "Author of the image (e.g., `John Hannibal Smith `)" - type: "string" - - name: "pause" - in: "query" - description: "Whether to pause the container before committing" - type: "boolean" - default: true - - name: "changes" - in: "query" - description: "`Dockerfile` instructions to apply while committing" - type: "string" - tags: ["Image"] - /events: - get: - summary: "Monitor events" - description: | - Stream real-time events from the server. - - Various objects within Docker report events when something happens to them. - - Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` - - Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` - - Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` - - Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` - - The Docker daemon reports these events: `reload` - - Services report these events: `create`, `update`, and `remove` - - Nodes report these events: `create`, `update`, and `remove` - - Secrets report these events: `create`, `update`, and `remove` - - Configs report these events: `create`, `update`, and `remove` - - The Builder reports `prune` events - - operationId: "SystemEvents" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/EventMessage" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "since" - in: "query" - description: "Show events created since this timestamp then stream new events." - type: "string" - - name: "until" - in: "query" - description: "Show events created until this timestamp then stop streaming." 
- type: "string" - - name: "filters" - in: "query" - description: | - A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: - - - `config=` config name or ID - - `container=` container name or ID - - `daemon=` daemon name or ID - - `event=` event type - - `image=` image name or ID - - `label=` image or container label - - `network=` network name or ID - - `node=` node ID - - `plugin`= plugin name or ID - - `scope`= local or swarm - - `secret=` secret name or ID - - `service=` service name or ID - - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - - `volume=` volume name - type: "string" - tags: ["System"] - /system/df: - get: - summary: "Get data usage information" - operationId: "SystemDataUsage" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemDataUsageResponse" - properties: - LayersSize: - type: "integer" - format: "int64" - Images: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - Containers: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - Volumes: - type: "array" - items: - $ref: "#/definitions/Volume" - BuildCache: - type: "array" - items: - $ref: "#/definitions/BuildCache" - example: - LayersSize: 1092588 - Images: - - - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - ParentId: "" - RepoTags: - - "busybox:latest" - RepoDigests: - - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" - Created: 1466724217 - Size: 1092588 - SharedSize: 0 - Labels: {} - Containers: 1 - Containers: - - - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" - Names: - - "/top" - Image: "busybox" - ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - Command: "top" - Created: 1472592424 - Ports: [] - SizeRootFs: 1092588 - Labels: {} - State: "exited" - Status: "Exited (0) 56 minutes ago" - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - IPAMConfig: null - Links: null - Aliases: null - NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" - EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" - Gateway: "172.18.0.1" - IPAddress: "172.18.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Mounts: [] - Volumes: - - - Name: "my-volume" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/my-volume/_data" - Labels: null - Scope: "local" - Options: null - UsageData: - Size: 10920104 - RefCount: 2 - BuildCache: - - - ID: "hw53o5aio51xtltp5xjp8v7fx" - Parents: [] - Type: "regular" - Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" - InUse: false - Shared: true - Size: 0 - CreatedAt: "2021-06-28T13:31:01.474619385Z" - LastUsedAt: "2021-07-07T22:02:32.738075951Z" - UsageCount: 26 - - - ID: "ndlpt0hhvkqcdfkputsk4cq9c" - Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] - Type: "regular" - Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: false - Shared: true - Size: 51 - CreatedAt: "2021-06-28T13:31:03.002625487Z" - LastUsedAt: "2021-07-07T22:02:32.773909517Z" - UsageCount: 26 - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - 
- name: "type" - in: "query" - description: | - Object types, for which to compute and return data. - type: "array" - collectionFormat: multi - items: - type: "string" - enum: ["container", "image", "volume", "build-cache"] - tags: ["System"] - /images/{name}/get: - get: - summary: "Export an image" - description: | - Get a tarball containing all images and metadata for a repository. - - If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. - - ### Image tarball format - - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer - - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. - - If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. - - ```json - { - "hello-world": { - "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" - } - } - ``` - operationId: "ImageGet" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/get: - get: - summary: "Export several images" - description: | - Get a tarball containing all images and metadata for several image - repositories. - - For each value of the `names` parameter: if it is a specific name and - tag (e.g. `ubuntu:latest`), then only that image (and its parents) are - returned; if it is an image ID, similarly only that image (and its parents) - are returned and there would be no names referenced in the 'repositories' - file for this image ID. - - For details on the format, see the [export image endpoint](#operation/ImageGet). - operationId: "ImageGetAll" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "names" - in: "query" - description: "Image names to filter by" - type: "array" - items: - type: "string" - tags: ["Image"] - /images/load: - post: - summary: "Import images" - description: | - Load a set of images and tags into a repository. - - For details on the format, see the [export image endpoint](#operation/ImageGet). - operationId: "ImageLoad" - consumes: - - "application/x-tar" - produces: - - "application/json" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "imagesTarball" - in: "body" - description: "Tar archive containing images" - schema: - type: "string" - format: "binary" - - name: "quiet" - in: "query" - description: "Suppress progress details during load." 
- type: "boolean" - default: false - tags: ["Image"] - /containers/{id}/exec: - post: - summary: "Create an exec instance" - description: "Run a command inside a running container." - operationId: "ContainerExec" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is paused" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execConfig" - in: "body" - description: "Exec configuration" - schema: - type: "object" - title: "ExecConfig" - properties: - AttachStdin: - type: "boolean" - description: "Attach to `stdin` of the exec command." - AttachStdout: - type: "boolean" - description: "Attach to `stdout` of the exec command." - AttachStderr: - type: "boolean" - description: "Attach to `stderr` of the exec command." - ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array." - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - DetachKeys: - type: "string" - description: | - Override the key sequence for detaching a container. Format is - a single character `[a-Z]` or `ctrl-` where `` - is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - Env: - description: | - A list of environment variables in the form `["VAR=value", ...]`. - type: "array" - items: - type: "string" - Cmd: - type: "array" - description: "Command to run, as a string or array of strings." - items: - type: "string" - Privileged: - type: "boolean" - description: "Runs the exec process with extended privileges." - default: false - User: - type: "string" - description: | - The user, and optionally, group to run the exec process inside - the container. Format is one of: `user`, `user:group`, `uid`, - or `uid:gid`. - WorkingDir: - type: "string" - description: | - The working directory for the exec process inside the container. - example: - AttachStdin: false - AttachStdout: true - AttachStderr: true - DetachKeys: "ctrl-p,ctrl-q" - Tty: false - Cmd: - - "date" - Env: - - "FOO=bar" - - "BAZ=quux" - required: true - - name: "id" - in: "path" - description: "ID or name of container" - type: "string" - required: true - tags: ["Exec"] - /exec/{id}/start: - post: - summary: "Start an exec instance" - description: | - Starts a previously set up exec instance. If detach is true, this endpoint - returns immediately after starting the command. Otherwise, it sets up an - interactive session with the command. - operationId: "ExecStart" - consumes: - - "application/json" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 200: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Container is stopped or paused" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execStartConfig" - in: "body" - schema: - type: "object" - title: "ExecStartConfig" - properties: - Detach: - type: "boolean" - description: "Detach from the command." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." 
- ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array." - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - example: - Detach: false - Tty: true - ConsoleSize: [80, 64] - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - /exec/{id}/resize: - post: - summary: "Resize an exec instance" - description: | - Resize the TTY session used by an exec instance. This endpoint only works - if `tty` was specified as part of creating and starting the exec instance. - operationId: "ExecResize" - responses: - 200: - description: "No error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Exec"] - /exec/{id}/json: - get: - summary: "Inspect an exec instance" - description: "Return low-level information about an exec instance." - operationId: "ExecInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ExecInspectResponse" - properties: - CanRemove: - type: "boolean" - DetachKeys: - type: "string" - ID: - type: "string" - Running: - type: "boolean" - ExitCode: - type: "integer" - ProcessConfig: - $ref: "#/definitions/ProcessConfig" - OpenStdin: - type: "boolean" - OpenStderr: - type: "boolean" - OpenStdout: - type: "boolean" - ContainerID: - type: "string" - Pid: - type: "integer" - description: "The system process ID for the exec process." - examples: - application/json: - CanRemove: false - ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" - DetachKeys: "" - ExitCode: 2 - ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" - OpenStderr: true - OpenStdin: true - OpenStdout: true - ProcessConfig: - arguments: - - "-c" - - "exit 2" - entrypoint: "sh" - privileged: false - tty: true - user: "1000" - Running: false - Pid: 42000 - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - - /volumes: - get: - summary: "List volumes" - operationId: "VolumeList" - produces: ["application/json"] - responses: - 200: - description: "Summary volume data that matches the query" - schema: - $ref: "#/definitions/VolumeListResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to - process on the volumes list. Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - volumes that are not in use by a container. When set to `false` - (or `0`), only volumes that are in use by one or more - containers are returned. - - `driver=` Matches volumes based on their driver. 
- - `label=` or `label=:` Matches volumes based on - the presence of a `label` alone or a `label` and a value. - - `name=` Matches all or part of a volume name. - type: "string" - format: "json" - tags: ["Volume"] - - /volumes/create: - post: - summary: "Create a volume" - operationId: "VolumeCreate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 201: - description: "The volume was created successfully" - schema: - $ref: "#/definitions/Volume" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "volumeConfig" - in: "body" - required: true - description: "Volume configuration" - schema: - $ref: "#/definitions/VolumeCreateOptions" - tags: ["Volume"] - - /volumes/{name}: - get: - summary: "Inspect a volume" - operationId: "VolumeInspect" - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Volume" - 404: - description: "No such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - tags: ["Volume"] - - put: - summary: | - "Update a volume. Valid only for Swarm cluster volumes" - operationId: "VolumeUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name or ID of the volume" - type: "string" - required: true - - name: "body" - in: "body" - schema: - # though the schema for is an object that contains only a - # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object - # means that if, later on, we support things like changing the - # labels, we can do so without duplicating that information to the - # ClusterVolumeSpec. - type: "object" - description: "Volume configuration" - properties: - Spec: - $ref: "#/definitions/ClusterVolumeSpec" - description: | - The spec of the volume to update. Currently, only Availability may - change. All other fields must remain unchanged. - - name: "version" - in: "query" - description: | - The version number of the volume being updated. This is required to - avoid conflicting writes. Found in the volume's `ClusterVolume` - field. - type: "integer" - format: "int64" - required: true - tags: ["Volume"] - - delete: - summary: "Remove a volume" - description: "Instruct the driver to remove the volume." 
- operationId: "VolumeDelete" - responses: - 204: - description: "The volume was removed" - 404: - description: "No such volume or volume driver" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Volume is in use and cannot be removed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - - name: "force" - in: "query" - description: "Force the removal of the volume" - type: "boolean" - default: false - tags: ["Volume"] - - /volumes/prune: - post: - summary: "Delete unused volumes" - produces: - - "application/json" - operationId: "VolumePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. - - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "VolumePruneResponse" - properties: - VolumesDeleted: - description: "Volumes that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Volume"] - /networks: - get: - summary: "List networks" - description: | - Returns a list of networks. For details on the format, see the - [network inspect endpoint](#operation/NetworkInspect). - - Note that it uses a different, smaller representation of a network than - inspecting a single network. For example, the list of containers attached - to the network is not propagated in API versions 1.28 and up. 
- operationId: "NetworkList" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Network" - examples: - application/json: - - Name: "bridge" - Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" - Created: "2016-10-19T06:21:00.416543526Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: - - - Subnet: "172.17.0.0/16" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - - Name: "none" - Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "null" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - - Name: "host" - Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "host" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to process - on the networks list. - - Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - networks that are not in use by a container. When set to `false` - (or `0`), only networks that are in use by one or more - containers are returned. - - `driver=` Matches a network's driver. - - `id=` Matches all or part of a network ID. - - `label=` or `label==` of a network label. - - `name=` Matches all or part of a network name. - - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
- type: "string" - tags: ["Network"] - - /networks/{id}: - get: - summary: "Inspect a network" - operationId: "NetworkInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Network" - 404: - description: "Network not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "verbose" - in: "query" - description: "Detailed inspect output for troubleshooting" - type: "boolean" - default: false - - name: "scope" - in: "query" - description: "Filter the network by scope (swarm, global, or local)" - type: "string" - tags: ["Network"] - - delete: - summary: "Remove a network" - operationId: "NetworkDelete" - responses: - 204: - description: "No error" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such network" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - tags: ["Network"] - - /networks/create: - post: - summary: "Create a network" - operationId: "NetworkCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "Network created successfully" - schema: - $ref: "#/definitions/NetworkCreateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: | - Forbidden operation. This happens when trying to create a network named after a pre-defined network, - or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "plugin not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "networkConfig" - in: "body" - description: "Network configuration" - required: true - schema: - type: "object" - title: "NetworkCreateRequest" - required: ["Name"] - properties: - Name: - description: "The network's name." - type: "string" - example: "my_network" - Driver: - description: "Name of the network driver plugin to use." - type: "string" - default: "bridge" - example: "bridge" - Scope: - description: | - The level at which the network exists (e.g. `swarm` for cluster-wide - or `local` for machine level). - type: "string" - Internal: - description: "Restrict external access to the network." - type: "boolean" - Attachable: - description: | - Globally scoped network is manually attachable by regular - containers from workers in swarm mode. - type: "boolean" - example: true - Ingress: - description: | - Ingress network is the network which provides the routing-mesh - in swarm mode. - type: "boolean" - example: false - ConfigOnly: - description: | - Creates a config-only network. Config-only networks are placeholder - networks for network configurations to be used by other networks. - Config-only networks cannot be used directly to run containers - or services. - type: "boolean" - default: false - example: false - ConfigFrom: - description: | - Specifies the source which will provide the configuration for - this network. 
The specified network must be an existing - config-only network; see ConfigOnly. - $ref: "#/definitions/ConfigReference" - IPAM: - description: "Optional custom IP scheme for the network." - $ref: "#/definitions/IPAM" - EnableIPv6: - description: "Enable IPv6 on the network." - type: "boolean" - example: true - Options: - description: "Network specific options to be used by the drivers." - type: "object" - additionalProperties: - type: "string" - example: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - tags: ["Network"] - - /networks/{id}/connect: - post: - summary: "Connect a container to a network" - description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" - operationId: "NetworkConnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "Operation forbidden" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - title: "NetworkConnectRequest" - properties: - Container: - type: "string" - description: "The ID or name of the container to connect to the network." - EndpointConfig: - $ref: "#/definitions/EndpointSettings" - example: - Container: "3613f73ba0e4" - EndpointConfig: - IPAMConfig: - IPv4Address: "172.24.56.89" - IPv6Address: "2001:db8::5689" - MacAddress: "02:42:ac:12:05:02" - tags: ["Network"] - - /networks/{id}/disconnect: - post: - summary: "Disconnect a container from a network" - operationId: "NetworkDisconnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - title: "NetworkDisconnectRequest" - properties: - Container: - type: "string" - description: | - The ID or name of the container to disconnect from the network. - Force: - type: "boolean" - description: | - Force the container to disconnect from the network. 
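
The create and connect bodies documented above are ordinary JSON objects. The following sketch only builds and prints trimmed-down versions of those two request bodies (the field subsets chosen here are an assumption; the schemas accept more), which could then be POSTed to `/networks/create` and `/networks/{id}/connect`.

```go
package main

import (
	"encoding/json"
	"os"
)

// Trimmed-down shapes of the request bodies documented above; the real
// schemas (NetworkCreateRequest, NetworkConnectRequest) accept more fields.
type networkCreateRequest struct {
	Name       string            `json:"Name"`
	Driver     string            `json:"Driver,omitempty"`
	Attachable bool              `json:"Attachable,omitempty"`
	Labels     map[string]string `json:"Labels,omitempty"`
}

type networkConnectRequest struct {
	Container      string `json:"Container"`
	EndpointConfig struct {
		IPAMConfig struct {
			IPv4Address string `json:"IPv4Address,omitempty"`
		} `json:"IPAMConfig"`
	} `json:"EndpointConfig"`
}

func main() {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")

	// Body for POST /networks/create.
	enc.Encode(networkCreateRequest{
		Name:       "backup-net", // hypothetical network name
		Driver:     "bridge",
		Attachable: true,
		Labels:     map[string]string{"com.example.purpose": "demo"},
	})

	// Body for POST /networks/{id}/connect, reusing the container ID from
	// the documented example.
	var connect networkConnectRequest
	connect.Container = "3613f73ba0e4"
	connect.EndpointConfig.IPAMConfig.IPv4Address = "172.24.56.89"
	enc.Encode(connect)
}
```
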
- tags: ["Network"] - /networks/prune: - post: - summary: "Delete unused networks" - produces: - - "application/json" - operationId: "NetworkPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "NetworkPruneResponse" - properties: - NetworksDeleted: - description: "Networks that were deleted" - type: "array" - items: - type: "string" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Network"] - /plugins: - get: - summary: "List plugins" - operationId: "PluginList" - description: "Returns information about installed plugins." - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Plugin" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the plugin list. - - Available filters: - - - `capability=` - - `enable=|` - tags: ["Plugin"] - - /plugins/privileges: - get: - summary: "Get plugin privileges" - operationId: "GetPluginPrivileges" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: - - "Plugin" - - /plugins/pull: - post: - summary: "Install a plugin" - operationId: "PluginPull" - description: | - Pulls and installs a plugin. After the plugin is installed, it can be - enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). - produces: - - "application/json" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - Remote reference for plugin to install. - - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "name" - in: "query" - description: | - Local name for the pulled plugin. - - The `:latest` tag is optional, and is used as the default if omitted. - required: false - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. 
- type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/{name}/json: - get: - summary: "Inspect a plugin" - operationId: "PluginInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}: - delete: - summary: "Remove a plugin" - operationId: "PluginDelete" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "force" - in: "query" - description: | - Disable the plugin before removing. This may result in issues if the - plugin is in use by a container. - type: "boolean" - default: false - tags: ["Plugin"] - /plugins/{name}/enable: - post: - summary: "Enable a plugin" - operationId: "PluginEnable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "timeout" - in: "query" - description: "Set the HTTP client timeout (in seconds)" - type: "integer" - default: 0 - tags: ["Plugin"] - /plugins/{name}/disable: - post: - summary: "Disable a plugin" - operationId: "PluginDisable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "force" - in: "query" - description: | - Force disable a plugin even if still in use. - required: false - type: "boolean" - tags: ["Plugin"] - /plugins/{name}/upgrade: - post: - summary: "Upgrade a plugin" - operationId: "PluginUpgrade" - responses: - 204: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "remote" - in: "query" - description: | - Remote reference to upgrade to. 
- - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/create: - post: - summary: "Create a plugin" - operationId: "PluginCreate" - consumes: - - "application/x-tar" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "tarContext" - in: "body" - description: "Path to tar containing plugin rootfs and manifest" - schema: - type: "string" - format: "binary" - tags: ["Plugin"] - /plugins/{name}/push: - post: - summary: "Push a plugin" - operationId: "PluginPush" - description: | - Push a plugin to the registry. - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - responses: - 200: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /plugins/{name}/set: - post: - summary: "Configure a plugin" - operationId: "PluginSet" - consumes: - - "application/json" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - type: "string" - example: ["DEBUG=1"] - responses: - 204: - description: "No error" - 404: - description: "Plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /nodes: - get: - summary: "List nodes" - operationId: "NodeList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Node" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
- - Available filters: - - `id=` - - `label=` - - `membership=`(`accepted`|`pending`)` - - `name=` - - `node.label=` - - `role=`(`manager`|`worker`)` - type: "string" - tags: ["Node"] - /nodes/{id}: - get: - summary: "Inspect a node" - operationId: "NodeInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Node" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - tags: ["Node"] - delete: - summary: "Delete a node" - operationId: "NodeDelete" - responses: - 200: - description: "no error" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - - name: "force" - in: "query" - description: "Force remove a node from the swarm" - default: false - type: "boolean" - tags: ["Node"] - /nodes/{id}/update: - post: - summary: "Update a node" - operationId: "NodeUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID of the node" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/NodeSpec" - - name: "version" - in: "query" - description: | - The version number of the node object being updated. This is required - to avoid conflicting writes. 
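
A sketch of the read-modify-write cycle that the `version` parameter above enforces: inspect the node, reuse its `Version.Index`, and post the changed `NodeSpec` back. The node ID and the `Availability: drain` change are placeholders.

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	nodeID := "24ifsmvkjbyhk" // hypothetical node ID or name

	// 1. GET /nodes/{id}: the current Version.Index is required for updates.
	resp, err := cli.Get("http://docker/v1.43/nodes/" + nodeID)
	if err != nil {
		panic(err)
	}
	var node struct {
		Version struct{ Index uint64 }
		Spec    map[string]any
	}
	if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
		panic(err)
	}
	resp.Body.Close()
	if node.Spec == nil {
		panic("no NodeSpec returned; is this node part of a swarm?")
	}

	// 2. POST /nodes/{id}/update?version=N with the modified NodeSpec.
	node.Spec["Availability"] = "drain"
	body, _ := json.Marshal(node.Spec)
	u := fmt.Sprintf("http://docker/v1.43/nodes/%s/update?version=%d", nodeID, node.Version.Index)
	req, _ := http.NewRequest(http.MethodPost, u, bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	resp, err = cli.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(io.Discard, resp.Body)
	fmt.Println("update:", resp.Status) // 200 on success, 503 outside a swarm
}
```

The same version-guarded update pattern recurs below for swarm, service, secret, and config updates.
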
- type: "integer" - format: "int64" - required: true - tags: ["Node"] - /swarm: - get: - summary: "Inspect swarm" - operationId: "SwarmInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Swarm" - 404: - description: "no such swarm" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/init: - post: - summary: "Initialize a new swarm" - operationId: "SwarmInit" - produces: - - "application/json" - - "text/plain" - responses: - 200: - description: "no error" - schema: - description: "The node ID" - type: "string" - example: "7v2t30z9blmxuhnyo6s4cpenp" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmInitRequest" - properties: - ListenAddr: - description: | - Listen address used for inter-manager communication, as well - as determining the networking interface used for the VXLAN - Tunnel Endpoint (VTEP). This can either be an address/port - combination in the form `192.168.1.1:4567`, or an interface - followed by a port number, like `eth0:4567`. If the port number - is omitted, the default swarm listening port is used. - type: "string" - AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. - type: "string" - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - if no port is set or is set to 0, default port 4789 will be used. - type: "integer" - format: "uint32" - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global - scope networks. - type: "array" - items: - type: "string" - example: ["10.10.0.0/16", "20.20.0.0/16"] - ForceNewCluster: - description: "Force creation of a new swarm." - type: "boolean" - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created - from the default subnet pool. 
- type: "integer" - format: "uint32" - Spec: - $ref: "#/definitions/SwarmSpec" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - DataPathPort: 4789 - DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] - SubnetSize: 24 - ForceNewCluster: false - Spec: - Orchestration: {} - Raft: {} - Dispatcher: {} - CAConfig: {} - EncryptionConfig: - AutoLockManagers: false - tags: ["Swarm"] - /swarm/join: - post: - summary: "Join an existing swarm" - operationId: "SwarmJoin" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmJoinRequest" - properties: - ListenAddr: - description: | - Listen address used for inter-manager communication if the node - gets promoted to manager, as well as determining the networking - interface used for the VXLAN Tunnel Endpoint (VTEP). - type: "string" - AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. - - type: "string" - RemoteAddrs: - description: | - Addresses of manager nodes already participating in the swarm. - type: "array" - items: - type: "string" - JoinToken: - description: "Secret token for joining this swarm." - type: "string" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - RemoteAddrs: - - "node1:2377" - JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - tags: ["Swarm"] - /swarm/leave: - post: - summary: "Leave a swarm" - operationId: "SwarmLeave" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "force" - description: | - Force leave swarm, even if this is the last manager or that it will - break the cluster. 
- in: "query" - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/update: - post: - summary: "Update a swarm" - operationId: "SwarmUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - $ref: "#/definitions/SwarmSpec" - - name: "version" - in: "query" - description: | - The version number of the swarm object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - - name: "rotateWorkerToken" - in: "query" - description: "Rotate the worker join token." - type: "boolean" - default: false - - name: "rotateManagerToken" - in: "query" - description: "Rotate the manager join token." - type: "boolean" - default: false - - name: "rotateManagerUnlockKey" - in: "query" - description: "Rotate the manager unlock key." - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/unlockkey: - get: - summary: "Get the unlock key" - operationId: "SwarmUnlockkey" - consumes: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "UnlockKeyResponse" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/unlock: - post: - summary: "Unlock a locked manager" - operationId: "SwarmUnlock" - consumes: - - "application/json" - produces: - - "application/json" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmUnlockRequest" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /services: - get: - summary: "List services" - operationId: "ServiceList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Service" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the services list. - - Available filters: - - - `id=` - - `label=` - - `mode=["replicated"|"global"]` - - `name=` - - name: "status" - in: "query" - type: "boolean" - description: | - Include service status, with count of running and desired tasks. 
- tags: ["Service"] - /services/create: - post: - summary: "Create a service" - operationId: "ServiceCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/ServiceCreateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "network is not eligible for services" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "name conflicts with an existing service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "web" - TaskTemplate: - ContainerSpec: - Image: "nginx:alpine" - Mounts: - - - ReadOnly: true - Source: "web-data" - Target: "/usr/share/nginx/html" - Type: "volume" - VolumeOptions: - DriverConfig: {} - Labels: - com.example.something: "something-value" - Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] - User: "33" - DNSConfig: - Nameservers: ["8.8.8.8"] - Search: ["example.org"] - Options: ["timeout:3"] - Secrets: - - - File: - Name: "www.example.org.key" - UID: "33" - GID: "33" - Mode: 384 - SecretID: "fpjqlhnwb19zds35k8wn80lq9" - SecretName: "example_org_domain_key" - OomScoreAdj: 0 - LogDriver: - Name: "json-file" - Options: - max-file: "3" - max-size: "10M" - Placement: {} - Resources: - Limits: - MemoryBytes: 104857600 - Reservations: {} - RestartPolicy: - Condition: "on-failure" - Delay: 10000000000 - MaxAttempts: 10 - Mode: - Replicated: - Replicas: 4 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Ports: - - - Protocol: "tcp" - PublishedPort: 8080 - TargetPort: 80 - Labels: - foo: "bar" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - tags: ["Service"] - /services/{id}: - get: - summary: "Inspect a service" - operationId: "ServiceInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Service" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "insertDefaults" - in: "query" - description: "Fill empty fields with default values." 
- type: "boolean" - default: false - tags: ["Service"] - delete: - summary: "Delete a service" - operationId: "ServiceDelete" - responses: - 200: - description: "no error" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - tags: ["Service"] - /services/{id}/update: - post: - summary: "Update a service" - operationId: "ServiceUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ServiceUpdateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "top" - TaskTemplate: - ContainerSpec: - Image: "busybox" - Args: - - "top" - OomScoreAdj: 0 - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - - - name: "version" - in: "query" - description: | - The version number of the service object being updated. This is - required to avoid conflicting writes. - This version number should be the value as currently set on the - service *before* the update. You can find the current version by - calling `GET /services/{id}` - required: true - type: "integer" - - name: "registryAuthFrom" - in: "query" - description: | - If the `X-Registry-Auth` header is not specified, this parameter - indicates where to find registry authorization credentials. - type: "string" - enum: ["spec", "previous-spec"] - default: "spec" - - name: "rollback" - in: "query" - description: | - Set to this parameter to `previous` to cause a server-side rollback - to the previous service spec. The supplied spec will be ignored in - this case. - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - tags: ["Service"] - /services/{id}/logs: - get: - summary: "Get service logs" - description: | - Get `stdout` and `stderr` logs from a service. See also - [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. 
- produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - operationId: "ServiceLogs" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such service: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the service" - type: "string" - - name: "details" - in: "query" - description: "Show service context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Service"] - /tasks: - get: - summary: "List tasks" - operationId: "TaskList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Task" - example: - - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - - ID: "1yljwbmlr8er2waf8orvqpwms" - Version: - Index: 30 - CreatedAt: "2016-06-07T21:07:30.019104782Z" - UpdatedAt: "2016-06-07T21:07:30.231958098Z" - Name: "hopeful_cori" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: 
"60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:30.202183143Z" - State: "shutdown" - Message: "shutdown" - ContainerStatus: - ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" - DesiredState: "shutdown" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.5/16" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the tasks list. - - Available filters: - - - `desired-state=(running | shutdown | accepted)` - - `id=` - - `label=key` or `label="key=value"` - - `name=` - - `node=` - - `service=` - tags: ["Task"] - /tasks/{id}: - get: - summary: "Inspect a task" - operationId: "TaskInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Task" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID of the task" - required: true - type: "string" - tags: ["Task"] - /tasks/{id}/logs: - get: - summary: "Get task logs" - description: | - Get `stdout` and `stderr` logs from a task. - See also [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. - operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such task: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID of the task" - type: "string" - - name: "details" - in: "query" - description: "Show task context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." 
- type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Task"] - /secrets: - get: - summary: "List secrets" - operationId: "SecretList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Secret" - example: - - ID: "blt1owaxmitz71s9v5zh81zun" - Version: - Index: 85 - CreatedAt: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: "2017-07-20T13:55:28.678958722Z" - Spec: - Name: "mysql-passwd" - Labels: - some.label: "some.value" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the secrets list. 
- - Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Secret"] - /secrets/create: - post: - summary: "Create a secret" - operationId: "SecretCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/SecretSpec" - - type: "object" - example: - Name: "app-key.crt" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - tags: ["Secret"] - /secrets/{id}: - get: - summary: "Inspect a secret" - operationId: "SecretInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Secret" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - delete: - summary: "Delete a secret" - operationId: "SecretDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - /secrets/{id}/update: - post: - summary: "Update a Secret" - operationId: "SecretUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such secret" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the secret" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/SecretSpec" - description: | - The spec of the secret to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [SecretInspect endpoint](#operation/SecretInspect) response values. 
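
A short sketch of secret creation as documented above: the `Data` field of the `SecretSpec` carries base64-encoded plaintext. The secret name and payload are placeholders, and the socket path and version prefix are assumptions.

```go
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	cli := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	// SecretSpec body: Data is base64-encoded plaintext.
	secret := map[string]any{
		"Name":   "app-dev.key", // hypothetical secret name
		"Labels": map[string]string{"env": "dev"},
		"Data":   base64.StdEncoding.EncodeToString([]byte("not a real key\n")),
	}
	body, _ := json.Marshal(secret)

	resp, err := cli.Post("http://docker/v1.43/secrets/create", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	// 201 with {"ID": "..."}; 409 if the name exists; 503 outside a swarm.
	fmt.Println(resp.Status, string(out))
}
```
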
- - name: "version" - in: "query" - description: | - The version number of the secret object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - tags: ["Secret"] - /configs: - get: - summary: "List configs" - operationId: "ConfigList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Config" - example: - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "server.conf" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the configs list. - - Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Config"] - /configs/create: - post: - summary: "Create a config" - operationId: "ConfigCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/ConfigSpec" - - type: "object" - example: - Name: "server.conf" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - tags: ["Config"] - /configs/{id}: - get: - summary: "Inspect a config" - operationId: "ConfigInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Config" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - delete: - summary: "Delete a config" - operationId: "ConfigDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - /configs/{id}/update: - post: - summary: "Update a Config" - operationId: "ConfigUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such config" - schema: - 
$ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the config" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/ConfigSpec" - description: | - The spec of the config to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [ConfigInspect endpoint](#operation/ConfigInspect) response values. - - name: "version" - in: "query" - description: | - The version number of the config object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - tags: ["Config"] - /distribution/{name}/json: - get: - summary: "Get image information from the registry" - description: | - Return image digest and platform information by contacting the registry. - operationId: "DistributionInspect" - produces: - - "application/json" - responses: - 200: - description: "descriptor and platform information" - schema: - $ref: "#/definitions/DistributionInspect" - 401: - description: "Failed authentication or no image found" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Distribution"] - /session: - post: - summary: "Initialize interactive session" - description: | - Start a new interactive session with a server. Session allows server to - call back to the client for advanced capabilities. - - ### Hijacking - - This endpoint hijacks the HTTP connection to HTTP2 transport that allows - the client to expose gPRC services on that connection. 
- - For example, the client sends this request to upgrade the connection: - - ``` - POST /session HTTP/1.1 - Upgrade: h2c - Connection: Upgrade - ``` - - The Docker daemon responds with a `101 UPGRADED` response follow with - the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Connection: Upgrade - Upgrade: h2c - ``` - operationId: "Session" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hijacking successful" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Session"] diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index bf3463b90..000000000 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/list.go b/vendor/github.com/docker/docker/api/types/checkpoint/list.go deleted file mode 100644 index 94a9c0a47..000000000 --- a/vendor/github.com/docker/docker/api/types/checkpoint/list.go +++ /dev/null @@ -1,7 +0,0 @@ -package checkpoint - -// Summary represents the details of a checkpoint when listing endpoints. -type Summary struct { - // Name is the name of the checkpoint. - Name string -} diff --git a/vendor/github.com/docker/docker/api/types/checkpoint/options.go b/vendor/github.com/docker/docker/api/types/checkpoint/options.go deleted file mode 100644 index 9477458c2..000000000 --- a/vendor/github.com/docker/docker/api/types/checkpoint/options.go +++ /dev/null @@ -1,19 +0,0 @@ -package checkpoint - -// CreateOptions holds parameters to create a checkpoint from a container. -type CreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// ListOptions holds parameters to list checkpoints for a container. -type ListOptions struct { - CheckpointDir string -} - -// DeleteOptions holds parameters to delete a checkpoint from a container. -type DeleteOptions struct { - CheckpointID string - CheckpointDir string -} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index df791f02a..000000000 --- a/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,257 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "bufio" - "context" - "io" - "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" -) - -// NewHijackedResponse intializes a HijackedResponse type -func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { - return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} -} - -// HijackedResponse holds connection information for a hijacked request. 
-type HijackedResponse struct { - mediaType string - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. -// returns false if HTTP Content-Type is not relevant, and container must be inspected -func (h *HijackedResponse) MediaType() (string, bool) { - if h.mediaType == "" { - return "", false - } - return h.mediaType, true -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*container.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. - BuildArgs map[string]*string - AuthConfigs map[string]registry.AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string - // Version specifies the version of the underlying builder to use - Version BuilderVersion - // BuildID is an optional identifier that can be passed together with the - // build request. The same identifier can be used to gracefully cancel the - // build with the cancel request. - BuildID string - // Outputs defines configurations for exporting build results. Only supported - // in BuildKit mode - Outputs []ImageBuildOutput -} - -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. 
-// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func(context.Context) (string, error) - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. -type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. 
-type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error) - Args []string -} - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// PluginCreateOptions hold all options to plugin create. -type PluginCreateOptions struct { - RepoName string -} diff --git a/vendor/github.com/docker/docker/api/types/container/change_type.go b/vendor/github.com/docker/docker/api/types/container/change_type.go deleted file mode 100644 index fe8d6d369..000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_type.go +++ /dev/null @@ -1,15 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ChangeType Kind of change -// -// Can be one of: -// -// - `0`: Modified ("C") -// - `1`: Added ("A") -// - `2`: Deleted ("D") -// -// swagger:model ChangeType -type ChangeType uint8 diff --git a/vendor/github.com/docker/docker/api/types/container/change_types.go b/vendor/github.com/docker/docker/api/types/container/change_types.go deleted file mode 100644 index 3a3a83866..000000000 --- a/vendor/github.com/docker/docker/api/types/container/change_types.go +++ /dev/null @@ -1,23 +0,0 @@ -package container - -const ( - // ChangeModify represents the modify operation. - ChangeModify ChangeType = 0 - // ChangeAdd represents the add operation. - ChangeAdd ChangeType = 1 - // ChangeDelete represents the delete operation. - ChangeDelete ChangeType = 2 -) - -func (ct ChangeType) String() string { - switch ct { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - default: - return "" - } -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go deleted file mode 100644 index d6b03e8b2..000000000 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ /dev/null @@ -1,73 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - dockerspec "github.com/moby/docker-image-spec/specs-go/v1" -) - -// MinimumDuration puts a minimum on user configured duration. -// This is to prevent API error on time unit. For example, API may -// set 3 as healthcheck interval with intention of 3 seconds, but -// Docker interprets it as 3 nanoseconds. -const MinimumDuration = 1 * time.Millisecond - -// StopOptions holds the options to stop or restart a container. -type StopOptions struct { - // Signal (optional) is the signal to send to the container to (gracefully) - // stop it before forcibly terminating the container with SIGKILL after the - // timeout expires. If not value is set, the default (SIGTERM) is used. - Signal string `json:",omitempty"` - - // Timeout (optional) is the timeout (in seconds) to wait for the container - // to stop gracefully before forcibly terminating it with SIGKILL. - // - // - Use nil to use the default timeout (10 seconds). - // - Use '-1' to wait indefinitely. 
- // - Use '0' to not wait for the container to exit gracefully, and - // immediately proceeds to forcibly terminating the container. - // - Other positive values are used as timeout (in seconds). - Timeout *int `json:",omitempty"` -} - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig = dockerspec.HealthcheckConfig - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - // Mac Address of the container. - // - // Deprecated: this field is deprecated since API v1.44. Use EndpointSettings.MacAddress instead. - MacAddress string `json:",omitempty"` - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/github.com/docker/docker/api/types/container/container.go b/vendor/github.com/docker/docker/api/types/container/container.go deleted file mode 100644 index 711af12c9..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container.go +++ /dev/null @@ -1,44 +0,0 @@ -package container - -import ( - "io" - "os" - "time" -) - -// PruneReport contains the response for Engine API: -// POST "/containers/prune" -type PruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// PathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. 
-type PathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} - -// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats -// for a container, as produced by the GET "/stats" endpoint. -// -// The OSType field is set to the server's platform to allow -// platform-specific handling of the response. -// -// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse]. -type StatsResponseReader struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index 63381da36..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process - // is an array of values corresponding to the titles. - // - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index c10f175ea..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,16 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/create_request.go b/vendor/github.com/docker/docker/api/types/container/create_request.go deleted file mode 100644 index e98dd6ad4..000000000 --- a/vendor/github.com/docker/docker/api/types/container/create_request.go +++ /dev/null @@ -1,13 +0,0 @@ -package container - -import "github.com/docker/docker/api/types/network" - -// CreateRequest is the request message sent to the server for container -// create calls. It is a config wrapper that holds the container [Config] -// (portable) and the corresponding [HostConfig] (non-portable) and -// [network.NetworkingConfig]. 
-type CreateRequest struct { - *Config - HostConfig *HostConfig `json:"HostConfig,omitempty"` - NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go deleted file mode 100644 index aa0e7f7d0..000000000 --- a/vendor/github.com/docker/docker/api/types/container/create_response.go +++ /dev/null @@ -1,19 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CreateResponse ContainerCreateResponse -// -// OK response to ContainerCreate operation -// swagger:model CreateResponse -type CreateResponse struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/errors.go b/vendor/github.com/docker/docker/api/types/container/errors.go deleted file mode 100644 index 32c978037..000000000 --- a/vendor/github.com/docker/docker/api/types/container/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package container - -type errInvalidParameter struct{ error } - -func (e *errInvalidParameter) InvalidParameter() {} - -func (e *errInvalidParameter) Unwrap() error { - return e.error -} diff --git a/vendor/github.com/docker/docker/api/types/container/exec.go b/vendor/github.com/docker/docker/api/types/container/exec.go deleted file mode 100644 index 96093eb5c..000000000 --- a/vendor/github.com/docker/docker/api/types/container/exec.go +++ /dev/null @@ -1,43 +0,0 @@ -package container - -// ExecOptions is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecOptions struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} - -// ExecStartOptions is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartOptions struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool - // Terminal size [height, width], unused if Tty == false - ConsoleSize *[2]uint `json:",omitempty"` -} - -// ExecAttachOptions is a temp struct used by execAttach. -// -// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached. -type ExecAttachOptions = ExecStartOptions - -// ExecInspect holds information returned by exec inspect. 
-type ExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} diff --git a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go b/vendor/github.com/docker/docker/api/types/container/filesystem_change.go deleted file mode 100644 index 9e9c2ad1d..000000000 --- a/vendor/github.com/docker/docker/api/types/container/filesystem_change.go +++ /dev/null @@ -1,19 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// FilesystemChange Change in the container's filesystem. -// -// swagger:model FilesystemChange -type FilesystemChange struct { - - // kind - // Required: true - Kind ChangeType `json:"Kind"` - - // Path to file or directory that has changed. - // - // Required: true - Path string `json:"Path"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go deleted file mode 100644 index 727da8839..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ /dev/null @@ -1,500 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" -) - -// CgroupnsMode represents the cgroup namespace mode of the container -type CgroupnsMode string - -// cgroup namespace modes for containers -const ( - CgroupnsModeEmpty CgroupnsMode = "" - CgroupnsModePrivate CgroupnsMode = "private" - CgroupnsModeHost CgroupnsMode = "host" -) - -// IsPrivate indicates whether the container uses its own private cgroup namespace -func (c CgroupnsMode) IsPrivate() bool { - return c == CgroupnsModePrivate -} - -// IsHost indicates whether the container shares the host's cgroup namespace -func (c CgroupnsMode) IsHost() bool { - return c == CgroupnsModeHost -} - -// IsEmpty indicates whether the container cgroup namespace mode is unset -func (c CgroupnsMode) IsEmpty() bool { - return c == CgroupnsModeEmpty -} - -// Valid indicates whether the cgroup namespace mode is valid -func (c CgroupnsMode) Valid() bool { - return c.IsEmpty() || c.IsPrivate() || c.IsHost() -} - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// Isolation modes for containers -const ( - IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) - IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon - IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode - IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode -) - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. 
-func (i Isolation) IsDefault() bool { - // TODO consider making isolation-mode strict (case-sensitive) - v := Isolation(strings.ToLower(string(i))) - return v == IsolationDefault || v == IsolationEmpty -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationHyperV -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationProcess -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IpcMode constants -const ( - IPCModeNone IpcMode = "none" - IPCModeHost IpcMode = "host" - IPCModeContainer IpcMode = "container" - IPCModePrivate IpcMode = "private" - IPCModeShareable IpcMode = "shareable" -) - -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. -func (n IpcMode) IsPrivate() bool { - return n == IPCModePrivate -} - -// IsHost indicates whether the container shares the host's ipc namespace. -func (n IpcMode) IsHost() bool { - return n == IPCModeHost -} - -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == IPCModeShareable -} - -// IsContainer indicates whether the container uses another container's ipc namespace. -func (n IpcMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == IPCModeNone -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. -func (n IpcMode) Valid() bool { - // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == network.NetworkNone -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == network.NetworkDefault -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// UsernsMode represents userns mode in the container. 
-type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !n.IsHost() -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - return n == "" || n.IsHost() -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - _, ok := containerID(string(c)) - return ok -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - // TODO(thaJeztah): align with PidMode, and consider container-mode without a container name/ID to be invalid. - return c == "" || c.IsContainer() -} - -// Container returns the ID or name of the container whose cgroup will be used. -func (c CgroupSpec) Container() (idOrName string) { - idOrName, _ = containerID(string(c)) - return idOrName -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !n.IsHost() -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - return n == "" || n.IsHost() -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - _, ok := containerID(string(n)) - return ok -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - return n == "" || n.IsHost() || validContainer(string(n)) -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() (idOrName string) { - idOrName, _ = containerID(string(n)) - return idOrName -} - -// DeviceRequest represents a request for devices from a device driver. -// Used by GPU device drivers. -type DeviceRequest struct { - Driver string // Name of device driver - Count int // Number of devices to request (-1 = All) - DeviceIDs []string // List of device IDs as recognizable by the device driver - Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") - Options map[string]string // Options to pass onto the device driver -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. 
-type RestartPolicy struct { - Name RestartPolicyMode - MaximumRetryCount int -} - -type RestartPolicyMode string - -const ( - RestartPolicyDisabled RestartPolicyMode = "no" - RestartPolicyAlways RestartPolicyMode = "always" - RestartPolicyOnFailure RestartPolicyMode = "on-failure" - RestartPolicyUnlessStopped RestartPolicyMode = "unless-stopped" -) - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == RestartPolicyDisabled || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == RestartPolicyAlways -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == RestartPolicyOnFailure -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == RestartPolicyUnlessStopped -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// ValidateRestartPolicy validates the given RestartPolicy. -func ValidateRestartPolicy(policy RestartPolicy) error { - switch policy.Name { - case RestartPolicyAlways, RestartPolicyUnlessStopped, RestartPolicyDisabled: - if policy.MaximumRetryCount != 0 { - msg := "invalid restart policy: maximum retry count can only be used with 'on-failure'" - if policy.MaximumRetryCount < 0 { - msg += " and cannot be negative" - } - return &errInvalidParameter{fmt.Errorf(msg)} - } - return nil - case RestartPolicyOnFailure: - if policy.MaximumRetryCount < 0 { - return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")} - } - return nil - case "": - // Versions before v25.0.0 created an empty restart-policy "name" as - // default. Allow an empty name with "any" MaximumRetryCount for - // backward-compatibility. - return nil - default: - return &errInvalidParameter{fmt.Errorf("invalid restart policy: unknown policy '%s'; use one of '%s', '%s', '%s', or '%s'", policy.Name, RestartPolicyDisabled, RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyUnlessStopped)} - } -} - -// LogMode is a type to define the available modes for logging -// These modes affect how logs are handled when log messages start piling up. -type LogMode string - -// Available logging modes -const ( - LogModeUnset LogMode = "" - LogModeBlocking LogMode = "blocking" - LogModeNonBlock LogMode = "non-blocking" -) - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Ulimit is an alias for [units.Ulimit], which may be moving to a different -// location or become a local type. This alias is to help transitioning. -// -// Users are recommended to use this alias instead of using [units.Ulimit] directly. 
-type Ulimit = units.Ulimit - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DeviceCgroupRules []string // List of rule to be added to the device cgroup - DeviceRequests []DeviceRequest // List of device requests for device drivers - - // KernelMemory specifies the kernel memory limit (in bytes) for the container. - // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. - KernelMemory int64 `json:",omitempty"` - KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. 
-type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - ConsoleSize [2]uint // Initial console size (height,width) - Annotations map[string]string `json:",omitempty"` // Arbitrary non-identifying metadata attached to container and provided to the runtime - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources - - // Mounts specs used by the container - Mounts []mount.Mount `json:",omitempty"` - - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - - // Run a custom init inside the container, if null, use the daemon's configured settings - Init *bool `json:",omitempty"` -} - -// containerID splits "container:" values. It returns the container -// ID or name, and whether an ID/name was found. 
It returns an empty string and -// a "false" if the value does not have a "container:" prefix. Further validation -// of the returned, including checking if the value is empty, should be handled -// by the caller. -func containerID(val string) (idOrName string, ok bool) { - k, v, hasSep := strings.Cut(val, ":") - if !hasSep || k != "container" { - return "", false - } - return v, true -} - -// validContainer checks if the given value is a "container:" mode with -// a non-empty name/ID. -func validContainer(val string) bool { - id, ok := containerID(val) - return ok && id != "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go deleted file mode 100644 index cdee49ea3..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build !windows - -package container // import "github.com/docker/docker/api/types/container" - -import "github.com/docker/docker/api/types/network" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == network.NetworkBridge -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == network.NetworkHost -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - switch { - case n.IsDefault(): - return network.NetworkDefault - case n.IsBridge(): - return network.NetworkBridge - case n.IsHost(): - return network.NetworkHost - case n.IsNone(): - return network.NetworkNone - case n.IsContainer(): - return "container" - case n.IsUserDefined(): - return n.UserDefined() - default: - return "" - } -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go deleted file mode 100644 index f08545542..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,47 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import "github.com/docker/docker/api/types/network" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == network.NetworkNat -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() -} - -// NetworkName returns the name of the network stack. 
-func (n NetworkMode) NetworkName() string { - switch { - case n.IsDefault(): - return network.NetworkDefault - case n.IsBridge(): - return network.NetworkNat - case n.IsHost(): - // Windows currently doesn't support host network-mode, so - // this would currently never happen.. - return network.NetworkHost - case n.IsNone(): - return network.NetworkNone - case n.IsContainer(): - return "container" - case n.IsUserDefined(): - return n.UserDefined() - default: - return "" - } -} diff --git a/vendor/github.com/docker/docker/api/types/container/options.go b/vendor/github.com/docker/docker/api/types/container/options.go deleted file mode 100644 index 7a2300576..000000000 --- a/vendor/github.com/docker/docker/api/types/container/options.go +++ /dev/null @@ -1,67 +0,0 @@ -package container - -import "github.com/docker/docker/api/types/filters" - -// ResizeOptions holds parameters to resize a TTY. -// It can be used to resize container TTYs and -// exec process TTYs too. -type ResizeOptions struct { - Height uint - Width uint -} - -// AttachOptions holds parameters to attach to a container. -type AttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// CommitOptions holds parameters to commit changes into a container. -type CommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *Config -} - -// RemoveOptions holds parameters to remove containers. -type RemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// StartOptions holds parameters to start containers. -type StartOptions struct { - CheckpointID string - CheckpointDir string -} - -// ListOptions holds parameters to list containers with. -type ListOptions struct { - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// LogsOptions holds parameters to filter logs with. -type LogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/stats.go b/vendor/github.com/docker/docker/api/types/container/stats.go deleted file mode 100644 index 3b3fb131a..000000000 --- a/vendor/github.com/docker/docker/api/types/container/stats.go +++ /dev/null @@ -1,181 +0,0 @@ -package container - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. 
- UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). - // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. - SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. 
- RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. - RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsResponse is newly used Networks. -// -// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801 -type StatsResponse struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go deleted file mode 100644 index ab56d4eed..000000000 --- a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go +++ /dev/null @@ -1,12 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// WaitExitError container waiting error, if any -// swagger:model WaitExitError -type WaitExitError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go deleted file mode 100644 index 84fc6afdd..000000000 --- a/vendor/github.com/docker/docker/api/types/container/wait_response.go +++ /dev/null @@ -1,18 +0,0 @@ -package container - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// WaitResponse ContainerWaitResponse -// -// OK response to ContainerWait operation -// swagger:model WaitResponse -type WaitResponse struct { - - // error - Error *WaitExitError `json:"Error,omitempty"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go deleted file mode 100644 index cd8311f99..000000000 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// WaitCondition is a type used to specify a container state for which -// to wait. -type WaitCondition string - -// Possible WaitCondition Values. -// -// WaitConditionNotRunning (default) is used to wait for any of the non-running -// states: "created", "exited", "dead", "removing", or "removed". -// -// WaitConditionNextExit is used to wait for the next time the state changes -// to a non-running state. If the state is currently "created" or "exited", -// this would cause Wait() to block until either the container runs and exits -// or is removed. -// -// WaitConditionRemoved is used to wait for the container to be removed. -const ( - WaitConditionNotRunning WaitCondition = "not-running" - WaitConditionNextExit WaitCondition = "next-exit" - WaitConditionRemoved WaitCondition = "removed" -) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go deleted file mode 100644 index dc942d9d9..000000000 --- a/vendor/github.com/docker/docker/api/types/error_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ErrorResponse Represents an error. -// swagger:model ErrorResponse -type ErrorResponse struct { - - // The error message. - // Required: true - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go deleted file mode 100644 index f84f034cd..000000000 --- a/vendor/github.com/docker/docker/api/types/error_response_ext.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// Error returns the error message -func (e ErrorResponse) Error() string { - return e.Message -} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go deleted file mode 100644 index e225df4ec..000000000 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ /dev/null @@ -1,135 +0,0 @@ -package events // import "github.com/docker/docker/api/types/events" -import "github.com/docker/docker/api/types/filters" - -// Type is used for event-types. -type Type string - -// List of known event types. -const ( - BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. - ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. - ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. - DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. 
- ImageEventType Type = "image" // ImageEventType is the event type that images generate. - NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. - NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. - PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. - SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. - ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. - VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. -) - -// Action is used for event-actions. -type Action string - -const ( - ActionCreate Action = "create" - ActionStart Action = "start" - ActionRestart Action = "restart" - ActionStop Action = "stop" - ActionCheckpoint Action = "checkpoint" - ActionPause Action = "pause" - ActionUnPause Action = "unpause" - ActionAttach Action = "attach" - ActionDetach Action = "detach" - ActionResize Action = "resize" - ActionUpdate Action = "update" - ActionRename Action = "rename" - ActionKill Action = "kill" - ActionDie Action = "die" - ActionOOM Action = "oom" - ActionDestroy Action = "destroy" - ActionRemove Action = "remove" - ActionCommit Action = "commit" - ActionTop Action = "top" - ActionCopy Action = "copy" - ActionArchivePath Action = "archive-path" - ActionExtractToDir Action = "extract-to-dir" - ActionExport Action = "export" - ActionImport Action = "import" - ActionSave Action = "save" - ActionLoad Action = "load" - ActionTag Action = "tag" - ActionUnTag Action = "untag" - ActionPush Action = "push" - ActionPull Action = "pull" - ActionPrune Action = "prune" - ActionDelete Action = "delete" - ActionEnable Action = "enable" - ActionDisable Action = "disable" - ActionConnect Action = "connect" - ActionDisconnect Action = "disconnect" - ActionReload Action = "reload" - ActionMount Action = "mount" - ActionUnmount Action = "unmount" - - // ActionExecCreate is the prefix used for exec_create events. These - // event-actions are commonly followed by a colon and space (": "), - // and the command that's defined for the exec, for example: - // - // exec_create: /bin/sh -c 'echo hello' - // - // This is far from ideal; it's a compromise to allow filtering and - // to preserve backward-compatibility. - ActionExecCreate Action = "exec_create" - // ActionExecStart is the prefix used for exec_create events. These - // event-actions are commonly followed by a colon and space (": "), - // and the command that's defined for the exec, for example: - // - // exec_start: /bin/sh -c 'echo hello' - // - // This is far from ideal; it's a compromise to allow filtering and - // to preserve backward-compatibility. - ActionExecStart Action = "exec_start" - ActionExecDie Action = "exec_die" - ActionExecDetach Action = "exec_detach" - - // ActionHealthStatus is the prefix to use for health_status events. - // - // Health-status events can either have a pre-defined status, in which - // case the "health_status" action is followed by a colon, or can be - // "free-form", in which case they're followed by the output of the - // health-check output. - // - // This is far form ideal, and a compromise to allow filtering, and - // to preserve backward-compatibility. 
- ActionHealthStatus Action = "health_status" - ActionHealthStatusRunning Action = "health_status: running" - ActionHealthStatusHealthy Action = "health_status: healthy" - ActionHealthStatusUnhealthy Action = "health_status: unhealthy" -) - -// Actor describes something that generates events, -// like a container, or a network, or a volume. -// It has a defined name and a set of attributes. -// The container attributes are its labels, other actors -// can generate these attributes from other properties. -type Actor struct { - ID string - Attributes map[string]string -} - -// Message represents the information an event contains -type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events. - Status string `json:"status,omitempty"` // Deprecated: use Action instead. - ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. - From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. - - Type Type - Action Action - Actor Actor - // Engine events are local scope. Cluster events are swarm scope. - Scope string `json:"scope,omitempty"` - - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} - -// ListOptions holds parameters to filter events with. -type ListOptions struct { - Since string - Until string - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/filters/errors.go b/vendor/github.com/docker/docker/api/types/filters/errors.go deleted file mode 100644 index f52f69440..000000000 --- a/vendor/github.com/docker/docker/api/types/filters/errors.go +++ /dev/null @@ -1,37 +0,0 @@ -package filters - -import "fmt" - -// invalidFilter indicates that the provided filter or its value is invalid -type invalidFilter struct { - Filter string - Value []string -} - -func (e invalidFilter) Error() string { - msg := "invalid filter" - if e.Filter != "" { - msg += " '" + e.Filter - if e.Value != nil { - msg = fmt.Sprintf("%s=%s", msg, e.Value) - } - msg += "'" - } - return msg -} - -// InvalidParameter marks this error as ErrInvalidParameter -func (e invalidFilter) InvalidParameter() {} - -// unreachableCode is an error indicating that the code path was not expected to be reached. -type unreachableCode struct { - Filter string - Value []string -} - -// System marks this error as ErrSystem -func (e unreachableCode) System() {} - -func (e unreachableCode) Error() string { - return fmt.Sprintf("unreachable code reached for filter: %q with values: %s", e.Filter, e.Value) -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index 0c39ab5f1..000000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,346 +0,0 @@ -/* -Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. 
-type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// Keys returns all the keys in list of Args -func (args Args) Keys() []string { - keys := make([]string, 0, len(args.fields)) - for k := range args.fields { - keys = append(keys, k) - } - return keys -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte("{}"), nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. -// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, &invalidFilter{} - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. 
-func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testK, testV, hasValue := strings.Cut(value, "=") - - v, ok := sources[testK] - if !ok { - return false - } - if hasValue && testV != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// GetBoolOrDefault returns a boolean value of the key if the key is present -// and is intepretable as a boolean value. Otherwise the default value is returned. -// Error is not nil only if the filter values are not valid boolean or are conflicting. -func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { - fieldValues, ok := args.fields[key] - - if !ok { - return defaultValue, nil - } - - if len(fieldValues) == 0 { - return defaultValue, &invalidFilter{key, nil} - } - - isFalse := fieldValues["0"] || fieldValues["false"] - isTrue := fieldValues["1"] || fieldValues["true"] - - conflicting := isFalse && isTrue - invalid := !isFalse && !isTrue - - if conflicting || invalid { - return defaultValue, &invalidFilter{key, args.Get(key)} - } else if isFalse { - return false, nil - } else if isTrue { - return true, nil - } - - // This code shouldn't be reached. - return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)} -} - -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. 
-func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return &invalidFilter{name, nil} - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -// Clone returns a copy of args. -func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go deleted file mode 100644 index ce3deb331..000000000 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// GraphDriverData Information about the storage driver used to store the container's and -// image's filesystem. -// -// swagger:model GraphDriverData -type GraphDriverData struct { - - // Low-level storage metadata, provided as key/value pairs. - // - // This information is driver-specific, and depends on the storage-driver - // in use, and should be used for informational purposes only. - // - // Required: true - Data map[string]string `json:"Data"` - - // Name of the storage driver. - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b..000000000 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/delete_response.go b/vendor/github.com/docker/docker/api/types/image/delete_response.go deleted file mode 100644 index 998620dc6..000000000 --- a/vendor/github.com/docker/docker/api/types/image/delete_response.go +++ /dev/null @@ -1,15 +0,0 @@ -package image - -// This file was generated by the swagger tool. 
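The filters package removed above is the one piece in this batch with non-trivial encode/decode behaviour, so a short usage sketch may help reviewers: build an argument set, round-trip it through the JSON wire format, and query it. All identifiers come from parse.go above; the key/value pairs are placeholders.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Build a filter set with the helpers from parse.go.
	args := filters.NewArgs(filters.Arg("label", "env=prod"))
	args.Add("status", "running")

	// ToJSON produces the wire form sent to the Engine API, e.g.
	// {"label":{"env=prod":true},"status":{"running":true}}.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded)

	// FromJSON round-trips the encoded form; the *Match helpers query it.
	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Contains("label"))               // true
	fmt.Println(decoded.ExactMatch("status", "running")) // true
}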
-// Editing this file might prove futile when you re-run the swagger generate command - -// DeleteResponse delete response -// swagger:model DeleteResponse -type DeleteResponse struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/image.go b/vendor/github.com/docker/docker/api/types/image/image.go deleted file mode 100644 index abb7ffd80..000000000 --- a/vendor/github.com/docker/docker/api/types/image/image.go +++ /dev/null @@ -1,47 +0,0 @@ -package image - -import ( - "io" - "time" -) - -// Metadata contains engine-local data about the image. -type Metadata struct { - // LastTagTime is the date and time at which the image was last tagged. - LastTagTime time.Time `json:",omitempty"` -} - -// PruneReport contains the response for Engine API: -// POST "/images/prune" -type PruneReport struct { - ImagesDeleted []DeleteResponse - SpaceReclaimed uint64 -} - -// LoadResponse returns information to the client about a load process. -// -// TODO(thaJeztah): remove this type, and just use an io.ReadCloser -// -// This type was added in https://github.com/moby/moby/pull/18878, related -// to https://github.com/moby/moby/issues/19177; -// -// Make docker load to output json when the response content type is json -// Swarm hijacks the response from docker load and returns JSON rather -// than plain text like the Engine does. This makes the API library to return -// information to figure that out. -// -// However the "load" endpoint unconditionally returns JSON; -// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255 -// -// PR https://github.com/moby/moby/pull/21959 made the response-type depend -// on whether "quiet" was set, but this logic got changed in a follow-up -// https://github.com/moby/moby/pull/25557, which made the JSON response-type -// unconditionally, but the output produced depend on whether"quiet" was set. -// -// We should deprecated the "quiet" option, as it's really a client -// responsibility. -type LoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go deleted file mode 100644 index e302bb0ae..000000000 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ /dev/null @@ -1,36 +0,0 @@ -package image // import "github.com/docker/docker/api/types/image" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// HistoryResponseItem individual image layer information in response to ImageHistory operation -// swagger:model HistoryResponseItem -type HistoryResponseItem struct { - - // comment - // Required: true - Comment string `json:"Comment"` - - // created - // Required: true - Created int64 `json:"Created"` - - // created by - // Required: true - CreatedBy string `json:"CreatedBy"` - - // Id - // Required: true - ID string `json:"Id"` - - // size - // Required: true - Size int64 `json:"Size"` - - // tags - // Required: true - Tags []string `json:"Tags"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go deleted file mode 100644 index 8e32c9af8..000000000 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ /dev/null @@ -1,85 +0,0 @@ -package image - -import ( - "context" - "io" - - "github.com/docker/docker/api/types/filters" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ImportSource holds source information for ImageImport -type ImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImportOptions holds information to import images from the client host. -type ImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// CreateOptions holds information to create images. -type CreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - -// PullOptions holds information to pull images. -type PullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - - // PrivilegeFunc is a function that clients can supply to retry operations - // after getting an authorization error. This function returns the registry - // authentication header value in base64 encoded format, or an error if the - // privilege request fails. - // - // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. - PrivilegeFunc func(context.Context) (string, error) - Platform string -} - -// PushOptions holds information to push images. -type PushOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - - // PrivilegeFunc is a function that clients can supply to retry operations - // after getting an authorization error. This function returns the registry - // authentication header value in base64 encoded format, or an error if the - // privilege request fails. - // - // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. - PrivilegeFunc func(context.Context) (string, error) - - // Platform is an optional field that selects a specific platform to push - // when the image is a multi-platform image. - // Using this will only push a single platform-specific manifest. 
- Platform *ocispec.Platform `json:",omitempty"` -} - -// ListOptions holds parameters to list images with. -type ListOptions struct { - // All controls whether all images in the graph are filtered, or just - // the heads. - All bool - - // Filters is a JSON-encoded set of filter arguments. - Filters filters.Args - - // SharedSize indicates whether the shared size of images should be computed. - SharedSize bool - - // ContainerCount indicates whether container count should be computed. - ContainerCount bool -} - -// RemoveOptions holds parameters to remove images. -type RemoveOptions struct { - Force bool - PruneChildren bool -} diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go deleted file mode 100644 index f1e3e2ef0..000000000 --- a/vendor/github.com/docker/docker/api/types/image/summary.go +++ /dev/null @@ -1,89 +0,0 @@ -package image - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Summary summary -// swagger:model Summary -type Summary struct { - - // Number of containers using this image. Includes both stopped and running - // containers. - // - // This size is not calculated by default, and depends on which API endpoint - // is used. `-1` indicates that the value has not been set / calculated. - // - // Required: true - Containers int64 `json:"Containers"` - - // Date and time at which the image was created as a Unix timestamp - // (number of seconds sinds EPOCH). - // - // Required: true - Created int64 `json:"Created"` - - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - // - // Required: true - ID string `json:"Id"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - // - // Required: true - ParentID string `json:"ParentId"` - - // List of content-addressable digests of locally available image manifests - // that the image is referenced from. Multiple manifests can refer to the - // same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - // - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // List of image names/tags in the local image cache that reference this - // image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - // - // Required: true - RepoTags []string `json:"RepoTags"` - - // Total size of image layers that are shared between this image and other - // images. - // - // This size is not calculated by default. `-1` indicates that the value - // has not been set / calculated. 
- // - // Required: true - SharedSize int64 `json:"SharedSize"` - - // Total size of the image including all layers it is composed of. - // - // Required: true - Size int64 `json:"Size"` - - // Total size of the image including all layers it is composed of. - // - // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - VirtualSize int64 `json:"VirtualSize,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index c68dcf65b..000000000 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,150 +0,0 @@ -package mount // import "github.com/docker/docker/api/types/mount" - -import ( - "os" -) - -// Type represents the type of a mount. -type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" - // TypeCluster is the type for Swarm Cluster Volumes. - TypeCluster Type = "cluster" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` // attempts recursive read-only if possible - Consistency Consistency `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` - ClusterOptions *ClusterOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. -type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// Propagations is the list of all valid mount propagations -var Propagations = []Propagation{ - PropagationRPrivate, - PropagationPrivate, - PropagationRShared, - PropagationShared, - PropagationRSlave, - PropagationSlave, -} - -// Consistency represents the consistency requirements of a mount. -type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - -// BindOptions defines options specific to mounts of type "bind". 
-type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` - CreateMountpoint bool `json:",omitempty"` - // ReadOnlyNonRecursive makes the mount non-recursively read-only, but still leaves the mount recursive - // (unless NonRecursive is set to true in conjunction). - ReadOnlyNonRecursive bool `json:",omitempty"` - // ReadOnlyForceRecursive raises an error if the mount cannot be made recursively read-only. - ReadOnlyForceRecursive bool `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Subpath string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. - SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - // Options to be passed to the tmpfs mount. An array of arrays. Flag - // options should be provided as 1-length arrays. Other types should be - // provided as 2-length arrays, where the first item is the key and the - // second the value. - Options [][]string `json:",omitempty"` - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system. -} - -// ClusterOptions specifies options for a Cluster volume. -type ClusterOptions struct { - // intentionally empty -} diff --git a/vendor/github.com/docker/docker/api/types/network/create_response.go b/vendor/github.com/docker/docker/api/types/network/create_response.go deleted file mode 100644 index c32b35bff..000000000 --- a/vendor/github.com/docker/docker/api/types/network/create_response.go +++ /dev/null @@ -1,19 +0,0 @@ -package network - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CreateResponse NetworkCreateResponse -// -// OK response to NetworkCreate operation -// swagger:model CreateResponse -type CreateResponse struct { - - // The ID of the created network. 
- // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warning string `json:"Warning"` -} diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go deleted file mode 100644 index 0fbb40b35..000000000 --- a/vendor/github.com/docker/docker/api/types/network/endpoint.go +++ /dev/null @@ -1,147 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "net" - - "github.com/docker/docker/internal/multierror" -) - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint. - // MacAddress may be used to specify a MAC address when the container is created. - // Once the container is running, it becomes operational data (it may contain a - // generated address). - MacAddress string - DriverOpts map[string]string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - // DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to - // generate PTR records. - DNSNames []string -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) - } - - if len(es.DNSNames) > 0 { - epCopy.DNSNames = make([]string, len(es.DNSNames)) - copy(epCopy.DNSNames, es.DNSNames) - } - - return &epCopy -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - -// NetworkSubnet describes a user-defined subnet for a specific network. It's only used to validate if an -// EndpointIPAMConfig is valid for a specific network. -type NetworkSubnet interface { - // Contains checks whether the NetworkSubnet contains [addr]. - Contains(addr net.IP) bool - // IsStatic checks whether the subnet was statically allocated (ie. user-defined). - IsStatic() bool -} - -// IsInRange checks whether static IP addresses are valid in a specific network. -func (cfg *EndpointIPAMConfig) IsInRange(v4Subnets []NetworkSubnet, v6Subnets []NetworkSubnet) error { - var errs []error - - if err := validateEndpointIPAddress(cfg.IPv4Address, v4Subnets); err != nil { - errs = append(errs, err) - } - if err := validateEndpointIPAddress(cfg.IPv6Address, v6Subnets); err != nil { - errs = append(errs, err) - } - - return multierror.Join(errs...) 
-} - -func validateEndpointIPAddress(epAddr string, ipamSubnets []NetworkSubnet) error { - if epAddr == "" { - return nil - } - - var staticSubnet bool - parsedAddr := net.ParseIP(epAddr) - for _, subnet := range ipamSubnets { - if subnet.IsStatic() { - staticSubnet = true - if subnet.Contains(parsedAddr) { - return nil - } - } - } - - if staticSubnet { - return fmt.Errorf("no configured subnet or ip-range contain the IP address %s", epAddr) - } - - return errors.New("user specified IP address is supported only when connecting to networks with user configured subnets") -} - -// Validate checks whether cfg is valid. -func (cfg *EndpointIPAMConfig) Validate() error { - if cfg == nil { - return nil - } - - var errs []error - - if cfg.IPv4Address != "" { - if addr := net.ParseIP(cfg.IPv4Address); addr == nil || addr.To4() == nil || addr.IsUnspecified() { - errs = append(errs, fmt.Errorf("invalid IPv4 address: %s", cfg.IPv4Address)) - } - } - if cfg.IPv6Address != "" { - if addr := net.ParseIP(cfg.IPv6Address); addr == nil || addr.To4() != nil || addr.IsUnspecified() { - errs = append(errs, fmt.Errorf("invalid IPv6 address: %s", cfg.IPv6Address)) - } - } - for _, addr := range cfg.LinkLocalIPs { - if parsed := net.ParseIP(addr); parsed == nil || parsed.IsUnspecified() { - errs = append(errs, fmt.Errorf("invalid link-local IP address: %s", addr)) - } - } - - return multierror.Join(errs...) -} diff --git a/vendor/github.com/docker/docker/api/types/network/ipam.go b/vendor/github.com/docker/docker/api/types/network/ipam.go deleted file mode 100644 index f319e1402..000000000 --- a/vendor/github.com/docker/docker/api/types/network/ipam.go +++ /dev/null @@ -1,134 +0,0 @@ -package network - -import ( - "errors" - "fmt" - "net/netip" - - "github.com/docker/docker/internal/multierror" -) - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -type ipFamily string - -const ( - ip4 ipFamily = "IPv4" - ip6 ipFamily = "IPv6" -) - -// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of -// errors found. -func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error { - if ipam == nil { - return nil - } - - var errs []error - for _, cfg := range ipam.Config { - subnet, err := netip.ParsePrefix(cfg.Subnet) - if err != nil { - errs = append(errs, fmt.Errorf("invalid subnet %s: invalid CIDR block notation", cfg.Subnet)) - continue - } - subnetFamily := ip4 - if subnet.Addr().Is6() { - subnetFamily = ip6 - } - - if !enableIPv6 && subnetFamily == ip6 { - continue - } - - if subnet != subnet.Masked() { - errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked())) - } - - if ipRangeErrs := validateIPRange(cfg.IPRange, subnet, subnetFamily); len(ipRangeErrs) > 0 { - errs = append(errs, ipRangeErrs...) 
- } - - if err := validateAddress(cfg.Gateway, subnet, subnetFamily); err != nil { - errs = append(errs, fmt.Errorf("invalid gateway %s: %w", cfg.Gateway, err)) - } - - for auxName, aux := range cfg.AuxAddress { - if err := validateAddress(aux, subnet, subnetFamily); err != nil { - errs = append(errs, fmt.Errorf("invalid auxiliary address %s: %w", auxName, err)) - } - } - } - - if err := multierror.Join(errs...); err != nil { - return fmt.Errorf("invalid network config:\n%w", err) - } - - return nil -} - -func validateIPRange(ipRange string, subnet netip.Prefix, subnetFamily ipFamily) []error { - if ipRange == "" { - return nil - } - prefix, err := netip.ParsePrefix(ipRange) - if err != nil { - return []error{fmt.Errorf("invalid ip-range %s: invalid CIDR block notation", ipRange)} - } - family := ip4 - if prefix.Addr().Is6() { - family = ip6 - } - - if family != subnetFamily { - return []error{fmt.Errorf("invalid ip-range %s: parent subnet is an %s block", ipRange, subnetFamily)} - } - - var errs []error - if prefix.Bits() < subnet.Bits() { - errs = append(errs, fmt.Errorf("invalid ip-range %s: CIDR block is bigger than its parent subnet %s", ipRange, subnet)) - } - if prefix != prefix.Masked() { - errs = append(errs, fmt.Errorf("invalid ip-range %s: it should be %s", prefix, prefix.Masked())) - } - if !subnet.Overlaps(prefix) { - errs = append(errs, fmt.Errorf("invalid ip-range %s: parent subnet %s doesn't contain ip-range", ipRange, subnet)) - } - - return errs -} - -func validateAddress(address string, subnet netip.Prefix, subnetFamily ipFamily) error { - if address == "" { - return nil - } - addr, err := netip.ParseAddr(address) - if err != nil { - return errors.New("invalid address") - } - family := ip4 - if addr.Is6() { - family = ip6 - } - - if family != subnetFamily { - return fmt.Errorf("parent subnet is an %s block", subnetFamily) - } - if !subnet.Contains(addr) { - return fmt.Errorf("parent subnet %s doesn't contain this address", subnet) - } - - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index c8db97a7e..000000000 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,166 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" - -import ( - "time" - - "github.com/docker/docker/api/types/filters" -) - -const ( - // NetworkDefault is a platform-independent alias to choose the platform-specific default network stack. - NetworkDefault = "default" - // NetworkHost is the name of the predefined network used when the NetworkMode host is selected (only available on Linux) - NetworkHost = "host" - // NetworkNone is the name of the predefined network used when the NetworkMode none is selected (available on both Linux and Windows) - NetworkNone = "none" - // NetworkBridge is the name of the default network on Linux - NetworkBridge = "bridge" - // NetworkNat is the name of the default network on Windows - NetworkNat = "nat" -) - -// CreateRequest is the request message sent to the server for network create call. -type CreateRequest struct { - CreateOptions - Name string // Name is the requested name of the network. - - // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client - // package to older daemons. - CheckDuplicate *bool `json:",omitempty"` -} - -// CreateOptions holds options to create a network. 
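ValidateIPAM above is the entry point callers run over an IPAM definition before creating a network. A small sketch of the happy path and one error path, with made-up subnets (illustrative only):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	// A well-formed IPv4 IPAM configuration (placeholder ranges).
	ok := &network.IPAM{
		Driver: "default",
		Config: []network.IPAMConfig{{
			Subnet:  "172.28.0.0/16",
			IPRange: "172.28.5.0/24",
			Gateway: "172.28.0.1",
		}},
	}
	fmt.Println(network.ValidateIPAM(ok, false)) // <nil>

	// A subnet that is not the masked network address fails validation.
	bad := &network.IPAM{
		Config: []network.IPAMConfig{{Subnet: "172.28.0.1/16"}},
	}
	fmt.Println(network.ValidateIPAM(bad, false)) // invalid network config: ... it should be 172.28.0.0/16
}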
-type CreateOptions struct { - Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). - EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6. - IPAM *IPAM // IPAM is the network's IP Address Management. - Internal bool // Internal represents if the network is used internal only. - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. - Options map[string]string // Options specifies the network-specific options to use for when creating the network. - Labels map[string]string // Labels holds metadata specific to the network being created. -} - -// ListOptions holds parameters to filter the list of networks with. -type ListOptions struct { - Filters filters.Args -} - -// InspectOptions holds parameters to inspect network. -type InspectOptions struct { - Scope string - Verbose bool -} - -// ConnectOptions represents the data to be used to connect a container to the -// network. -type ConnectOptions struct { - Container string - EndpointConfig *EndpointSettings `json:",omitempty"` -} - -// DisconnectOptions represents the data to be used to disconnect a container -// from the network. -type DisconnectOptions struct { - Container string - Force bool -} - -// Inspect is the body of the "get network" http response message. -type Inspect struct { - Name string // Name is the name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. 
- Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]ServiceInfo `json:",omitempty"` -} - -// Summary is used as response when listing networks. It currently is an alias -// for [Inspect], but may diverge in the future, as not all information may -// be included when listing networks. -type Summary = Inspect - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// EndpointResource contains network resources allocated and used for a -// container in a network. -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} - -var acceptedFilters = map[string]bool{ - "dangling": true, - "driver": true, - "id": true, - "label": true, - "name": true, - "scope": true, - "type": true, -} - -// ValidateFilters validates the list of filter args with the available filters. -func ValidateFilters(filter filters.Args) error { - return filter.Validate(acceptedFilters) -} - -// PruneReport contains the response for Engine API: -// POST "/networks/prune" -type PruneReport struct { - NetworksDeleted []string -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index abae48b9a..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True if the plugin is running. False if the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // plugin remote reference used to push/pull the plugin - PluginReference string `json:"PluginReference,omitempty"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. 
-// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // pid host - // Required: true - PidHost bool `json:"PidHost"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // Protocol to use for clients connecting to the plugin. - ProtocolScheme string `json:"ProtocolScheme,omitempty"` - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. 
-// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 569901067..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2e..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e8..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index d91234744..000000000 --- a/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go deleted file mode 100644 index 97a924e37..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ /dev/null @@ -1,99 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" -import ( - "encoding/base64" - "encoding/json" - "io" - "strings" - - "github.com/pkg/errors" -) - -// AuthHeader is the name of the header used to send encoded registry -// authorization credentials for registry operations (push/pull). -const AuthHeader = "X-Registry-Auth" - -// AuthConfig contains authorization information for connecting to a Registry. -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} - -// EncodeAuthConfig serializes the auth configuration as a base64url encoded -// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header. -// -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 -func EncodeAuthConfig(authConfig AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", errInvalidParameter{err} - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON -// authentication information as sent through the X-Registry-Auth header. -// -// This function always returns an AuthConfig, even if an error occurs. It is up -// to the caller to decide if authentication is required, and if the error can -// be ignored. -// -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 -func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { - if authEncoded == "" { - return &AuthConfig{}, nil - } - - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - return decodeAuthConfigFromReader(authJSON) -} - -// DecodeAuthConfigBody decodes authentication information as sent as JSON in the -// body of a request. This function is to provide backward compatibility with old -// clients and API versions. Current clients and API versions expect authentication -// to be provided through the X-Registry-Auth header. -// -// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an -// error occurs. 
It is up to the caller to decide if authentication is required, -// and if the error can be ignored. -func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { - return decodeAuthConfigFromReader(rdr) -} - -func decodeAuthConfigFromReader(rdr io.Reader) (*AuthConfig, error) { - authConfig := &AuthConfig{} - if err := json.NewDecoder(rdr).Decode(authConfig); err != nil { - // always return an (empty) AuthConfig to increase compatibility with - // the existing API. - return &AuthConfig{}, invalid(err) - } - return authConfig, nil -} - -func invalid(err error) error { - return errInvalidParameter{errors.Wrap(err, "invalid X-Registry-Auth header")} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { return e.error } - -func (e errInvalidParameter) Unwrap() error { return e.error } diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e4..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 75ee07b15..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,96 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor ocispec.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []ocispec.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/registry/search.go b/vendor/github.com/docker/docker/api/types/registry/search.go deleted file mode 100644 index a0a1eec54..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/search.go +++ /dev/null @@ -1,47 +0,0 @@ -package registry - -import ( - "context" - - "github.com/docker/docker/api/types/filters" -) - -// SearchOptions holds parameters to search images with. -type SearchOptions struct { - RegistryAuth string - - // PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can - // supply to retry operations after getting an authorization error. - // - // It must return the registry authentication header value in base64 - // format, or an error if the privilege request fails. 
- PrivilegeFunc func(context.Context) (string, error) - Filters filters.Args - Limit int -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated. - // - // Deprecated: the "is_automated" field is deprecated and will always be "false". - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc..000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index 5ded7dba8..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,48 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "strconv" - "time" -) - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// String implements fmt.Stringer interface. -func (v Version) String() string { - return strconv.FormatUint(v.Index, 10) -} - -// Meta is a base object inherited by most of the other once. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). 
-type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index 16202ccce..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config. -type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReferenceRuntimeTarget is a target for a config specifying that it -// isn't mounted into the container but instead has some other purpose. -type ConfigReferenceRuntimeTarget struct{} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget `json:",omitempty"` - Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` - ConfigID string - ConfigName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index 30e3de70c..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,119 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. 
-type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// SeccompMode is the type used for the enumeration of possible seccomp modes -// in SeccompOpts -type SeccompMode string - -const ( - SeccompModeDefault SeccompMode = "default" - SeccompModeUnconfined SeccompMode = "unconfined" - SeccompModeCustom SeccompMode = "custom" -) - -// SeccompOpts defines the options for configuring seccomp on a swarm-managed -// container. -type SeccompOpts struct { - // Mode is the SeccompMode used for the container. - Mode SeccompMode `json:",omitempty"` - // Profile is the custom seccomp profile as a json object to be used with - // the container. Mode should be set to SeccompModeCustom when using a - // custom profile in this manner. - Profile []byte `json:",omitempty"` -} - -// AppArmorMode is type used for the enumeration of possible AppArmor modes in -// AppArmorOpts -type AppArmorMode string - -const ( - AppArmorModeDefault AppArmorMode = "default" - AppArmorModeDisabled AppArmorMode = "disabled" -) - -// AppArmorOpts defines the options for configuring AppArmor on a swarm-managed -// container. Currently, custom AppArmor profiles are not supported. -type AppArmorOpts struct { - Mode AppArmorMode `json:",omitempty"` -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - Config string - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext - Seccomp *SeccompOpts `json:",omitempty"` - AppArmor *AppArmorOpts `json:",omitempty"` - NoNewPrivileges bool -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] 
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` - CapabilityAdd []string `json:",omitempty"` - CapabilityDrop []string `json:",omitempty"` - Ulimits []*container.Ulimit `json:",omitempty"` - OomScoreAdj int64 `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284d..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. 
-type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index bb98d5eed..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,139 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` - CSIInfo []NodeCSIInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. 
-type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// NodeCSIInfo represents information about a CSI plugin available on the node -type NodeCSIInfo struct { - // PluginName is the name of the CSI plugin. - PluginName string `json:",omitempty"` - // NodeID is the ID of the node as reported by the CSI plugin. This is - // different from the swarm node ID. - NodeID string `json:",omitempty"` - // MaxVolumesPerNode is the maximum number of volumes that may be published - // to this node - MaxVolumesPerNode int64 `json:",omitempty"` - // AccessibleTopology indicates the location of this node in the CSI - // plugin's topology - AccessibleTopology *Topology `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) - -// Topology defines the CSI topology of this node. This type is a duplicate of -// github.com/docker/docker/api/types.Topology. Because the type definition -// is so simple and to avoid complicated structure or circular imports, we just -// duplicate it here. 
See that type for full documentation -type Topology struct { - Segments map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403cc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 292bd7afc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --gogofaster_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 32aaf0d51..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,808 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -package runtime - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. 
-type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges,proto3" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{0} -} -func (m *PluginSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginSpec.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginSpec.Merge(m, src) -} -func (m *PluginSpec) XXX_Size() int { - return m.Size() -} -func (m *PluginSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PluginSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginSpec proto.InternalMessageInfo - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value,proto3" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { - return fileDescriptor_22a625af4bc1cc87, []int{1} -} -func (m *PluginPrivilege) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginPrivilege) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginPrivilege.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginPrivilege) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginPrivilege.Merge(m, src) -} -func (m *PluginPrivilege) XXX_Size() int { - return m.Size() -} -func (m *PluginPrivilege) XXX_DiscardUnknown() { - xxx_messageInfo_PluginPrivilege.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginPrivilege proto.InternalMessageInfo - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} - -func init() { proto.RegisterFile("plugin.proto", fileDescriptor_22a625af4bc1cc87) } - -var fileDescriptor_22a625af4bc1cc87 = []byte{ - // 225 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x9a, 0xc1, 0xc8, 0xc5, 0x15, 0x00, 0x16, - 0x08, 0x2e, 0x48, 0x4d, 0x16, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, - 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0x52, 0x73, 0xf3, 0x4b, 0x52, 0x25, - 0x98, 0xc0, 0xa2, 0x50, 0x9e, 0x90, 0x01, 0x17, 0x57, 0x41, 0x51, 0x66, 0x59, 0x66, 0x4e, 0x6a, - 0x7a, 0x6a, 0xb1, 0x04, 0xb3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x80, 0x1e, 0xc4, 0xb0, 0x00, 0x98, - 0x44, 0x10, 0x92, 0x1a, 0x21, 0x29, 0x2e, 0x8e, 0x94, 0xcc, 0xe2, 0xc4, 0xa4, 0x9c, 0xd4, 0x14, - 0x09, 0x16, 0x05, 0x46, 0x0d, 0x8e, 0x20, 0x38, 0x5f, 0x48, 0x80, 0x8b, 0x39, 0x35, 0xaf, 0x4c, - 0x82, 0x55, 0x81, 0x59, 0x83, 0x33, 0x08, 0xc4, 0x54, 0x8a, 0xe5, 0xe2, 0x47, 0x33, 0x0c, 0xab, - 0xf3, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x6e, 0x44, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x91, 0x33, - 0x08, 0xc2, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, - 0x70, 0xd0, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0xea, 0xe2, 0xca, 0x2a, 0x01, 0x00, - 0x00, -} - -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Env[iNdEx]) - copy(dAtA[i:], m.Env[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Env[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.Disabled { - i-- - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Privileges) > 0 { - for iNdEx := len(m.Privileges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Privileges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlugin(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Remote) > 0 { - i -= len(m.Remote) - copy(dAtA[i:], m.Remote) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PluginPrivilege) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - for iNdEx := len(m.Value) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Value[iNdEx]) - copy(dAtA[i:], m.Value[iNdEx]) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Value[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - offset -= sovPlugin(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PluginSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlugin(x uint64) (n int) { - return 
sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlugin - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlugin - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlugin - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlugin = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index e311b36ba..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec98..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. -type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. 
- Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index 5b6d5ec12..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,202 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` - - // ServiceStatus is an optional, extra field indicating the number of - // desired and running tasks. It is provided primarily as a shortcut to - // calculating these values client-side, which otherwise would require - // listing all tasks for a service, an operation that could be - // computation and network expensive. - ServiceStatus *ServiceStatus `json:",omitempty"` - - // JobStatus is the status of a Service which is in one of ReplicatedJob or - // GlobalJob modes. It is absent on Replicated and Global services. - JobStatus *JobStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks specifies which networks the service should attach to. - // - // Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` - ReplicatedJob *ReplicatedJob `json:",omitempty"` - GlobalJob *GlobalJob `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. - UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback in progress. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback in progress. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. 
-type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -// ReplicatedJob is the a type of Service which executes a defined Tasks -// in parallel until the specified number of Tasks have succeeded. -type ReplicatedJob struct { - // MaxConcurrent indicates the maximum number of Tasks that should be - // executing simultaneously for this job at any given time. There may be - // fewer Tasks that MaxConcurrent executing simultaneously; for example, if - // there are fewer than MaxConcurrent tasks needed to reach - // TotalCompletions. - // - // If this field is empty, it will default to a max concurrency of 1. - MaxConcurrent *uint64 `json:",omitempty"` - - // TotalCompletions is the total number of Tasks desired to run to - // completion. - // - // If this field is empty, the value of MaxConcurrent will be used. - TotalCompletions *uint64 `json:",omitempty"` -} - -// GlobalJob is the type of a Service which executes a Task on every Node -// matching the Service's placement constraints. These tasks run to completion -// and then exit. -// -// This type is deliberately empty. -type GlobalJob struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update failures. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. - // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. 
- Order string -} - -// ServiceStatus represents the number of running tasks in a service and the -// number of tasks desired to be running. -type ServiceStatus struct { - // RunningTasks is the number of tasks for the service actually in the - // Running state - RunningTasks uint64 - - // DesiredTasks is the number of tasks desired to be running by the - // service. For replicated services, this is the replica count. For global - // services, this is computed by taking the number of tasks with desired - // state of not-Shutdown. - DesiredTasks uint64 - - // CompletedTasks is the number of tasks in the state Completed, if this - // service is in ReplicatedJob or GlobalJob mode. This field must be - // cross-referenced with the service type, because the default value of 0 - // may mean that a service is not in a job mode, or it may mean that the - // job has yet to complete any tasks. - CompletedTasks uint64 -} - -// JobStatus is the status of a job-type service. -type JobStatus struct { - // JobIteration is a value increased each time a Job is executed, - // successfully or otherwise. "Executed", in this case, means the job as a - // whole has been started, not that an individual Task has been launched. A - // job is "Executed" when its ServiceSpec is updated. JobIteration can be - // used to disambiguate Tasks belonging to different executions of a job. - // - // Though JobIteration will increase with each subsequent execution, it may - // not necessarily increase by 1, and so JobIteration should not be used to - // keep track of the number of times a job has been executed. - JobIteration Version - - // LastExecution is the time that the job was last executed, as observed by - // Swarm manager. - LastExecution time.Time `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go deleted file mode 100644 index 9a268ff1b..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service_create_response.go +++ /dev/null @@ -1,20 +0,0 @@ -package swarm - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceCreateResponse contains the information returned to a client on the -// creation of a new service. -// -// swagger:model ServiceCreateResponse -type ServiceCreateResponse struct { - - // The ID of the created service. - ID string `json:"ID,omitempty"` - - // Optional warning message. - // - // FIXME(thaJeztah): this should have "omitempty" in the generated type. - // - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go b/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go deleted file mode 100644 index 0417467da..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package swarm - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index 3eae4b9b2..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,237 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" -) - -// ClusterInfo represents info about the cluster for outputting in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool - DefaultAddrPool []string - SubnetSize uint32 - DataPathPort uint32 -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an affect on new tasks. Old tasks - // will continue use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. - KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. 
- // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted. - SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - DataPathPort uint32 - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability - DefaultAddrPool []string - SubnetSize uint32 -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. 
-type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` - - Warnings []string `json:",omitempty"` -} - -// Status provides information about the current swarm status and role, -// obtained from the "Swarm" header in the API response. -type Status struct { - // NodeState represents the state of the node. - NodeState LocalNodeState - - // ControlAvailable indicates if the node is a swarm manager. - ControlAvailable bool -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index ad3eeca0b..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,225 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` - - // JobIteration is the JobIteration of the Service that this Task was - // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is - // used to determine which Tasks belong to which run of the job. This field - // is absent if the Service mode is Replicated or Global. - JobIteration *Version `json:",omitempty"` - - // Volumes is the list of VolumeAttachments for this task. It specifies - // which particular volumes are to be used by this particular task, and - // fulfilling what mounts in the spec. - Volumes []VolumeAttachment -} - -// TaskSpec represents the spec of a task. 
-type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory) which can be advertised by a -// node and requested to be reserved for a task. -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// Limit describes limits on resources which can be requested by a task. -type Limit struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - Pids int64 `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Limit `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - MaxReplicas uint64 `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. - Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. 
-type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string - PID int - ExitCode int -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} - -// VolumeAttachment contains the associating a Volume to a Task. -type VolumeAttachment struct { - // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. - ID string `json:",omitempty"` - - // Source, together with Target, indicates the Mount, as specified in the - // ContainerSpec, that this volume fulfills. - Source string `json:",omitempty"` - - // Target, together with Source, indicates the Mount, as specified - // in the ContainerSpec, that this volume fulfills. - Target string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go deleted file mode 100644 index c66a2afb8..000000000 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ /dev/null @@ -1,148 +0,0 @@ -package system - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" -) - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. 
- CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - Runtimes map[string]RuntimeWithStatus - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - CDISpecDirs []string - - Containerd *ContainerdInfo `json:",omitempty"` - - // Warnings contains a slice of warnings that occurred while collecting - // system information. These warnings are intended to be informational - // messages for the user, and are not intended to be parsed / used for - // other purposes, as they do not have a fixed format. - Warnings []string -} - -// ContainerdInfo holds information about the containerd instance used by the daemon. -type ContainerdInfo struct { - // Address is the path to the containerd socket. - Address string `json:",omitempty"` - // Namespaces is the containerd namespaces used by the daemon. - Namespaces ContainerdNamespaces -} - -// ContainerdNamespaces reflects the containerd namespaces used by the daemon. -// -// These namespaces can be configured in the daemon configuration, and are -// considered to be used exclusively by the daemon, -// -// As these namespaces are considered to be exclusively accessed -// by the daemon, it is not recommended to change these values, -// or to change them to a value that is used by other systems, -// such as cri-containerd. -type ContainerdNamespaces struct { - // Containers holds the default containerd namespace used for - // containers managed by the daemon. - // - // The default namespace for containers is "moby", but will be - // suffixed with the `<uid>.<gid>` of the remapped `root` if - // user-namespaces are enabled and the containerd image-store - // is used. - Containers string - - // Plugins holds the default containerd namespace used for - // plugins managed by the daemon. - // - // The default namespace for plugins is "moby", but will be - // suffixed with the `<uid>.<gid>` of the remapped `root` if - // user-namespaces are enabled and the containerd image-store - // is used. - Plugins string -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon.
It is used by [Info] struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. -} - -// NetworkAddressPool is a temp struct used by [Info] struct. -type NetworkAddressPool struct { - Base string - Size int -} diff --git a/vendor/github.com/docker/docker/api/types/system/runtime.go b/vendor/github.com/docker/docker/api/types/system/runtime.go deleted file mode 100644 index d077295a0..000000000 --- a/vendor/github.com/docker/docker/api/types/system/runtime.go +++ /dev/null @@ -1,20 +0,0 @@ -package system - -// Runtime describes an OCI runtime -type Runtime struct { - // "Legacy" runtime configuration for runc-compatible runtimes. - - Path string `json:"path,omitempty"` - Args []string `json:"runtimeArgs,omitempty"` - - // Shimv2 runtime configuration. Mutually exclusive with the legacy config above. - - Type string `json:"runtimeType,omitempty"` - Options map[string]interface{} `json:"options,omitempty"` -} - -// RuntimeWithStatus extends [Runtime] to hold [RuntimeStatus]. -type RuntimeWithStatus struct { - Runtime - Status map[string]string `json:"status,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/system/security_opts.go b/vendor/github.com/docker/docker/api/types/system/security_opts.go deleted file mode 100644 index edff3eb1a..000000000 --- a/vendor/github.com/docker/docker/api/types/system/security_opts.go +++ /dev/null @@ -1,48 +0,0 @@ -package system - -import ( - "errors" - "fmt" - "strings" -) - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a -// type-safe [SecurityOpt]. -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - for _, s := range strings.Split(opt, ",") { - k, v, ok := strings.Cut(s, "=") - if !ok { - return nil, fmt.Errorf("invalid security option %q", s) - } - if k == "" || v == "" { - return nil, errors.New("invalid empty security option") - } - if k == "name" { - secopt.Name = v - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: k, Value: v}) - } - so = append(so, secopt) - } - return so, nil -} - -// KeyValue holds a key/value pair. 
-type KeyValue struct { - Key, Value string -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go deleted file mode 100644 index cab5c32e3..000000000 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ /dev/null @@ -1,131 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. -func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339 like timestamp but the parser failed with an error - } - if _, _, err := parseTimestamp(value); err != nil { - return "", fmt.Errorf("failed to parse value as time or duration: %q", value) - } - return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp 
that has -// the format ("%d.%09d", time.Unix(), int64(time.Nanosecond())). -// If the incoming nanosecond portion is longer than 9 digits it is truncated. -// The expectation is that the seconds and nanoseconds will be used to create a -// time variable. For example: -// -// seconds, nanoseconds, _ := ParseTimestamp("1136073600.000000001",0) -// since := time.Unix(seconds, nanoseconds) -// -// returns seconds as defaultSeconds if value == "" -func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) { - if value == "" { - return defaultSeconds, 0, nil - } - return parseTimestamp(value) -} - -func parseTimestamp(value string) (sec int64, nsec int64, err error) { - s, n, ok := strings.Cut(value, ".") - sec, err = strconv.ParseInt(s, 10, 64) - if err != nil { - return sec, 0, err - } - if !ok { - return sec, 0, nil - } - nsec, err = strconv.ParseInt(n, 10, 64) - if err != nil { - return sec, nsec, err - } - // should already be in nanoseconds but just in case convert n to nanoseconds - nsec = int64(float64(nsec) * math.Pow(float64(10), float64(9-len(n)))) - return sec, nsec, nil -} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index fe99b7439..000000000 --- a/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,487 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" - "github.com/docker/go-connections/nat" -) - -const ( - // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams - MediaTypeRawStream = "application/vnd.docker.raw-stream" - - // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams - MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" -) - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string `json:",omitempty"` - Layers []string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - ID string `json:"Id"` - - // RepoTags is a list of image names/tags in the local image cache that - // reference this image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - RepoTags []string - - // RepoDigests is a list of content-addressable digests of locally available - // image manifests that the image is referenced from. Multiple manifests can - // refer to the same image. 
- // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - RepoDigests []string - - // Parent is the ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - Parent string - - // Comment is an optional message that can be set when committing or - // importing the image. - Comment string - - // Created is the date and time at which the image was created, formatted in - // RFC 3339 nano-seconds (time.RFC3339Nano). - // - // This information is only available if present in the image, - // and omitted otherwise. - Created string `json:",omitempty"` - - // Container is the ID of the container that was used to create the image. - // - // Depending on how the image was created, this field may be empty. - // - // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - Container string `json:",omitempty"` - - // ContainerConfig is an optional field containing the configuration of the - // container that was last committed when creating the image. - // - // Previous versions of Docker builder used this field to store build cache, - // and it is not in active use anymore. - // - // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - ContainerConfig *container.Config `json:",omitempty"` - - // DockerVersion is the version of Docker that was used to build the image. - // - // Depending on how the image was created, this field may be empty. - DockerVersion string - - // Author is the name of the author that was specified when committing the - // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. - Author string - Config *container.Config - - // Architecture is the hardware CPU architecture that the image runs on. - Architecture string - - // Variant is the CPU architecture variant (presently ARM-only). - Variant string `json:",omitempty"` - - // OS is the Operating System the image is built to run on. - Os string - - // OsVersion is the version of the Operating System the image is built to - // run on (especially for Windows). - OsVersion string `json:",omitempty"` - - // Size is the total size of the image including all layers it is composed of. - Size int64 - - // VirtualSize is the total size of the image including all layers it is - // composed of. - // - // Deprecated: this field is omitted in API v1.44, but kept for backward compatibility. Use Size instead. - VirtualSize int64 `json:"VirtualSize,omitempty"` - - // GraphDriver holds information about the storage driver used to store the - // container's and image's filesystem. - GraphDriver GraphDriverData - - // RootFS contains information about the image's RootFS, including the - // layer IDs. - RootFS RootFS - - // Metadata of the image in the local cache. - // - // This information is local to the daemon, and not part of the image itself. 
- Metadata image.Metadata -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - Annotations map[string]string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - OSType string - Experimental bool - BuilderVersion BuilderVersion - - // SwarmStatus provides information about the current swarm status of the - // engine, obtained from the "Swarm" header in the API response. - // - // It can be a nil struct if the API version does not provide this header - // in the ping response, or if an error occurred, in which case the client - // should use other ways to get the current swarm status, such as the /swarm - // endpoint. - SwarmStatus *swarm.Status -} - -// ComponentVersion describes the version information for a specific component. -type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. 
Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It sill be removed in the next release. - Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds networking state for a container when inspecting it. -type NetworkSettingsBase struct { - Bridge string // Bridge contains the name of the default bridge interface iff it was set through the daemon --bridge flag. - SandboxID string // SandboxID uniquely represents a container's network stack - SandboxKey string // SandboxKey identifies the sandbox - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - - // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - // - // Deprecated: This field is never set and will be removed in a future release. - HairpinMode bool - // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - // - // Deprecated: This field is never set and will be removed in a future release. - LinkLocalIPv6Address string - // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - // - // Deprecated: This field is never set and will be removed in a future release. - LinkLocalIPv6PrefixLen int - SecondaryIPAddresses []network.Address // Deprecated: This field is never set and will be removed in a future release. - SecondaryIPv6Addresses []network.Address // Deprecated: This field is never set and will be removed in a future release. -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. 
-type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. -type MountPoint struct { - // Type is the type of mount, see `Type` definitions in - // github.com/docker/docker/api/types/mount.Type - Type mount.Type `json:",omitempty"` - - // Name is the name reference to the underlying data defined by `Source` - // e.g., the volume name. - Name string `json:",omitempty"` - - // Source is the source location of the mount. - // - // For volumes, this contains the storage location of the volume (within - // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - // the source (host) part of the bind-mount. For `tmpfs` mount points, this - // field is empty. - Source string - - // Destination is the path relative to the container root (`/`) where the - // Source is mounted inside the container. - Destination string - - // Driver is the volume driver used to create the volume (if it is a volume). - Driver string `json:",omitempty"` - - // Mode is a comma separated list of options supplied by the user when - // creating the bind/volume mount. - // - // The default is platform-specific (`"z"` on Linux, empty on Windows). - Mode string - - // RW indicates whether the mount is mounted writable (read-write). - RW bool - - // Propagation describes how mounts are propagated from the host into the - // mount point, and vice-versa. Refer to the Linux kernel documentation - // for details: - // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt - // - // This field is not used on Windows. - Propagation mount.Propagation -} - -// DiskUsageObject represents an object type used for disk usage query filtering. -type DiskUsageObject string - -const ( - // ContainerObject represents a container DiskUsageObject. - ContainerObject DiskUsageObject = "container" - // ImageObject represents an image DiskUsageObject. - ImageObject DiskUsageObject = "image" - // VolumeObject represents a volume DiskUsageObject. - VolumeObject DiskUsageObject = "volume" - // BuildCacheObject represents a build-cache DiskUsageObject. - BuildCacheObject DiskUsageObject = "build-cache" -) - -// DiskUsageOptions holds parameters for system disk usage query. -type DiskUsageOptions struct { - // Types specifies what object types to include in the response. If empty, - // all object types are returned. - Types []DiskUsageObject -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*image.Summary - Containers []*Container - Volumes []*volume.Volume - BuildCache []*BuildCache - BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. 
-} - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - CachesDeleted []string - SpaceReclaimed uint64 -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. -type PushResult struct { - Tag string - Digest string - Size int -} - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} - -// BuildCache contains information about a build cache record. -type BuildCache struct { - // ID is the unique ID of the build cache record. - ID string - // Parent is the ID of the parent build cache record. - // - // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. - Parent string `json:"Parent,omitempty"` - // Parents is the list of parent build cache record IDs. - Parents []string `json:" Parents,omitempty"` - // Type is the cache record type. - Type string - // Description is a description of the build-step that produced the build cache. - Description string - // InUse indicates if the build cache is in use. - InUse bool - // Shared indicates if the build cache is shared. - Shared bool - // Size is the amount of disk space used by the build cache (in bytes). - Size int64 - // CreatedAt is the date and time at which the build cache was created. - CreatedAt time.Time - // LastUsedAt is the date and time at which the build cache was last used. - LastUsedAt *time.Time - UsageCount int -} - -// BuildCachePruneOptions hold parameters to prune the build cache -type BuildCachePruneOptions struct { - All bool - KeepStorage int64 - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go deleted file mode 100644 index 43ffe104a..000000000 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ /dev/null @@ -1,210 +0,0 @@ -package types - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/volume" -) - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -// -// Deprecated: use [image.PruneReport]. -type ImagesPruneReport = image.PruneReport - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune". -// -// Deprecated: use [volume.PruneReport]. -type VolumesPruneReport = volume.PruneReport - -// NetworkCreateRequest is the request message sent to the server for network create call. -// -// Deprecated: use [network.CreateRequest]. 
-type NetworkCreateRequest = network.CreateRequest - -// NetworkCreate is the expected body of the "create network" http request message -// -// Deprecated: use [network.CreateOptions]. -type NetworkCreate = network.CreateOptions - -// NetworkListOptions holds parameters to filter the list of networks with. -// -// Deprecated: use [network.ListOptions]. -type NetworkListOptions = network.ListOptions - -// NetworkCreateResponse is the response message sent by the server for network create call. -// -// Deprecated: use [network.CreateResponse]. -type NetworkCreateResponse = network.CreateResponse - -// NetworkInspectOptions holds parameters to inspect network. -// -// Deprecated: use [network.InspectOptions]. -type NetworkInspectOptions = network.InspectOptions - -// NetworkConnect represents the data to be used to connect a container to the network -// -// Deprecated: use [network.ConnectOptions]. -type NetworkConnect = network.ConnectOptions - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -// -// Deprecated: use [network.DisconnectOptions]. -type NetworkDisconnect = network.DisconnectOptions - -// EndpointResource contains network resources allocated and used for a container in a network. -// -// Deprecated: use [network.EndpointResource]. -type EndpointResource = network.EndpointResource - -// NetworkResource is the body of the "get network" http response message/ -// -// Deprecated: use [network.Inspect] or [network.Summary] (for list operations). -type NetworkResource = network.Inspect - -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -// -// Deprecated: use [network.PruneReport]. -type NetworksPruneReport = network.PruneReport - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -// -// Deprecated: use [container.ExecOptions]. -type ExecConfig = container.ExecOptions - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -// -// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions]. -type ExecStartCheck = container.ExecStartOptions - -// ContainerExecInspect holds information returned by exec inspect. -// -// Deprecated: use [container.ExecInspect]. -type ContainerExecInspect = container.ExecInspect - -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -// -// Deprecated: use [container.PruneReport]. -type ContainersPruneReport = container.PruneReport - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. -// -// Deprecated: use [container.PathStat]. -type ContainerPathStat = container.PathStat - -// CopyToContainerOptions holds information -// about files to copy into a container. -// -// Deprecated: use [container.CopyToContainerOptions], -type CopyToContainerOptions = container.CopyToContainerOptions - -// ContainerStats contains response of Engine API: -// GET "/stats" -// -// Deprecated: use [container.StatsResponseReader]. -type ContainerStats = container.StatsResponseReader - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -// -// Deprecated: use [container.ThrottlingData]. -type ThrottlingData = container.ThrottlingData - -// CPUUsage stores All CPU stats aggregated since container inception. -// -// Deprecated: use [container.CPUUsage]. 
-type CPUUsage = container.CPUUsage - -// CPUStats aggregates and wraps all CPU related info of container -// -// Deprecated: use [container.CPUStats]. -type CPUStats = container.CPUStats - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -// -// Deprecated: use [container.MemoryStats]. -type MemoryStats = container.MemoryStats - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -// -// Deprecated: use [container.BlkioStatEntry]. -type BlkioStatEntry = container.BlkioStatEntry - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -// -// Deprecated: use [container.BlkioStats]. -type BlkioStats = container.BlkioStats - -// StorageStats is the disk I/O stats for read/write on Windows. -// -// Deprecated: use [container.StorageStats]. -type StorageStats = container.StorageStats - -// NetworkStats aggregates the network stats of one container -// -// Deprecated: use [container.NetworkStats]. -type NetworkStats = container.NetworkStats - -// PidsStats contains the stats of a container's pids -// -// Deprecated: use [container.PidsStats]. -type PidsStats = container.PidsStats - -// Stats is Ultimate struct aggregating all types of stats of one container -// -// Deprecated: use [container.Stats]. -type Stats = container.Stats - -// StatsJSON is newly used Networks -// -// Deprecated: use [container.StatsResponse]. -type StatsJSON = container.StatsResponse - -// EventsOptions holds parameters to filter events with. -// -// Deprecated: use [events.ListOptions]. -type EventsOptions = events.ListOptions - -// ImageSearchOptions holds parameters to search images with. -// -// Deprecated: use [registry.SearchOptions]. -type ImageSearchOptions = registry.SearchOptions - -// ImageImportSource holds source information for ImageImport -// -// Deprecated: use [image.ImportSource]. -type ImageImportSource image.ImportSource - -// ImageLoadResponse returns information to the client about a load process. -// -// Deprecated: use [image.LoadResponse]. -type ImageLoadResponse = image.LoadResponse - -// ContainerNode stores information about the node that a container -// is running on. It's only used by the Docker Swarm standalone API. -// -// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release. -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go deleted file mode 100644 index 621725a36..000000000 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ /dev/null @@ -1,65 +0,0 @@ -package versions // import "github.com/docker/docker/api/types/versions" - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. 
-func compare(v1, v2 string) int { - if v1 == v2 { - return 0 - } - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - maxVer := len(currTab) - if len(otherTab) > maxVer { - maxVer = len(otherTab) - } - for i := 0; i < maxVer; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go deleted file mode 100644 index bbd9ff0b8..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ /dev/null @@ -1,420 +0,0 @@ -package volume - -import ( - "github.com/docker/docker/api/types/swarm" -) - -// ClusterVolume contains options and information specific to, and only present -// on, Swarm CSI cluster volumes. -type ClusterVolume struct { - // ID is the Swarm ID of the volume. Because cluster volumes are Swarm - // objects, they have an ID, unlike non-cluster volumes, which only have a - // Name. This ID can be used to refer to the cluster volume. - ID string - - // Meta is the swarm metadata about this volume. - swarm.Meta - - // Spec is the cluster-specific options from which this volume is derived. - Spec ClusterVolumeSpec - - // PublishStatus contains the status of the volume as it pertains to its - // publishing on Nodes. - PublishStatus []*PublishStatus `json:",omitempty"` - - // Info is information about the global status of the volume. - Info *Info `json:",omitempty"` -} - -// ClusterVolumeSpec contains the spec used to create this volume. -type ClusterVolumeSpec struct { - // Group defines the volume group of this volume. Volumes belonging to the - // same group can be referred to by group name when creating Services. - // Referring to a volume by group instructs swarm to treat volumes in that - // group interchangeably for the purpose of scheduling. Volumes with an - // empty string for a group technically all belong to the same, emptystring - // group. - Group string `json:",omitempty"` - - // AccessMode defines how the volume is used by tasks. - AccessMode *AccessMode `json:",omitempty"` - - // AccessibilityRequirements specifies where in the cluster a volume must - // be accessible from. - // - // This field must be empty if the plugin does not support - // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the - // plugin does not support it, volume will not be created. 
- // - // If AccessibilityRequirements is empty, but the plugin does support - // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire - // cluster is a valid target for the volume. - AccessibilityRequirements *TopologyRequirement `json:",omitempty"` - - // CapacityRange defines the desired capacity that the volume should be - // created with. If nil, the plugin will decide the capacity. - CapacityRange *CapacityRange `json:",omitempty"` - - // Secrets defines Swarm Secrets that are passed to the CSI storage plugin - // when operating on this volume. - Secrets []Secret `json:",omitempty"` - - // Availability is the Volume's desired availability. Analogous to Node - // Availability, this allows the user to take volumes offline in order to - // update or delete them. - Availability Availability `json:",omitempty"` -} - -// Availability specifies the availability of the volume. -type Availability string - -const ( - // AvailabilityActive indicates that the volume is active and fully - // schedulable on the cluster. - AvailabilityActive Availability = "active" - - // AvailabilityPause indicates that no new workloads should use the - // volume, but existing workloads can continue to use it. - AvailabilityPause Availability = "pause" - - // AvailabilityDrain indicates that all workloads using this volume - // should be rescheduled, and the volume unpublished from all nodes. - AvailabilityDrain Availability = "drain" -) - -// AccessMode defines the access mode of a volume. -type AccessMode struct { - // Scope defines the set of nodes this volume can be used on at one time. - Scope Scope `json:",omitempty"` - - // Sharing defines the number and way that different tasks can use this - // volume at one time. - Sharing SharingMode `json:",omitempty"` - - // MountVolume defines options for using this volume as a Mount-type - // volume. - // - // Either BlockVolume or MountVolume, but not both, must be present. - MountVolume *TypeMount `json:",omitempty"` - - // BlockVolume defines options for using this volume as a Block-type - // volume. - // - // Either BlockVolume or MountVolume, but not both, must be present. - BlockVolume *TypeBlock `json:",omitempty"` -} - -// Scope defines the Scope of a Cluster Volume. This is how many nodes a -// Volume can be accessed simultaneously on. -type Scope string - -const ( - // ScopeSingleNode indicates the volume can be used on one node at a - // time. - ScopeSingleNode Scope = "single" - - // ScopeMultiNode indicates the volume can be used on many nodes at - // the same time. - ScopeMultiNode Scope = "multi" -) - -// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a -// Volume at the same time can use it. -type SharingMode string - -const ( - // SharingNone indicates that only one Task may use the Volume at a - // time. - SharingNone SharingMode = "none" - - // SharingReadOnly indicates that the Volume may be shared by any - // number of Tasks, but they must be read-only. - SharingReadOnly SharingMode = "readonly" - - // SharingOneWriter indicates that the Volume may be shared by any - // number of Tasks, but all after the first must be read-only. - SharingOneWriter SharingMode = "onewriter" - - // SharingAll means that the Volume may be shared by any number of - // Tasks, as readers or writers. - SharingAll SharingMode = "all" -) - -// TypeBlock defines options for using a volume as a block-type volume. -// -// Intentionally empty. 
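Taken together, the declarations above describe how a Swarm CSI cluster volume is requested. A small sketch of a spec literal, assuming the `github.com/docker/docker/api/types/volume` package these types are being un-vendored from; the group name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	// A single-node, exclusive-access cluster volume that can later be
	// taken offline by switching Availability to "drain".
	spec := volume.ClusterVolumeSpec{
		Group: "dbdata", // placeholder group name
		AccessMode: &volume.AccessMode{
			Scope:   volume.ScopeSingleNode,
			Sharing: volume.SharingNone,
		},
		Availability: volume.AvailabilityActive,
	}
	fmt.Printf("%+v\n", spec)
}
```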
-type TypeBlock struct{} - -// TypeMount contains options for using a volume as a Mount-type -// volume. -type TypeMount struct { - // FsType specifies the filesystem type for the mount volume. Optional. - FsType string `json:",omitempty"` - - // MountFlags defines flags to pass when mounting the volume. Optional. - MountFlags []string `json:",omitempty"` -} - -// TopologyRequirement expresses the user's requirements for a volume's -// accessible topology. -type TopologyRequirement struct { - // Requisite specifies a list of Topologies, at least one of which the - // volume must be accessible from. - // - // Taken verbatim from the CSI Spec: - // - // Specifies the list of topologies the provisioned volume MUST be - // accessible from. - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // If requisite is specified, the provisioned volume MUST be - // accessible from at least one of the requisite topologies. - // - // Given - // x = number of topologies provisioned volume is accessible from - // n = number of requisite topologies - // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 - // If x==n, then the SP MUST make the provisioned volume available to - // all topologies from the list of requisite topologies. If it is - // unable to do so, the SP MUST fail the CreateVolume call. - // For example, if a volume should be accessible from a single zone, - // and requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2". - // Similarly, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and both "zone" "Z2" and "zone" "Z3". - // - // If xn, then the SP MUST make the provisioned volume available from - // all topologies from the list of requisite topologies and MAY choose - // the remaining x-n unique topologies from the list of all possible - // topologies. If it is unable to do so, the SP MUST fail the - // CreateVolume call. - // For example, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2" and the SP may select the second zone - // independently, e.g. "R1/Z4". - Requisite []Topology `json:",omitempty"` - - // Preferred is a list of Topologies that the volume should attempt to be - // provisioned in. - // - // Taken from the CSI spec: - // - // Specifies the list of topologies the CO would prefer the volume to - // be provisioned in. - // - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // An SP MUST attempt to make the provisioned volume available using - // the preferred topologies in order from first to last. - // - // If requisite is specified, all topologies in preferred list MUST - // also be present in the list of requisite topologies. - // - // If the SP is unable to make the provisioned volume available - // from any of the preferred topologies, the SP MAY choose a topology - // from the list of requisite topologies. - // If the list of requisite topologies is not specified, then the SP - // MAY choose from the list of all possible topologies. 
- // If the list of requisite topologies is specified and the SP is - // unable to make the provisioned volume available from any of the - // requisite topologies it MUST fail the CreateVolume call. - // - // Example 1: - // Given a volume should be accessible from a single zone, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // preferred = - // {"region": "R1", "zone": "Z3"} - // then the SP SHOULD first attempt to make the provisioned volume - // available from "zone" "Z3" in the "region" "R1" and fall back to - // "zone" "Z2" in the "region" "R1" if that is not possible. - // - // Example 2: - // Given a volume should be accessible from a single zone, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z2"} - // then the SP SHOULD first attempt to make the provisioned volume - // accessible from "zone" "Z4" in the "region" "R1" and fall back to - // "zone" "Z2" in the "region" "R1" if that is not possible. If that - // is not possible, the SP may choose between either the "zone" - // "Z3" or "Z5" in the "region" "R1". - // - // Example 3: - // Given a volume should be accessible from TWO zones (because an - // opaque parameter in CreateVolumeRequest, for example, specifies - // the volume is accessible from two zones, aka synchronously - // replicated), and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z5"}, - // {"region": "R1", "zone": "Z3"} - // then the SP SHOULD first attempt to make the provisioned volume - // accessible from the combination of the two "zones" "Z5" and "Z3" in - // the "region" "R1". If that's not possible, it should fall back to - // a combination of "Z5" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of "Z3" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of other possibilities from the list of requisite. - Preferred []Topology `json:",omitempty"` -} - -// Topology is a map of topological domains to topological segments. -// -// This description is taken verbatim from the CSI Spec: -// -// A topological domain is a sub-division of a cluster, like "region", -// "zone", "rack", etc. -// A topological segment is a specific instance of a topological domain, -// like "zone3", "rack3", etc. -// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an OPTIONAL prefix and name, separated -// by a slash (/), for example: "com.company.example/zone". -// The key name segment is REQUIRED. The prefix is OPTIONAL. -// The key name MUST be 63 characters or less, begin and end with an -// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), -// underscores (_), dots (.), or alphanumerics in between, for example -// "zone". -// The key prefix MUST be 63 characters or less, begin and end with a -// lower-case alphanumeric character ([a-z0-9]), contain only -// dashes (-), dots (.), or lower-case alphanumerics in between, and -// follow domain name notation format -// (https://tools.ietf.org/html/rfc1035#section-2.3.1). 
-// The key prefix SHOULD include the plugin's host company name and/or -// the plugin name, to minimize the possibility of collisions with keys -// from other plugins. -// If a key prefix is specified, it MUST be identical across all -// topology keys returned by the SP (across all RPCs). -// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" -// MUST not both exist. -// Each value (topological segment) MUST contain 1 or more strings. -// Each string MUST be 63 characters or less and begin and end with an -// alphanumeric character with '-', '_', '.', or alphanumerics in -// between. -type Topology struct { - Segments map[string]string `json:",omitempty"` -} - -// CapacityRange describes the minimum and maximum capacity a volume should be -// created with -type CapacityRange struct { - // RequiredBytes specifies that a volume must be at least this big. The - // value of 0 indicates an unspecified minimum. - RequiredBytes int64 - - // LimitBytes specifies that a volume must not be bigger than this. The - // value of 0 indicates an unspecified maximum - LimitBytes int64 -} - -// Secret represents a Swarm Secret value that must be passed to the CSI -// storage plugin when operating on this Volume. It represents one key-value -// pair of possibly many. -type Secret struct { - // Key is the name of the key of the key-value pair passed to the plugin. - Key string - - // Secret is the swarm Secret object from which to read data. This can be a - // Secret name or ID. The Secret data is retrieved by Swarm and used as the - // value of the key-value pair passed to the plugin. - Secret string -} - -// PublishState represents the state of a Volume as it pertains to its -// use on a particular Node. -type PublishState string - -const ( - // StatePending indicates that the volume should be published on - // this node, but the call to ControllerPublishVolume has not been - // successfully completed yet and the result recorded by swarmkit. - StatePending PublishState = "pending-publish" - - // StatePublished means the volume is published successfully to the node. - StatePublished PublishState = "published" - - // StatePendingNodeUnpublish indicates that the Volume should be - // unpublished on the Node, and we're waiting for confirmation that it has - // done so. After the Node has confirmed that the Volume has been - // unpublished, the state will move to StatePendingUnpublish. - StatePendingNodeUnpublish PublishState = "pending-node-unpublish" - - // StatePendingUnpublish means the volume is still published to the node - // by the controller, awaiting the operation to unpublish it. - StatePendingUnpublish PublishState = "pending-controller-unpublish" -) - -// PublishStatus represents the status of the volume as published to an -// individual node -type PublishStatus struct { - // NodeID is the ID of the swarm node this Volume is published to. - NodeID string `json:",omitempty"` - - // State is the publish state of the volume. - State PublishState `json:",omitempty"` - - // PublishContext is the PublishContext returned by the CSI plugin when - // a volume is published. - PublishContext map[string]string `json:",omitempty"` -} - -// Info contains information about the Volume as a whole as provided by -// the CSI storage plugin. -type Info struct { - // CapacityBytes is the capacity of the volume in bytes. A value of 0 - // indicates that the capacity is unknown. 
- CapacityBytes int64 `json:",omitempty"` - - // VolumeContext is the context originating from the CSI storage plugin - // when the Volume is created. - VolumeContext map[string]string `json:",omitempty"` - - // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This - // is distinct from the Volume's Swarm ID, which is the ID used by all of - // the Docker Engine to refer to the Volume. If this field is blank, then - // the Volume has not been successfully created yet. - VolumeID string `json:",omitempty"` - - // AccessibleTopolgoy is the topology this volume is actually accessible - // from. - AccessibleTopology []Topology `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go deleted file mode 100644 index 37c41a609..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/create_options.go +++ /dev/null @@ -1,29 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CreateOptions VolumeConfig -// -// Volume configuration -// swagger:model CreateOptions -type CreateOptions struct { - - // cluster volume spec - ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` - - // Name of the volume driver to use. - Driver string `json:"Driver,omitempty"` - - // A mapping of driver options and values. These options are - // passed directly to the driver and are driver specific. - // - DriverOpts map[string]string `json:"DriverOpts,omitempty"` - - // User-defined key/value metadata. - Labels map[string]string `json:"Labels,omitempty"` - - // The new volume's name. If not specified, Docker generates a name. - // - Name string `json:"Name,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go deleted file mode 100644 index ca5192a2a..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/list_response.go +++ /dev/null @@ -1,18 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ListResponse VolumeListResponse -// -// Volume list response -// swagger:model ListResponse -type ListResponse struct { - - // List of volumes - Volumes []*Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes. - // - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go deleted file mode 100644 index 0b9645e00..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/options.go +++ /dev/null @@ -1,15 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -import "github.com/docker/docker/api/types/filters" - -// ListOptions holds parameters to list volumes. 
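The `ListOptions`/`ListResponse` pair removed around here is consumed by the client's `VolumeList` call. A hedged sketch of that flow, assuming `VolumeList` accepts a `volume.ListOptions` value (as it does in the Engine API client versions that ship these types) and that a daemon is reachable via the usual environment variables:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// List only dangling (unreferenced) volumes.
	resp, err := cli.VolumeList(context.Background(), volume.ListOptions{
		Filters: filters.NewArgs(filters.Arg("dangling", "true")),
	})
	if err != nil {
		panic(err)
	}
	for _, v := range resp.Volumes {
		fmt.Println(v.Name, v.Driver)
	}
	for _, w := range resp.Warnings {
		fmt.Println("warning:", w)
	}
}
```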
-type ListOptions struct { - Filters filters.Args -} - -// PruneReport contains the response for Engine API: -// POST "/volumes/prune" -type PruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go deleted file mode 100644 index ea7d555e5..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume.go +++ /dev/null @@ -1,75 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Volume volume -// swagger:model Volume -type Volume struct { - - // cluster volume - ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` - - // Date/Time the volume was created. - CreatedAt string `json:"CreatedAt,omitempty"` - - // Name of the volume driver used by the volume. - // Required: true - Driver string `json:"Driver"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // Mount path of the volume on the host. - // Required: true - Mountpoint string `json:"Mountpoint"` - - // Name of the volume. - // Required: true - Name string `json:"Name"` - - // The driver specific options used when creating the volume. - // - // Required: true - Options map[string]string `json:"Options"` - - // The level at which the volume exists. Either `global` for cluster-wide, - // or `local` for machine level. - // - // Required: true - Scope string `json:"Scope"` - - // Low-level details about the volume, provided by the volume driver. - // Details are returned as a map with key/value pairs: - // `{"key":"value","key2":"value2"}`. - // - // The `Status` field is optional, and is omitted if the volume driver - // does not support this feature. - // - Status map[string]interface{} `json:"Status,omitempty"` - - // usage data - UsageData *UsageData `json:"UsageData,omitempty"` -} - -// UsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// -// swagger:model UsageData -type UsageData struct { - - // The number of containers referencing this volume. This field - // is set to `-1` if the reference-count is not available. - // - // Required: true - RefCount int64 `json:"RefCount"` - - // Amount of disk space used by the volume (in bytes). This information - // is only available for volumes created with the `"local"` volume - // driver. For volumes created with other volume drivers, this field - // is set to `-1` ("not available") - // - // Required: true - Size int64 `json:"Size"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go deleted file mode 100644 index f958f80a6..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_update.go +++ /dev/null @@ -1,7 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// UpdateOptions is configuration to update a Volume with. -type UpdateOptions struct { - // Spec is the ClusterVolumeSpec to update the volume to. 
- Spec *ClusterVolumeSpec `json:"Spec,omitempty"` -} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md deleted file mode 100644 index f8af3ab90..000000000 --- a/vendor/github.com/docker/docker/client/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Go client for the Docker Engine API - -The `docker` command uses this package to communicate with the daemon. It can -also be used by your own Go applications to do anything the command-line -interface does – running containers, pulling images, managing swarms, etc. - -For example, to list all containers (the equivalent of `docker ps --all`): - -```go -package main - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/client" -) - -func main() { - apiClient, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - defer apiClient.Close() - - containers, err := apiClient.ContainerList(context.Background(), container.ListOptions{All: true}) - if err != nil { - panic(err) - } - - for _, ctr := range containers { - fmt.Printf("%s %s (status: %s)\n", ctr.ID, ctr.Image, ctr.Status) - } -} -``` - -[Full documentation is available on pkg.go.dev.](https://pkg.go.dev/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go deleted file mode 100644 index b76bf366b..000000000 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// BuildCancel requests the daemon to cancel the ongoing build request. -func (cli *Client) BuildCancel(ctx context.Context, id string) error { - query := url.Values{} - query.Set("id", id) - - serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go deleted file mode 100644 index 1a830f413..000000000 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ /dev/null @@ -1,45 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/pkg/errors" -) - -// BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil { - return nil, err - } - - report := types.BuildCachePruneReport{} - - query := url.Values{} - if opts.All { - query.Set("all", "1") - } - query.Set("keep-storage", strconv.Itoa(int(opts.KeepStorage))) - f, err := filters.ToJSON(opts.Filters) - if err != nil { - return nil, errors.Wrap(err, "prune could not marshal filters option") - } - query.Set("filters", f) - - serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - - if err != nil { - return nil, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return nil, errors.Wrap(err, "error retrieving disk usage") - } - - return &report, nil -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go 
b/vendor/github.com/docker/docker/client/checkpoint_create.go deleted file mode 100644 index 9746d288d..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go deleted file mode 100644 index b968c2b23..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options checkpoint.DeleteOptions) error { - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go deleted file mode 100644 index 8feb1f3f7..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/checkpoint" -) - -// CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) { - var checkpoints []checkpoint.Summary - - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return checkpoints, err - } - - err = json.NewDecoder(resp.body).Decode(&checkpoints) - return checkpoints, err -} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go deleted file mode 100644 index 60d91bc65..000000000 --- a/vendor/github.com/docker/docker/client/client.go +++ /dev/null @@ -1,461 +0,0 @@ -/* -Package client is a Go client for the Docker Engine API. - -For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/api/ - -# Usage - -You use the library by constructing a client object using [NewClientWithOpts] -and calling methods on it. The client can be configured from environment -variables by passing the [FromEnv] option, or configured manually by passing any -of the other available [Opts]. 
- -For example, to list running containers (the equivalent of "docker ps"): - - package main - - import ( - "context" - "fmt" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/client" - ) - - func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), container.ListOptions{}) - if err != nil { - panic(err) - } - - for _, ctr := range containers { - fmt.Printf("%s %s\n", ctr.ID, ctr.Image) - } - } -*/ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "crypto/tls" - "net" - "net/http" - "net/url" - "path" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" - "github.com/pkg/errors" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - "go.opentelemetry.io/otel/trace" -) - -// DummyHost is a hostname used for local communication. -// -// It acts as a valid formatted hostname for local connections (such as "unix://" -// or "npipe://") which do not require a hostname. It should never be resolved, -// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2] -// and [RFC 6761, Section 6.3]). -// -// [RFC 7230, Section 5.4] defines that an empty header must be used for such -// cases: -// -// If the authority component is missing or undefined for the target URI, -// then a client MUST send a Host header field with an empty field-value. -// -// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not -// allow an empty header to be used, and requires req.URL.Scheme to be either -// "http" or "https". -// -// For further details, refer to: -// -// - https://github.com/docker/engine-api/issues/189 -// - https://github.com/golang/go/issues/13624 -// - https://github.com/golang/go/issues/61076 -// - https://github.com/moby/moby/issues/45935 -// -// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2 -// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3 -// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4 -// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 -const DummyHost = "api.moby.localhost" - -// fallbackAPIVersion is the version to fallback to if API-version negotiation -// fails. This version is the highest version of the API before API-version -// negotiation was introduced. If negotiation fails (or no API version was -// included in the API response), we assume the API server uses the most -// recent version before negotiation was introduced. -const fallbackAPIVersion = "1.24" - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // scheme sets the scheme for the client - scheme string - // host holds the server address to connect to - host string - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // client used to send and receive http requests. - client *http.Client - // version of the server to talk to. - version string - // userAgent is the User-Agent header to use for HTTP requests. 
It takes - // precedence over User-Agent headers set in customHTTPHeaders, and other - // header variables. When set to an empty string, the User-Agent header - // is removed, and no header is sent. - userAgent *string - // custom HTTP headers configured by users. - customHTTPHeaders map[string]string - // manualOverride is set to true when the version was set by users. - manualOverride bool - - // negotiateVersion indicates if the client should automatically negotiate - // the API version to use when making requests. API version negotiation is - // performed on the first request, after which negotiated is set to "true" - // so that subsequent requests do not re-negotiate. - negotiateVersion bool - - // negotiated indicates that API version negotiation took place - negotiated atomic.Bool - - // negotiateLock is used to single-flight the version negotiation process - negotiateLock sync.Mutex - - tp trace.TracerProvider - - // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections). - // Store the original transport as the http.Client transport will be wrapped with tracing libs. - baseTransport *http.Transport -} - -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") - -// CheckRedirect specifies the policy for dealing with redirect responses. It -// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for -// non-GET requests. It returns an [ErrRedirect] for non-GET request, otherwise -// returns a [http.ErrUseLastResponse], which is special-cased by http.Client -// to use the last response. -// -// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308) -// in the client. The client (and by extension API client) can be made to send -// a request like "POST /containers//start" where what would normally be in the -// name section of the URL is empty. This triggers an HTTP 301 from the daemon. -// -// In go 1.8 this 301 is converted to a GET request, and ends up getting -// a 404 from the daemon. This behavior change manifests in the client in that -// before, the 301 was not followed and the client did not generate an error, -// but now results in a message like "Error response from daemon: page not found". -func CheckRedirect(_ *http.Request, via []*http.Request) error { - if via[0].Method == http.MethodGet { - return http.ErrUseLastResponse - } - return ErrRedirect -} - -// NewClientWithOpts initializes a new API client with a default HTTPClient, and -// default API host and version. It also initializes the custom HTTP headers to -// add to each request. -// -// It takes an optional list of [Opt] functional arguments, which are applied in -// the order they're provided, which allows modifying the defaults when creating -// the client. For example, the following initializes a client that configures -// itself with values from environment variables ([FromEnv]), and has automatic -// API version negotiation enabled ([WithAPIVersionNegotiation]). 
-// -// cli, err := client.NewClientWithOpts( -// client.FromEnv, -// client.WithAPIVersionNegotiation(), -// ) -func NewClientWithOpts(ops ...Opt) (*Client, error) { - hostURL, err := ParseHostURL(DefaultDockerHost) - if err != nil { - return nil, err - } - - client, err := defaultHTTPClient(hostURL) - if err != nil { - return nil, err - } - c := &Client{ - host: DefaultDockerHost, - version: api.DefaultVersion, - client: client, - proto: hostURL.Scheme, - addr: hostURL.Host, - } - - for _, op := range ops { - if err := op(c); err != nil { - return nil, err - } - } - - if tr, ok := c.client.Transport.(*http.Transport); ok { - // Store the base transport before we wrap it in tracing libs below - // This is used, as an example, to close idle connections when the client is closed - c.baseTransport = tr - } - - if c.scheme == "" { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. - if c.tlsConfig() != nil { - c.scheme = "https" - } else { - c.scheme = "http" - } - } - - c.client.Transport = otelhttp.NewTransport( - c.client.Transport, - otelhttp.WithTracerProvider(c.tp), - otelhttp.WithSpanNameFormatter(func(_ string, req *http.Request) string { - return req.Method + " " + req.URL.Path - }), - ) - - return c, nil -} - -func (cli *Client) tlsConfig() *tls.Config { - if cli.baseTransport == nil { - return nil - } - return cli.baseTransport.TLSClientConfig -} - -func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { - transport := &http.Transport{} - err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) - if err != nil { - return nil, err - } - return &http.Client{ - Transport: transport, - CheckRedirect: CheckRedirect, - }, nil -} - -// Close the transport used by the client -func (cli *Client) Close() error { - if cli.baseTransport != nil { - cli.baseTransport.CloseIdleConnections() - return nil - } - return nil -} - -// checkVersion manually triggers API version negotiation (if configured). -// This allows for version-dependent code to use the same version as will -// be negotiated when making the actual requests, and for which cases -// we cannot do the negotiation lazily. -func (cli *Client) checkVersion(ctx context.Context) error { - if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated.Load() { - // Ensure exclusive write access to version and negotiated fields - cli.negotiateLock.Lock() - defer cli.negotiateLock.Unlock() - - // May have been set during last execution of critical zone - if cli.negotiated.Load() { - return nil - } - - ping, err := cli.Ping(ctx) - if err != nil { - return err - } - cli.negotiateAPIVersionPing(ping) - } - return nil -} - -// getAPIPath returns the versioned request path to call the API. -// It appends the query parameters to the path if they are not empty. 
-func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { - var apiPath string - _ = cli.checkVersion(ctx) - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v, p) - } else { - apiPath = path.Join(cli.basePath, p) - } - return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() -} - -// ClientVersion returns the API version used by this client. -func (cli *Client) ClientVersion() string { - return cli.version -} - -// NegotiateAPIVersion queries the API and updates the version to match the API -// version. NegotiateAPIVersion downgrades the client's API version to match the -// APIVersion if the ping version is lower than the default version. If the API -// version reported by the server is higher than the maximum version supported -// by the client, it uses the client's maximum version. -// -// If a manual override is in place, either through the "DOCKER_API_VERSION" -// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized -// with a fixed version ([WithVersion]), no negotiation is performed. -// -// If the API server's ping response does not contain an API version, or if the -// client did not get a successful ping response, it assumes it is connected with -// an old daemon that does not support API version negotiation, in which case it -// downgrades to the latest version of the API before version negotiation was -// added (1.24). -func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - if !cli.manualOverride { - // Avoid concurrent modification of version-related fields - cli.negotiateLock.Lock() - defer cli.negotiateLock.Unlock() - - ping, err := cli.Ping(ctx) - if err != nil { - // FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead returning it. - return - } - cli.negotiateAPIVersionPing(ping) - } -} - -// NegotiateAPIVersionPing downgrades the client's API version to match the -// APIVersion in the ping response. If the API version in pingResponse is higher -// than the maximum version supported by the client, it uses the client's maximum -// version. -// -// If a manual override is in place, either through the "DOCKER_API_VERSION" -// ([EnvOverrideAPIVersion]) environment variable, or if the client is initialized -// with a fixed version ([WithVersion]), no negotiation is performed. -// -// If the API server's ping response does not contain an API version, we assume -// we are connected with an old daemon without API version negotiation support, -// and downgrade to the latest version of the API before version negotiation was -// added (1.24). -func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { - if !cli.manualOverride { - // Avoid concurrent modification of version-related fields - cli.negotiateLock.Lock() - defer cli.negotiateLock.Unlock() - - cli.negotiateAPIVersionPing(pingResponse) - } -} - -// negotiateAPIVersionPing queries the API and updates the version to match the -// API version from the ping response. 
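The negotiation flow documented above can be driven lazily through the `WithAPIVersionNegotiation` option or explicitly via `NegotiateAPIVersion`. A short sketch of the explicit form, assuming a reachable daemon configured through the usual `DOCKER_HOST`/`DOCKER_API_VERSION` environment variables:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// Before negotiation the client reports its own default API version.
	fmt.Println("default:", cli.ClientVersion())

	// Ping the daemon and, if needed, downgrade to the daemon's version
	// (or to 1.24 when the daemon predates version negotiation).
	cli.NegotiateAPIVersion(ctx)
	fmt.Println("negotiated:", cli.ClientVersion())
}
```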
-func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { - // default to the latest version before versioning headers existed - if pingResponse.APIVersion == "" { - pingResponse.APIVersion = fallbackAPIVersion - } - - // if the client is not initialized with a version, start with the latest supported version - if cli.version == "" { - cli.version = api.DefaultVersion - } - - // if server version is lower than the client version, downgrade - if versions.LessThan(pingResponse.APIVersion, cli.version) { - cli.version = pingResponse.APIVersion - } - - // Store the results, so that automatic API version negotiation (if enabled) - // won't be performed on the next request. - if cli.negotiateVersion { - cli.negotiated.Store(true) - } -} - -// DaemonHost returns the host address used by the client -func (cli *Client) DaemonHost() string { - return cli.host -} - -// HTTPClient returns a copy of the HTTP client bound to the server -func (cli *Client) HTTPClient() *http.Client { - c := *cli.client - return &c -} - -// ParseHostURL parses a url string, validates the string is a host url, and -// returns the parsed URL -func ParseHostURL(host string) (*url.URL, error) { - proto, addr, ok := strings.Cut(host, "://") - if !ok || addr == "" { - return nil, errors.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return nil, err - } - addr = parsed.Host - basePath = parsed.Path - } - return &url.URL{ - Scheme: proto, - Host: addr, - Path: basePath, - }, nil -} - -func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) { - if cli.baseTransport == nil || cli.baseTransport.DialContext == nil { - return nil - } - - if cli.baseTransport.TLSClientConfig != nil { - // When using a tls config we don't use the configured dialer but instead a fallback dialer... - // Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn - // I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit. - return nil - } - return cli.baseTransport.DialContext -} - -// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, -// that can be used for proxying the daemon connection. It is used by -// ["docker dial-stdio"]. -// -// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014 -func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { - return func(ctx context.Context) (net.Conn, error) { - if dialFn := cli.dialerFromTransport(); dialFn != nil { - return dialFn(ctx, cli.proto, cli.addr) - } - switch cli.proto { - case "unix": - return net.Dial(cli.proto, cli.addr) - case "npipe": - return sockets.DialPipe(cli.addr, 32*time.Second) - default: - if tlsConfig := cli.tlsConfig(); tlsConfig != nil { - return tls.Dial(cli.proto, cli.addr, tlsConfig) - } - return net.Dial(cli.proto, cli.addr) - } - } -} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go deleted file mode 100644 index 9e366ce20..000000000 --- a/vendor/github.com/docker/docker/client/client_deprecated.go +++ /dev/null @@ -1,27 +0,0 @@ -package client - -import "net/http" - -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. 
-// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -// -// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion], -// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API -// version negotiation by passing the [WithAPIVersionNegotiation] option instead -// of WithVersion. -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) -} - -// NewEnvClient initializes a new API client based on environment variables. -// See FromEnv for a list of support environment variables. -// -// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option. -func NewEnvClient() (*Client, error) { - return NewClientWithOpts(FromEnv) -} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go deleted file mode 100644 index 9fe78ea43..000000000 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !windows - -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST -// (EnvOverrideHost) environment variable is unset or empty. -const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go deleted file mode 100644 index 56572d1a2..000000000 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST -// (EnvOverrideHost) environment variable is unset or empty. -const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go deleted file mode 100644 index 3deb4a8e2..000000000 --- a/vendor/github.com/docker/docker/client/config_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigCreate creates a new config. 
-func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { - var response types.ConfigCreateResponse - if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/configs/create", nil, config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go deleted file mode 100644 index 2c6c7cb36..000000000 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigInspectWithRaw returns the config information with raw data -func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { - if id == "" { - return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} - } - if err := cli.NewVersionError(ctx, "1.30", "config inspect"); err != nil { - return swarm.Config{}, nil, err - } - resp, err := cli.get(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Config{}, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return swarm.Config{}, nil, err - } - - var config swarm.Config - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&config) - - return config, body, err -} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go deleted file mode 100644 index 14dd3813e..000000000 --- a/vendor/github.com/docker/docker/client/config_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigList returns the list of configs. -func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/configs", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var configs []swarm.Config - err = json.NewDecoder(resp.body).Decode(&configs) - return configs, err -} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go deleted file mode 100644 index d05b0113a..000000000 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ConfigRemove removes a config. 
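The config endpoints being removed here (`ConfigCreate`, `ConfigInspectWithRaw`, `ConfigList`, `ConfigRemove`, `ConfigUpdate`) all require API 1.30+ and a swarm-mode manager. A hedged sketch of the create/inspect/remove round trip; the config name and payload are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// Create a config; fails unless the daemon is a swarm manager.
	created, err := cli.ConfigCreate(ctx, swarm.ConfigSpec{
		Annotations: swarm.Annotations{Name: "app-settings"}, // placeholder name
		Data:        []byte("log_level=debug\n"),             // placeholder payload
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created config:", created.ID)

	// Read it back (the raw JSON body is also returned), then clean up.
	cfg, _, err := cli.ConfigInspectWithRaw(ctx, created.ID)
	if err != nil {
		panic(err)
	}
	fmt.Println("name:", cfg.Spec.Name)

	if err := cli.ConfigRemove(ctx, created.ID); err != nil {
		panic(err)
	}
}
```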
-func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError(ctx, "1.30", "config remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go deleted file mode 100644 index 6995861df..000000000 --- a/vendor/github.com/docker/docker/client/config_update.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigUpdate attempts to update a config -func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError(ctx, "1.30", "config update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go deleted file mode 100644 index 6a32e5f66..000000000 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ /dev/null @@ -1,60 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
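The attach documentation above describes the 8-byte multiplexing header used for non-TTY containers and points at `stdcopy.StdCopy` for demultiplexing. A minimal sketch of that usage, assuming an existing non-TTY container whose ID is a placeholder:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	const containerID = "my-container" // placeholder: an existing, non-TTY container

	resp, err := cli.ContainerAttach(context.Background(), containerID, container.AttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
		Logs:   true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// For non-TTY containers the stream is multiplexed with the header
	// described above; StdCopy splits it back into stdout and stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		panic(err)
	}
}
```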
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - if options.Logs { - query.Set("logs", "1") - } - - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, http.Header{ - "Content-Type": {"text/plain"}, - }) -} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go deleted file mode 100644 index 26b3f0915..000000000 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "errors" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ContainerCommit applies changes to a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) { - var repository, tag string - if options.Reference != "" { - ref, err := reference.ParseNormalizedNamed(options.Reference) - if err != nil { - return types.IDResponse{}, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") - } - ref = reference.TagNameOnly(ref) - - if tagged, ok := ref.(reference.Tagged); ok { - tag = tagged.Tag() - } - repository = reference.FamiliarName(ref) - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if !options.Pause { - query.Set("pause", "0") - } - - var response types.IDResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go deleted file mode 100644 index 8490a3b15..000000000 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ /dev/null @@ -1,93 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// ContainerStatPath returns stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
- - urlStr := "/containers/" + containerID + "/archive" - response, err := cli.head(ctx, urlStr, query, nil) - defer ensureReaderClosed(response) - if err != nil { - return container.PathStat{}, err - } - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -// Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. - if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - if options.CopyUIDGID { - query.Set("copyUIDGID", "true") - } - - apiPath := "/containers/" + containerID + "/archive" - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - defer ensureReaderClosed(response) - if err != nil { - return err - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := "/containers/" + containerID + "/archive" - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, container.PathStat{}, err - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. 
- stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) { - var stat container.PathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go deleted file mode 100644 index 5442d4267..000000000 --- a/vendor/github.com/docker/docker/client/container_create.go +++ /dev/null @@ -1,116 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "path" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based on the given configuration. -// It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { - var response container.CreateResponse - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. 
- if err := cli.checkVersion(ctx); err != nil { - return response, err - } - - if err := cli.NewVersionError(ctx, "1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { - return response, err - } - if err := cli.NewVersionError(ctx, "1.41", "specify container image platform"); platform != nil && err != nil { - return response, err - } - if err := cli.NewVersionError(ctx, "1.44", "specify health-check start interval"); config != nil && config.Healthcheck != nil && config.Healthcheck.StartInterval != 0 && err != nil { - return response, err - } - if err := cli.NewVersionError(ctx, "1.44", "specify mac-address per network"); hasEndpointSpecificMacAddress(networkingConfig) && err != nil { - return response, err - } - - if hostConfig != nil { - if versions.LessThan(cli.ClientVersion(), "1.25") { - // When using API 1.24 and under, the client is responsible for removing the container - hostConfig.AutoRemove = false - } - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { - // KernelMemory was added in API 1.40, and deprecated in API 1.42 - hostConfig.KernelMemory = 0 - } - if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { - // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize - hostConfig.ConsoleSize = [2]uint{0, 0} - } - } - - // Since API 1.44, the container-wide MacAddress is deprecated and will trigger a WARNING if it's specified. - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.44") { - config.MacAddress = "" //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44. - } - - query := url.Values{} - if p := formatPlatform(platform); p != "" { - query.Set("platform", p) - } - - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} - -// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7). -// -// Similar to containerd's platforms.Format(), but does allow components to be -// omitted (e.g. pass "architecture" only, without "os": -// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 -func formatPlatform(platform *ocispec.Platform) string { - if platform == nil { - return "" - } - return path.Join(platform.OS, platform.Architecture, platform.Variant) -} - -// hasEndpointSpecificMacAddress checks whether one of the endpoint in networkingConfig has a MacAddress defined. 
-func hasEndpointSpecificMacAddress(networkingConfig *network.NetworkingConfig) bool { - if networkingConfig == nil { - return false - } - for _, endpoint := range networkingConfig.EndpointsConfig { - if endpoint.MacAddress != "" { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go deleted file mode 100644 index c22c819a7..000000000 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerDiff shows differences in a container filesystem since it was started. -func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.FilesystemChange, error) { - var changes []container.FilesystemChange - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - return changes, err -} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go deleted file mode 100644 index 9379448d1..000000000 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ /dev/null @@ -1,76 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) { - var response types.IDResponse - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return response, err - } - - if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil { - return response, err - } - if versions.LessThan(cli.ClientVersion(), "1.42") { - options.ConsoleSize = nil - } - - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, options, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. -func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config container.ExecStartOptions) error { - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. 
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config container.ExecAttachOptions) (types.HijackedResponse, error) { - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, http.Header{ - "Content-Type": {"application/json"}, - }) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) { - var response container.ExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go deleted file mode 100644 index d0c0a5cba..000000000 --- a/vendor/github.com/docker/docker/client/container_export.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream. -func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go deleted file mode 100644 index d48f0d3a6..000000000 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ /dev/null @@ -1,53 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - if containerID == "" { - return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ContainerJSON{}, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} - -// ContainerInspectWithRaw returns the container information and its raw representation. 
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - if containerID == "" { - return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} - } - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go deleted file mode 100644 index 7c9529f1e..000000000 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - if signal != "" { - query.Set("signal", signal) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go deleted file mode 100644 index 782e1b3c6..000000000 --- a/vendor/github.com/docker/docker/client/container_list.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" -) - -// ContainerList returns the list of containers in the docker host. 
-func (cli *Client) ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit > 0 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - return containers, err -} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go deleted file mode 100644 index 61197d840..000000000 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ /dev/null @@ -1,80 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types/container" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "until"`) - } - query.Set("until", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go deleted file mode 100644 index 5e7271a37..000000000 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go deleted file mode 100644 index 29c922da7..000000000 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" -) - -// ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) { - var report container.PruneReport - - if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go deleted file mode 100644 index 39f7b106a..000000000 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerRemove kills and removes a container from the docker host. 
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go deleted file mode 100644 index 240fdf552..000000000 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go deleted file mode 100644 index 5cfd01d47..000000000 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options container.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { - query := url.Values{} - query.Set("h", strconv.Itoa(int(height))) - query.Set("w", strconv.Itoa(int(width))) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go deleted file mode 100644 index 02b5079bc..000000000 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon wait for the container to be up again for -// a specific amount of time, given the timeout. -func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { - query := url.Values{} - if options.Timeout != nil { - query.Set("t", strconv.Itoa(*options.Timeout)) - } - if options.Signal != "" { - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. 
- // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return err - } - if versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) - } - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go deleted file mode 100644 index 33ba85f24..000000000 --- a/vendor/github.com/docker/docker/client/container_start.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error { - query := url.Values{} - if len(options.CheckpointID) != 0 { - query.Set("checkpoint", options.CheckpointID) - } - if len(options.CheckpointDir) != 0 { - query.Set("checkpoint-dir", options.CheckpointDir) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go deleted file mode 100644 index b5641daee..000000000 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ /dev/null @@ -1,46 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (container.StatsResponseReader, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return container.StatsResponseReader{}, err - } - - return container.StatsResponseReader{ - Body: resp.body, - OSType: getDockerOS(resp.header.Get("Server")), - }, nil -} - -// ContainerStatsOneShot gets a single stat entry from a container. 
-// It differs from `ContainerStats` in that the API should not wait to prime the stats -func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (container.StatsResponseReader, error) { - query := url.Values{} - query.Set("stream", "0") - query.Set("one-shot", "1") - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return container.StatsResponseReader{}, err - } - - return container.StatsResponseReader{ - Body: resp.body, - OSType: getDockerOS(resp.header.Get("Server")), - }, nil -} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go deleted file mode 100644 index 7c98a354b..000000000 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerStop stops a container. In case the container fails to stop -// gracefully within a time frame specified by the timeout argument, -// it is forcefully terminated (killed). -// -// If the timeout is nil, the container's StopTimeout value is used, if set, -// otherwise the engine default. A negative timeout value can be specified, -// meaning no timeout, i.e. no forceful termination is performed. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { - query := url.Values{} - if options.Timeout != nil { - query.Set("t", strconv.Itoa(*options.Timeout)) - } - if options.Signal != "" { - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return err - } - if versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) - } - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go deleted file mode 100644 index a5b78999b..000000000 --- a/vendor/github.com/docker/docker/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// ContainerTop shows process information from within a container. 
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { - var response container.ContainerTopOKBody - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go deleted file mode 100644 index 1d8f87316..000000000 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go deleted file mode 100644 index bf68a5300..000000000 --- a/vendor/github.com/docker/docker/client/container_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/container" -) - -// ContainerUpdate updates the resources of a container. -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { - var response container.ContainerUpdateOKBody - serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go deleted file mode 100644 index 8bb6be0a1..000000000 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ /dev/null @@ -1,117 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "net/url" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ - -// ContainerWait waits until the specified container is in a certain state -// indicated by the given condition, either "not-running" (default), -// "next-exit", or "removed". -// -// If this client's API version is before 1.30, condition is ignored and -// ContainerWait will return immediately with the two channels, as the server -// will wait as if the condition were "not-running". -// -// If this client's API version is at least 1.30, ContainerWait blocks until -// the request has been acknowledged by the server (with a response header), -// then returns two channels on which the caller can wait for the exit status -// of the container or an error if there was a problem either beginning the -// wait request or in getting the response. 
This allows the caller to -// synchronize ContainerWait with other calls, such as specifying a -// "next-exit" condition before issuing a ContainerStart request. -func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { - resultC := make(chan container.WaitResponse) - errC := make(chan error, 1) - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - errC <- err - return resultC, errC - } - if versions.LessThan(cli.ClientVersion(), "1.30") { - return cli.legacyContainerWait(ctx, containerID) - } - - query := url.Values{} - if condition != "" { - query.Set("condition", string(condition)) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) - if err != nil { - defer ensureReaderClosed(resp) - errC <- err - return resultC, errC - } - - go func() { - defer ensureReaderClosed(resp) - - body := resp.body - responseText := bytes.NewBuffer(nil) - stream := io.TeeReader(body, responseText) - - var res container.WaitResponse - if err := json.NewDecoder(stream).Decode(&res); err != nil { - // NOTE(nicks): The /wait API does not work well with HTTP proxies. - // At any time, the proxy could cut off the response stream. - // - // But because the HTTP status has already been written, the proxy's - // only option is to write a plaintext error message. - // - // If there's a JSON parsing error, read the real error message - // off the body and send it to the client. - if errors.As(err, new(*json.SyntaxError)) { - _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) - errC <- errors.New(responseText.String()) - } else { - errC <- err - } - return - } - - resultC <- res - }() - - return resultC, errC -} - -// legacyContainerWait returns immediately and doesn't have an option to wait -// until the container is removed. 
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { - resultC := make(chan container.WaitResponse) - errC := make(chan error) - - go func() { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - errC <- err - return - } - defer ensureReaderClosed(resp) - - var res container.WaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go deleted file mode 100644 index ba0d92e9e..000000000 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" -) - -// DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { - var query url.Values - if len(options.Types) > 0 { - query = url.Values{} - for _, t := range options.Types { - query.Add("type", string(t)) - } - } - - serverResp, err := cli.get(ctx, "/system/df", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.DiskUsage{}, err - } - - var du types.DiskUsage - if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) - } - return du, nil -} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go deleted file mode 100644 index 68e6ec5ed..000000000 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/docker/api/types/registry" -) - -// DistributionInspect returns the image digest with the full manifest. 
-func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) { - // Contact the registry to retrieve digest and platform information - var distributionInspect registry.DistributionInspect - if imageRef == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: imageRef} - } - - if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { - return distributionInspect, err - } - - var headers http.Header - if encodedRegistryAuth != "" { - headers = http.Header{ - registry.AuthHeader: {encodedRegistryAuth}, - } - } - - resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers) - defer ensureReaderClosed(resp) - if err != nil { - return distributionInspect, err - } - - err = json.NewDecoder(resp.body).Decode(&distributionInspect) - return distributionInspect, err -} diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go deleted file mode 100644 index 61dd45c1d..000000000 --- a/vendor/github.com/docker/docker/client/envvars.go +++ /dev/null @@ -1,90 +0,0 @@ -package client // import "github.com/docker/docker/client" - -const ( - // EnvOverrideHost is the name of the environment variable that can be used - // to override the default host to connect to (DefaultDockerHost). - // - // This env-var is read by FromEnv and WithHostFromEnv and when set to a - // non-empty value, takes precedence over the default host (which is platform - // specific), or any host already set. - EnvOverrideHost = "DOCKER_HOST" - - // EnvOverrideAPIVersion is the name of the environment variable that can - // be used to override the API version to use. Value should be - // formatted as MAJOR.MINOR, for example, "1.19". - // - // This env-var is read by FromEnv and WithVersionFromEnv and when set to a - // non-empty value, takes precedence over API version negotiation. - // - // This environment variable should be used for debugging purposes only, as - // it can set the client to use an incompatible (or invalid) API version. - EnvOverrideAPIVersion = "DOCKER_API_VERSION" - - // EnvOverrideCertPath is the name of the environment variable that can be - // used to specify the directory from which to load the TLS certificates - // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure - // the Client for a TCP connection protected by TLS client authentication. - // - // TLS certificate verification is enabled by default if the Client is configured - // to use a TLS connection. Refer to EnvTLSVerify below to learn how to - // disable verification for testing purposes. - // - // WARNING: Access to the remote API is equivalent to root access to the - // host where the daemon runs. Do not expose the API without protection, - // and only if needed. Make sure you are familiar with the "daemon attack - // surface" (https://docs.docker.com/go/attack-surface/). - // - // For local access to the API, it is recommended to connect with the daemon - // using the default local socket connection (on Linux), or the named pipe - // (on Windows). - // - // If you need to access the API of a remote daemon, consider using an SSH - // (ssh://) connection, which is easier to set up, and requires no additional - // configuration if the host is accessible using ssh. 
- // - // If you cannot use the alternatives above, and you must expose the API over - // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/ - // to learn how to configure the daemon and client to use a TCP connection - // with TLS client authentication. Make sure you know the differences between - // a regular TLS connection and a TLS connection protected by TLS client - // authentication, and verify that the API cannot be accessed by other clients. - EnvOverrideCertPath = "DOCKER_CERT_PATH" - - // EnvTLSVerify is the name of the environment variable that can be used to - // enable or disable TLS certificate verification. When set to a non-empty - // value, TLS certificate verification is enabled, and the client is configured - // to use a TLS connection, using certificates from the default directories - // (within `~/.docker`); refer to EnvOverrideCertPath above for additional - // details. - // - // WARNING: Access to the remote API is equivalent to root access to the - // host where the daemon runs. Do not expose the API without protection, - // and only if needed. Make sure you are familiar with the "daemon attack - // surface" (https://docs.docker.com/go/attack-surface/). - // - // Before setting up your client and daemon to use a TCP connection with TLS - // client authentication, consider using one of the alternatives mentioned - // in EnvOverrideCertPath above. - // - // Disabling TLS certificate verification (for testing purposes) - // - // TLS certificate verification is enabled by default if the Client is configured - // to use a TLS connection, and it is highly recommended to keep verification - // enabled to prevent machine-in-the-middle attacks. Refer to the documentation - // at https://docs.docker.com/engine/security/protect-access/ and pages linked - // from that page to learn how to configure the daemon and client to use a - // TCP connection with TLS client authentication enabled. - // - // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to - // disable TLS certificate verification. Disabling verification is insecure, - // so should only be done for testing purposes. From the Go documentation - // (https://pkg.go.dev/crypto/tls#Config): - // - // InsecureSkipVerify controls whether a client verifies the server's - // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls - // accepts any certificate presented by the server and any host name in that - // certificate. In this mode, TLS is susceptible to machine-in-the-middle - // attacks unless custom verification is used. This should be used only for - // testing or in combination with VerifyConnection or VerifyPeerCertificate. - EnvTLSVerify = "DOCKER_TLS_VERIFY" -) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go deleted file mode 100644 index 0d01e243f..000000000 --- a/vendor/github.com/docker/docker/client/errors.go +++ /dev/null @@ -1,77 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// errConnectionFailed implements an error returned when connection failed. 
-type errConnectionFailed struct { - error -} - -// Error returns a string representation of an errConnectionFailed -func (e errConnectionFailed) Error() string { - return e.error.Error() -} - -func (e errConnectionFailed) Unwrap() error { - return e.error -} - -// IsErrConnectionFailed returns true if the error is caused by connection failed. -func IsErrConnectionFailed(err error) bool { - return errors.As(err, &errConnectionFailed{}) -} - -// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. -func ErrorConnectionFailed(host string) error { - var err error - if host == "" { - err = fmt.Errorf("Cannot connect to the Docker daemon. Is the docker daemon running on this host?") - } else { - err = fmt.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host) - } - return errConnectionFailed{error: err} -} - -// IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. It is an alias for [errdefs.IsNotFound]. -func IsErrNotFound(err error) bool { - return errdefs.IsNotFound(err) -} - -type objectNotFoundError struct { - object string - id string -} - -func (e objectNotFoundError) NotFound() {} - -func (e objectNotFoundError) Error() string { - return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) -} - -// NewVersionError returns an error if the APIVersion required is less than the -// current supported version. -// -// It performs API-version negotiation if the Client is configured with this -// option, otherwise it assumes the latest API version is used. -func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature string) error { - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return err - } - if cli.version != "" && versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go deleted file mode 100644 index d3ab26bed..000000000 --- a/vendor/github.com/docker/docker/client/events.go +++ /dev/null @@ -1,100 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "time" - - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" -) - -// Events returns a stream of events in the daemon. It's up to the caller to close the stream -// by cancelling the context. Once the stream has been completely read an io.EOF error will -// be sent over the error channel. If an error is sent all processing will be stopped. It's up -// to the caller to reopen the stream in the event of an error by reinvoking this method. 
-func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) { - messages := make(chan events.Message) - errs := make(chan error, 1) - - started := make(chan struct{}) - go func() { - defer close(errs) - - query, err := buildEventsQueryParams(cli.version, options) - if err != nil { - close(started) - errs <- err - return - } - - resp, err := cli.get(ctx, "/events", query, nil) - if err != nil { - close(started) - errs <- err - return - } - defer resp.body.Close() - - decoder := json.NewDecoder(resp.body) - - close(started) - for { - select { - case <-ctx.Done(): - errs <- ctx.Err() - return - default: - var event events.Message - if err := decoder.Decode(&event); err != nil { - errs <- err - return - } - - select { - case messages <- event: - case <-ctx.Done(): - errs <- ctx.Err() - return - } - } - } - }() - <-started - - return messages, errs -} - -func buildEventsQueryParams(cliVersion string, options events.ListOptions) (url.Values, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go deleted file mode 100644 index 839d4c5cd..000000000 --- a/vendor/github.com/docker/docker/client/hijack.go +++ /dev/null @@ -1,141 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bufio" - "context" - "fmt" - "net" - "net/http" - "net/url" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/pkg/errors" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" -) - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - req, err := cli.buildRequest(ctx, http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) - if err != nil { - return types.HijackedResponse{}, err - } - conn, mediaType, err := cli.setupHijackConn(req, "tcp") - if err != nil { - return types.HijackedResponse{}, err - } - - return types.NewHijackedResponse(conn, mediaType), err -} - -// DialHijack returns a hijacked connection with negotiated protocol proto. 
-func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, meta) - - conn, _, err := cli.setupHijackConn(req, proto) - return conn, err -} - -func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) { - ctx := req.Context() - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", proto) - - dialer := cli.Dialer() - conn, err := dialer(ctx) - if err != nil { - return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - defer func() { - if retErr != nil { - conn.Close() - } - }() - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - _ = tcpConn.SetKeepAlive(true) - _ = tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - hc := &hijackedConn{conn, bufio.NewReader(conn)} - - // Server hijacks the connection, error 'connection closed' expected - resp, err := otelhttp.NewTransport(hc).RoundTrip(req) - if err != nil { - return nil, "", err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - _ = resp.Body.Close() - return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } - - if hc.r.Buffered() > 0 { - // If there is buffered content, wrap the connection. We return an - // object that implements CloseWrite if the underlying connection - // implements it. - if _, ok := hc.Conn.(types.CloseWriter); ok { - conn = &hijackedConnCloseWriter{hc} - } else { - conn = hc - } - } else { - hc.r.Reset(nil) - } - - var mediaType string - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { - // Prior to 1.42, Content-Type is always set to raw-stream and not relevant - mediaType = resp.Header.Get("Content-Type") - } - - return conn, mediaType, nil -} - -// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case -// that a) there was already buffered data in the http layer when Hijack() was -// called, and b) the underlying net.Conn does *not* implement CloseWrite(). -// hijackedConn does not implement CloseWrite() either. -type hijackedConn struct { - net.Conn - r *bufio.Reader -} - -func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) { - if err := req.Write(c.Conn); err != nil { - return nil, err - } - return http.ReadResponse(c.r, req) -} - -func (c *hijackedConn) Read(b []byte) (int, error) { - return c.r.Read(b) -} - -// hijackedConnCloseWriter is a hijackedConn which additionally implements -// CloseWrite(). It is returned by setupHijackConn in the case that a) there -// was already buffered data in the http layer when Hijack() was called, and b) -// the underlying net.Conn *does* implement CloseWrite(). 
-type hijackedConnCloseWriter struct { - *hijackedConn -} - -var _ types.CloseWriter = &hijackedConnCloseWriter{} - -func (c *hijackedConnCloseWriter) CloseWrite() error { - conn := c.Conn.(types.CloseWriter) - return conn.CloseWrite() -} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go deleted file mode 100644 index d294ddc8b..000000000 --- a/vendor/github.com/docker/docker/client/image_build.go +++ /dev/null @@ -1,144 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ImageBuild sends a request to the daemon to build images. -// The Body in the response implements an io.ReadCloser and it's up to the caller to -// close it. -func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(ctx, options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header{} - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - headers.Set("Content-Type", "application/x-tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: getDockerOS(serverResp.header.Get("Server")), - }, nil -} - -func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - "securityopt": options.SecurityOpt, - "extrahosts": options.ExtraHosts, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if options.Squash { - if err := cli.NewVersionError(ctx, "1.25", "squash"); err != nil { - return query, err - } - query.Set("squash", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("networkmode", options.NetworkMode) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - query.Set("target", options.Target) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err 
- } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - - cacheFromJSON, err := json.Marshal(options.CacheFrom) - if err != nil { - return query, err - } - query.Set("cachefrom", string(cacheFromJSON)) - if options.SessionID != "" { - query.Set("session", options.SessionID) - } - if options.Platform != "" { - if err := cli.NewVersionError(ctx, "1.32", "platform"); err != nil { - return query, err - } - query.Set("platform", strings.ToLower(options.Platform)) - } - if options.BuildID != "" { - query.Set("buildid", options.BuildID) - } - query.Set("version", string(options.Version)) - - if options.Outputs != nil { - outputsJSON, err := json.Marshal(options.Outputs) - if err != nil { - return query, err - } - query.Set("outputs", string(outputsJSON)) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go deleted file mode 100644 index 7c7873dca..000000000 --- a/vendor/github.com/docker/docker/client/image_create.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/registry" -) - -// ImageCreate creates a new image based on the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - query.Set("tag", getAPITagFromNamedRef(ref)) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - return cli.post(ctx, "/images/create", query, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go deleted file mode 100644 index b5bea10d8..000000000 --- a/vendor/github.com/docker/docker/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageHistory returns the changes in an image in history format. 
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { - var history []image.HistoryResponseItem - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - return history, err -} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go deleted file mode 100644 index 43d55eda8..000000000 --- a/vendor/github.com/docker/docker/client/image_import.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types/image" -) - -// ImageImport creates a new image based on the source options. -// It returns the JSON content in the response body. -func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) { - if ref != "" { - // Check if the given image name can be resolved - if _, err := reference.ParseNormalizedNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go deleted file mode 100644 index 1de10e5a0..000000000 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types" -) - -// ImageInspectWithRaw returns the image information and its raw representation. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { - if imageID == "" { - return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ImageInspect{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go deleted file mode 100644 index a9cc1e21e..000000000 --- a/vendor/github.com/docker/docker/client/image_list.go +++ /dev/null @@ -1,59 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/versions" -) - -// ImageList returns a list of images in the docker host. 
-func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) { - var images []image.Summary - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return images, err - } - - query := url.Values{} - - optionFilters := options.Filters - referenceFilters := optionFilters.Get("reference") - if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { - query.Set("filter", referenceFilters[0]) - for _, filterValue := range referenceFilters { - optionFilters.Del("reference", filterValue) - } - } - if optionFilters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.All { - query.Set("all", "1") - } - if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("shared-size", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - return images, err -} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go deleted file mode 100644 index c68f0013e..000000000 --- a/vendor/github.com/docker/docker/client/image_load.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. 
-func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - resp, err := cli.postRaw(ctx, "/images/load", v, input, http.Header{ - "Content-Type": {"application/x-tar"}, - }) - if err != nil { - return image.LoadResponse{}, err - } - return image.LoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go deleted file mode 100644 index 5ee987e24..000000000 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" -) - -// ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (image.PruneReport, error) { - var report image.PruneReport - - if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go deleted file mode 100644 index 1634c4c80..000000000 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ /dev/null @@ -1,64 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/errdefs" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
-// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.PullOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(refStr) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - if !options.All { - query.Set("tag", getAPITagFromNamedRef(ref)) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -// getAPITagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api expects -// digests to be sent as tags and makes a distinction between the name -// and tag/digest part of a reference. -func getAPITagFromNamedRef(ref reference.Named) string { - if digested, ok := ref.(reference.Digested); ok { - return digested.Digest().String() - } - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - return tagged.Tag() - } - return "" -} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go deleted file mode 100644 index 16f9c4651..000000000 --- a/vendor/github.com/docker/docker/client/image_push.go +++ /dev/null @@ -1,73 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
-func (cli *Client) ImagePush(ctx context.Context, image string, options image.PushOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return nil, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - name := reference.FamiliarName(ref) - query := url.Values{} - if !options.All { - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - } - - if options.Platform != nil { - if err := cli.NewVersionError(ctx, "1.46", "platform"); err != nil { - return nil, err - } - - p := *options.Platform - pJson, err := json.Marshal(p) - if err != nil { - return nil, fmt.Errorf("invalid platform: %v", err) - } - - query.Set("platform", string(pJson)) - } - - resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go deleted file mode 100644 index 652d1bfa3..000000000 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options image.RemoveOptions) ([]image.DeleteResponse, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - var dels []image.DeleteResponse - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return dels, err - } - - err = json.NewDecoder(resp.body).Decode(&dels) - return dels, err -} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go deleted file mode 100644 index d1314e4b2..000000000 --- a/vendor/github.com/docker/docker/client/image_save.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. 
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go deleted file mode 100644 index 0a0745757..000000000 --- a/vendor/github.com/docker/docker/client/image_search.go +++ /dev/null @@ -1,54 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" -) - -// ImageSearch makes the docker host search by a term in a remote registry. -// The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - if options.Limit > 0 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - defer ensureReaderClosed(resp) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - return cli.get(ctx, "/images/search", query, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go deleted file mode 100644 index ea6b4a1e6..000000000 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/distribution/reference" - "github.com/pkg/errors" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, source, target string) error { - if _, err := reference.ParseAnyReference(source); err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) - } - - ref, err := reference.ParseNormalizedNamed(target) - if err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - ref = reference.TagNameOnly(ref) - - query := url.Values{} - query.Set("repo", reference.FamiliarName(ref)) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - - resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/info.go 
b/vendor/github.com/docker/docker/client/info.go deleted file mode 100644 index cc3fcc467..000000000 --- a/vendor/github.com/docker/docker/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types/system" -) - -// Info returns information about the docker server. -func (cli *Client) Info(ctx context.Context) (system.Info, error) { - var info system.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return info, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go deleted file mode 100644 index cc60a5d13..000000000 --- a/vendor/github.com/docker/docker/client/interface.go +++ /dev/null @@ -1,202 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/system" - "github.com/docker/docker/api/types/volume" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
-type CommonAPIClient interface { - ConfigAPIClient - ContainerAPIClient - DistributionAPIClient - ImageAPIClient - NodeAPIClient - NetworkAPIClient - PluginAPIClient - ServiceAPIClient - SwarmAPIClient - SecretAPIClient - SystemAPIClient - VolumeAPIClient - ClientVersion() string - DaemonHost() string - HTTPClient() *http.Client - ServerVersion(ctx context.Context) (types.Version, error) - NegotiateAPIVersion(ctx context.Context) - NegotiateAPIVersionPing(types.Ping) - DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) - Dialer() func(context.Context) (net.Conn, error) - Close() error -} - -// ContainerAPIClient defines API client methods for the containers -type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options container.AttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) - ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, options container.ExecStartOptions) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options container.ListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options container.RemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options container.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, options container.StopOptions) error - ContainerStatPath(ctx context.Context, container, path string) (container.PathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (container.StatsResponseReader, error) - ContainerStatsOneShot(ctx context.Context, container string) (container.StatsResponseReader, error) - ContainerStart(ctx context.Context, container string, options container.StartOptions) error - ContainerStop(ctx context.Context, container string, options container.StopOptions) error - ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, 
updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options container.CopyToContainerOptions) error - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) -} - -// DistributionAPIClient defines API client methods for the registry -type DistributionAPIClient interface { - DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) -} - -// ImageAPIClient defines API client methods for the images -type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) - BuildCancel(ctx context.Context, id string) error - ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) - ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error) - ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string) error - ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error) -} - -// NetworkAPIClient defines API client methods for the networks -type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) - NetworkDisconnect(ctx context.Context, network, container string, force bool) error - NetworkInspect(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, error) - NetworkInspectWithRaw(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, []byte, error) - NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) - NetworkRemove(ctx context.Context, network string) error - NetworksPrune(ctx context.Context, pruneFilter filters.Args) (network.PruneReport, error) -} - -// NodeAPIClient defines API client methods for the nodes -type NodeAPIClient interface { - NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) - NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) - 
NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error - NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error -} - -// PluginAPIClient defines API client methods for the plugins -type PluginAPIClient interface { - PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) - PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error - PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error - PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error - PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) - PluginSet(ctx context.Context, name string, args []string) error - PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) - PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error -} - -// ServiceAPIClient defines API client methods for the services -type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) - ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) - TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) -} - -// SwarmAPIClient defines API client methods for the swarm -type SwarmAPIClient interface { - SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) - SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) - SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error - SwarmLeave(ctx context.Context, force bool) error - SwarmInspect(ctx context.Context) (swarm.Swarm, error) - SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error -} - -// SystemAPIClient defines API client methods for the system -type SystemAPIClient interface { - Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (system.Info, error) - RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) - Ping(ctx context.Context) (types.Ping, error) -} - -// VolumeAPIClient defines API client methods for the volumes -type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volume.CreateOptions) 
(volume.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) - VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) - VolumeRemove(ctx context.Context, volumeID string, force bool) error - VolumesPrune(ctx context.Context, pruneFilter filters.Args) (volume.PruneReport, error) - VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error -} - -// SecretAPIClient defines API client methods for secrets -type SecretAPIClient interface { - SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) - SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) - SecretRemove(ctx context.Context, id string) error - SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) - SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error -} - -// ConfigAPIClient defines API client methods for configs -type ConfigAPIClient interface { - ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) - ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) - ConfigRemove(ctx context.Context, id string) error - ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) - ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error -} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go deleted file mode 100644 index c585c1045..000000000 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/checkpoint" -) - -type apiClientExperimental interface { - CheckpointAPIClient -} - -// CheckpointAPIClient defines API client methods for the checkpoints -type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options checkpoint.CreateOptions) error - CheckpointDelete(ctx context.Context, container string, options checkpoint.DeleteOptions) error - CheckpointList(ctx context.Context, container string, options checkpoint.ListOptions) ([]checkpoint.Summary, error) -} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go deleted file mode 100644 index 5502cd742..000000000 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - CommonAPIClient - apiClientExperimental -} - -// Ensure that Client always implements APIClient. 
-var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go deleted file mode 100644 index 19e985e0b..000000000 --- a/vendor/github.com/docker/docker/client/login.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/registry" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns unauthorizedError when the authentication fails. -func (cli *Client) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - defer ensureReaderClosed(resp) - - if err != nil { - return registry.AuthenticateOKBody{}, err - } - - var response registry.AuthenticateOKBody - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go deleted file mode 100644 index 8daf89063..000000000 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/network" -) - -// NetworkConnect connects a container to an existent network in the docker host. -func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := network.ConnectOptions{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go deleted file mode 100644 index 850e31cc9..000000000 --- a/vendor/github.com/docker/docker/client/network_create.go +++ /dev/null @@ -1,41 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) { - var response network.CreateResponse - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return response, err - } - - networkCreateRequest := network.CreateRequest{ - CreateOptions: options, - Name: name, - } - if versions.LessThan(cli.version, "1.44") { - enabled := true - networkCreateRequest.CheckDuplicate = &enabled //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. 
- } - - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go deleted file mode 100644 index aaf428d85..000000000 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/network" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. -func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := network.DisconnectOptions{ - Container: containerID, - Force: force, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go deleted file mode 100644 index afc47de6f..000000000 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ /dev/null @@ -1,46 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/url" - - "github.com/docker/docker/api/types/network" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) - return networkResource, err -} - -// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, []byte, error) { - if networkID == "" { - return network.Inspect{}, nil, objectNotFoundError{object: "network", id: networkID} - } - query := url.Values{} - if options.Verbose { - query.Set("verbose", "true") - } - if options.Scope != "" { - query.Set("scope", options.Scope) - } - - resp, err := cli.get(ctx, "/networks/"+networkID, query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return network.Inspect{}, nil, err - } - - raw, err := io.ReadAll(resp.body) - if err != nil { - return network.Inspect{}, nil, err - } - - var nw network.Inspect - err = json.NewDecoder(bytes.NewReader(raw)).Decode(&nw) - return nw, raw, err -} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go deleted file mode 100644 index 72957d47f..000000000 --- a/vendor/github.com/docker/docker/client/network_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" -) - -// NetworkList returns the list of networks configured in the docker host. 
-func (cli *Client) NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []network.Summary - resp, err := cli.get(ctx, "/networks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - return networkResources, err -} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go deleted file mode 100644 index 708cc61a4..000000000 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" -) - -// NetworksPrune requests the daemon to delete unused networks -func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (network.PruneReport, error) { - var report network.PruneReport - - if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving network prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go deleted file mode 100644 index 9d6c6cef0..000000000 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go deleted file mode 100644 index 95ab9b1be..000000000 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeInspectWithRaw returns the node information. 
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - if nodeID == "" { - return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} - } - serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Node{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Node{}, nil, err - } - - var response swarm.Node - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go deleted file mode 100644 index 1a9e6bfb1..000000000 --- a/vendor/github.com/docker/docker/client/node_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/nodes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var nodes []swarm.Node - err = json.NewDecoder(resp.body).Decode(&nodes) - return nodes, err -} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go deleted file mode 100644 index e44436deb..000000000 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// NodeRemove removes a Node. -func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go deleted file mode 100644 index 0d0fc3b78..000000000 --- a/vendor/github.com/docker/docker/client/node_update.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeUpdate updates a Node. 
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go deleted file mode 100644 index ddb0ca399..000000000 --- a/vendor/github.com/docker/docker/client/options.go +++ /dev/null @@ -1,233 +0,0 @@ -package client - -import ( - "context" - "net" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "go.opentelemetry.io/otel/trace" -) - -// Opt is a configuration option to initialize a [Client]. -type Opt func(*Client) error - -// FromEnv configures the client with values from environment variables. It -// is the equivalent of using the [WithTLSClientConfigFromEnv], [WithHostFromEnv], -// and [WithVersionFromEnv] options. -// -// FromEnv uses the following environment variables: -// -// - DOCKER_HOST ([EnvOverrideHost]) to set the URL to the docker server. -// - DOCKER_API_VERSION ([EnvOverrideAPIVersion]) to set the version of the -// API to use, leave empty for latest. -// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from -// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem'). -// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification -// (off by default). -func FromEnv(c *Client) error { - ops := []Opt{ - WithTLSClientConfigFromEnv(), - WithHostFromEnv(), - WithVersionFromEnv(), - } - for _, op := range ops { - if err := op(c); err != nil { - return err - } - } - return nil -} - -// WithDialContext applies the dialer to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. It returns -// an error if the client does not have a [http.Transport] configured. -func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { - return func(c *Client) error { - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.DialContext = dialContext - return nil - } - return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) - } -} - -// WithHost overrides the client host with the specified one. -func WithHost(host string) Opt { - return func(c *Client) error { - hostURL, err := ParseHostURL(host) - if err != nil { - return err - } - c.host = host - c.proto = hostURL.Scheme - c.addr = hostURL.Host - c.basePath = hostURL.Path - if transport, ok := c.client.Transport.(*http.Transport); ok { - return sockets.ConfigureTransport(transport, c.proto, c.addr) - } - return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) - } -} - -// WithHostFromEnv overrides the client host with the host specified in the -// DOCKER_HOST ([EnvOverrideHost]) environment variable. If DOCKER_HOST is not set, -// or set to an empty value, the host is not modified. -func WithHostFromEnv() Opt { - return func(c *Client) error { - if host := os.Getenv(EnvOverrideHost); host != "" { - return WithHost(host)(c) - } - return nil - } -} - -// WithHTTPClient overrides the client's HTTP client with the specified one. 
-func WithHTTPClient(client *http.Client) Opt { - return func(c *Client) error { - if client != nil { - c.client = client - } - return nil - } -} - -// WithTimeout configures the time limit for requests made by the HTTP client. -func WithTimeout(timeout time.Duration) Opt { - return func(c *Client) error { - c.client.Timeout = timeout - return nil - } -} - -// WithUserAgent configures the User-Agent header to use for HTTP requests. -// It overrides any User-Agent set in headers. When set to an empty string, -// the User-Agent header is removed, and no header is sent. -func WithUserAgent(ua string) Opt { - return func(c *Client) error { - c.userAgent = &ua - return nil - } -} - -// WithHTTPHeaders appends custom HTTP headers to the client's default headers. -// It does not allow for built-in headers (such as "User-Agent", if set) to -// be overridden. Also see [WithUserAgent]. -func WithHTTPHeaders(headers map[string]string) Opt { - return func(c *Client) error { - c.customHTTPHeaders = headers - return nil - } -} - -// WithScheme overrides the client scheme with the specified one. -func WithScheme(scheme string) Opt { - return func(c *Client) error { - c.scheme = scheme - return nil - } -} - -// WithTLSClientConfig applies a TLS config to the client transport. -func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { - return func(c *Client) error { - transport, ok := c.client.Transport.(*http.Transport) - if !ok { - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) - } - config, err := tlsconfig.Client(tlsconfig.Options{ - CAFile: cacertPath, - CertFile: certPath, - KeyFile: keyPath, - ExclusiveRootPools: true, - }) - if err != nil { - return errors.Wrap(err, "failed to create tls config") - } - transport.TLSClientConfig = config - return nil - } -} - -// WithTLSClientConfigFromEnv configures the client's TLS settings with the -// settings in the DOCKER_CERT_PATH ([EnvOverrideCertPath]) and DOCKER_TLS_VERIFY -// ([EnvTLSVerify]) environment variables. If DOCKER_CERT_PATH is not set or empty, -// TLS configuration is not modified. -// -// WithTLSClientConfigFromEnv uses the following environment variables: -// -// - DOCKER_CERT_PATH ([EnvOverrideCertPath]) to specify the directory from -// which to load the TLS certificates ("ca.pem", "cert.pem", "key.pem"). -// - DOCKER_TLS_VERIFY ([EnvTLSVerify]) to enable or disable TLS verification -// (off by default). -func WithTLSClientConfigFromEnv() Opt { - return func(c *Client) error { - dockerCertPath := os.Getenv(EnvOverrideCertPath) - if dockerCertPath == "" { - return nil - } - tlsc, err := tlsconfig.Client(tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", - }) - if err != nil { - return err - } - - c.client = &http.Client{ - Transport: &http.Transport{TLSClientConfig: tlsc}, - CheckRedirect: CheckRedirect, - } - return nil - } -} - -// WithVersion overrides the client version with the specified one. If an empty -// version is provided, the value is ignored to allow version negotiation -// (see [WithAPIVersionNegotiation]). 
-func WithVersion(version string) Opt { - return func(c *Client) error { - if version != "" { - c.version = version - c.manualOverride = true - } - return nil - } -} - -// WithVersionFromEnv overrides the client version with the version specified in -// the DOCKER_API_VERSION ([EnvOverrideAPIVersion]) environment variable. -// If DOCKER_API_VERSION is not set, or set to an empty value, the version -// is not modified. -func WithVersionFromEnv() Opt { - return func(c *Client) error { - return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) - } -} - -// WithAPIVersionNegotiation enables automatic API version negotiation for the client. -// With this option enabled, the client automatically negotiates the API version -// to use when making requests. API version negotiation is performed on the first -// request; subsequent requests do not re-negotiate. -func WithAPIVersionNegotiation() Opt { - return func(c *Client) error { - c.negotiateVersion = true - return nil - } -} - -// WithTraceProvider sets the trace provider for the client. -// If this is not set then the global trace provider will be used. -func WithTraceProvider(provider trace.TracerProvider) Opt { - return func(c *Client) error { - c.tp = provider - return nil - } -} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go deleted file mode 100644 index bf3e9b1cd..000000000 --- a/vendor/github.com/docker/docker/client/ping.go +++ /dev/null @@ -1,76 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/http" - "path" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/errdefs" -) - -// Ping pings the server and returns the value of the "Docker-Experimental", -// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use -// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported -// by the daemon. It ignores internal server errors returned by the API, which -// may be returned if the daemon is in an unhealthy state, but returns errors -// for other non-success status codes, failing to connect to the API, or failing -// to parse the API response. -func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { - var ping types.Ping - - // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() - // because ping requests are used during API version negotiation, so we want - // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping - req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err := cli.doRequest(req) - if err == nil { - defer ensureReaderClosed(serverResp) - switch serverResp.statusCode { - case http.StatusOK, http.StatusInternalServerError: - // Server handled the request, so parse the response - return parsePingResponse(cli, serverResp) - } - } else if IsErrConnectionFailed(err) { - return ping, err - } - - // HEAD failed; fallback to GET. 
- req.Method = http.MethodGet - serverResp, err = cli.doRequest(req) - defer ensureReaderClosed(serverResp) - if err != nil { - return ping, err - } - return parsePingResponse(cli, serverResp) -} - -func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { - var ping types.Ping - if resp.header == nil { - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) - } - ping.APIVersion = resp.header.Get("API-Version") - ping.OSType = resp.header.Get("OSType") - if resp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true - } - if bv := resp.header.Get("Builder-Version"); bv != "" { - ping.BuilderVersion = types.BuilderVersion(bv) - } - if si := resp.header.Get("Swarm"); si != "" { - state, role, _ := strings.Cut(si, "/") - ping.SwarmStatus = &swarm.Status{ - NodeState: swarm.LocalNodeState(state), - ControlAvailable: role == "manager", - } - } - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) -} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go deleted file mode 100644 index b95dbaf68..000000000 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginCreate creates a plugin -func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { - headers := http.Header(make(map[string][]string)) - headers.Set("Content-Type", "application/x-tar") - - query := url.Values{} - query.Set("name", createOptions.RepoName) - - resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go deleted file mode 100644 index 01f6574f9..000000000 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginDisable disables a plugin -func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go deleted file mode 100644 index 736da48bd..000000000 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// PluginEnable enables a plugin -func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { - query := url.Values{} - query.Set("timeout", strconv.Itoa(options.Timeout)) - - resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go 
b/vendor/github.com/docker/docker/client/plugin_inspect.go deleted file mode 100644 index f09e46066..000000000 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types" -) - -// PluginInspectWithRaw inspects an existing plugin -func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { - if name == "" { - return nil, nil, objectNotFoundError{object: "plugin", id: name} - } - resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return nil, nil, err - } - var p types.Plugin - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&p) - return &p, body, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go deleted file mode 100644 index a0d8c3500..000000000 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ /dev/null @@ -1,117 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "io" - "net/http" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - // set name for plugin pull, if empty should default to remote reference - query.Set("name", name) - - resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) - if err != nil { - return nil, err - } - - name = resp.header.Get("Docker-Plugin-Name") - - pr, pw := io.Pipe() - go func() { // todo: the client should probably be designed more around the actual api - _, err := io.Copy(pw, resp.body) - if err != nil { - pw.CloseWithError(err) - return - } - defer func() { - if err != nil { - delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) - ensureReaderClosed(delResp) - } - }() - if len(options.Args) > 0 { - if err := cli.PluginSet(ctx, name, options.Args); err != nil { - pw.CloseWithError(err) - return - } - } - - if options.Disabled { - pw.Close() - return - } - - enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(enableErr) - }() - return pr, nil -} - -func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - return cli.get(ctx, "/plugins/privileges", query, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} - -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { - return cli.post(ctx, "/plugins/pull", query, privileges, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} - -func (cli *Client) 
checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { - resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - // todo: do inspect before to check existing name before checking privileges - newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) - if privilegeErr != nil { - ensureReaderClosed(resp) - return nil, privilegeErr - } - options.RegistryAuth = newAuthHeader - resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - } - if err != nil { - ensureReaderClosed(resp) - return nil, err - } - - var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { - ensureReaderClosed(resp) - return nil, err - } - ensureReaderClosed(resp) - - if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(ctx, privileges) - if err != nil { - return nil, err - } - if !accept { - return nil, errors.Errorf("permission denied while installing plugin %s", options.RemoteRef) - } - } - return privileges, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go deleted file mode 100644 index 2091a054d..000000000 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { - var plugins types.PluginsListResponse - query := url.Values{} - - if filter.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return plugins, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/plugins", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return plugins, err - } - - err = json.NewDecoder(resp.body).Decode(&plugins) - return plugins, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go deleted file mode 100644 index 8f68a86ee..000000000 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - - "github.com/docker/docker/api/types/registry" -) - -// PluginPush pushes a plugin to a registry -func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, http.Header{ - registry.AuthHeader: {registryAuth}, - }) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go deleted file mode 100644 index 4cd66958c..000000000 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginRemove 
removes a plugin -func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go deleted file mode 100644 index dcf5752ca..000000000 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ /dev/null @@ -1,12 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" -) - -// PluginSet modifies settings for an existing plugin -func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { - resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go deleted file mode 100644 index 5cade450f..000000000 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ /dev/null @@ -1,42 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" -) - -// PluginUpgrade upgrades a plugin -func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - if err := cli.NewVersionError(ctx, "1.26", "plugin upgrade"); err != nil { - return nil, err - } - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { - return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, http.Header{ - registry.AuthHeader: {registryAuth}, - }) -} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go deleted file mode 100644 index 6eea9b4e4..000000000 --- a/vendor/github.com/docker/docker/client/request.go +++ /dev/null @@ -1,284 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "reflect" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int - reqURL *url.URL -} - -// head sends an http request to the docker API using the method HEAD. 
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) -} - -// get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) -} - -// post sends an http request to the docker API using the method POST with a specific Go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) -} - -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) -} - -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers http.Header) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.putRaw(ctx, path, query, body, headers) -} - -// putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { - // PUT requests are expected to always have a body (apparently) - // so explicitly pass an empty body to sendRequest to signal that - // it should set the Content-Type header if not already present. - if body == nil { - body = http.NoBody - } - return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) -} - -// delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers http.Header) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) -} - -func encodeBody(obj interface{}, headers http.Header) (io.Reader, http.Header, error) { - if obj == nil { - return nil, headers, nil - } - // encoding/json encodes a nil pointer as the JSON document `null`, - // irrespective of whether the type implements json.Marshaler or encoding.TextMarshaler. - // That is almost certainly not what the caller intended as the request body. - if reflect.TypeOf(obj).Kind() == reflect.Ptr && reflect.ValueOf(obj).IsNil() { - return nil, headers, nil - } - - body, err := encodeData(obj) - if err != nil { - return nil, headers, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - return body, headers, nil -} - -func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) { - req, err := http.NewRequestWithContext(ctx, method, path, body) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, headers) - req.URL.Scheme = cli.scheme - req.URL.Host = cli.addr - - if cli.proto == "unix" || cli.proto == "npipe" { - // Override host header for non-tcp connections. 
- req.Host = DummyHost - } - - if body != nil && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - return req, nil -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers http.Header) (serverResponse, error) { - req, err := cli.buildRequest(ctx, method, cli.getAPIPath(ctx, path, query), body, headers) - if err != nil { - return serverResponse{}, err - } - - resp, err := cli.doRequest(req) - switch { - case errors.Is(err, context.Canceled): - return serverResponse{}, errdefs.Cancelled(err) - case errors.Is(err, context.DeadlineExceeded): - return serverResponse{}, errdefs.Deadline(err) - case err == nil: - err = cli.checkResponseErr(resp) - } - return resp, errdefs.FromStatusCode(err, resp.statusCode) -} - -// FIXME(thaJeztah): Should this actually return a serverResp when a connection error occurred? -func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - - resp, err := cli.client.Do(req) - if err != nil { - if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, errConnectionFailed{fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)} - } - - if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errConnectionFailed{errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")} - } - - // Don't decorate context sentinel errors; users may be comparing to - // them directly. - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return serverResp, err - } - - if uErr, ok := err.(*url.Error); ok { - if nErr, ok := uErr.Err.(*net.OpError); ok { - if os.IsPermission(nErr.Err) { - return serverResp, errConnectionFailed{errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)} - } - } - } - - if nErr, ok := err.(net.Error); ok { - // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)? - if nErr.Timeout() { - return serverResp, ErrorConnectionFailed(cli.host) - } - if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") { - return serverResp, ErrorConnectionFailed(cli.host) - } - } - - // Although there's not a strongly typed error for this in go-winio, - // lots of people are using the default configuration for the docker - // daemon on Windows where the daemon is listening on a named pipe - // `//./pipe/docker_engine, and the client must be running elevated. - // Give users a clue rather than the not-overly useful message - // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: - // open //./pipe/docker_engine: The system cannot find the file specified.`. 
- // Note we can't string compare "The system cannot find the file specified" as - // this is localised - for example in French the error would be - // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` - if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - // Checks if client is running with elevated privileges - if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil { - err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") - } else { - _ = f.Close() - err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") - } - } - - return serverResp, errConnectionFailed{errors.Wrap(err, "error during connect")} - } - - if resp != nil { - serverResp.statusCode = resp.StatusCode - serverResp.body = resp.Body - serverResp.header = resp.Header - } - return serverResp, nil -} - -func (cli *Client) checkResponseErr(serverResp serverResponse) error { - if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { - return nil - } - - var body []byte - var err error - if serverResp.body != nil { - bodyMax := 1 * 1024 * 1024 // 1 MiB - bodyR := &io.LimitedReader{ - R: serverResp.body, - N: int64(bodyMax), - } - body, err = io.ReadAll(bodyR) - if err != nil { - return err - } - if bodyR.N == 0 { - return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) - } - } - if len(body) == 0 { - return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) - } - - var daemonErr error - if serverResp.header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return errors.Wrap(err, "Error reading JSON") - } - daemonErr = errors.New(strings.TrimSpace(errorResponse.Message)) - } else { - daemonErr = errors.New(strings.TrimSpace(string(body))) - } - return errors.Wrap(daemonErr, "Error response from daemon") -} - -func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request { - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't change OUR headers - for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { - continue - } - req.Header.Set(k, v) - } - - for k, v := range headers { - req.Header[http.CanonicalHeaderKey(k)] = v - } - - if cli.userAgent != nil { - if *cli.userAgent == "" { - req.Header.Del("User-Agent") - } else { - req.Header.Set("User-Agent", *cli.userAgent) - } - } - return req -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response serverResponse) { - if response.body != nil { - // Drain up to 512 bytes and close the body to let the Transport reuse the connection - _, _ = io.CopyN(io.Discard, response.body, 512) - _ = response.body.Close() - } -} diff --git a/vendor/github.com/docker/docker/client/secret_create.go 
b/vendor/github.com/docker/docker/client/secret_create.go deleted file mode 100644 index 7b7f1ba74..000000000 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// SecretCreate creates a new secret. -func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var response types.SecretCreateResponse - if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go deleted file mode 100644 index a9cb59889..000000000 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretInspectWithRaw returns the secret information with raw data -func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError(ctx, "1.25", "secret inspect"); err != nil { - return swarm.Secret{}, nil, err - } - if id == "" { - return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} - } - resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Secret{}, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return swarm.Secret{}, nil, err - } - - var secret swarm.Secret - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&secret) - - return secret, body, err -} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go deleted file mode 100644 index 4d21639ef..000000000 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// SecretList returns the list of secrets. 
-func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/secrets", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var secrets []swarm.Secret - err = json.NewDecoder(resp.body).Decode(&secrets) - return secrets, err -} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go deleted file mode 100644 index 079ed6739..000000000 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// SecretRemove removes a secret. -func (cli *Client) SecretRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError(ctx, "1.25", "secret remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go deleted file mode 100644 index 9dfe67198..000000000 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretUpdate attempts to update a secret. -func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - if err := cli.NewVersionError(ctx, "1.25", "secret update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go deleted file mode 100644 index b72cb420d..000000000 --- a/vendor/github.com/docker/docker/client/service_create.go +++ /dev/null @@ -1,193 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/versions" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// ServiceCreate creates a new service. -func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { - var response swarm.ServiceCreateResponse - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. 
- if err := cli.checkVersion(ctx); err != nil { - return response, err - } - - // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container - if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { - service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} - } - - if err := validateServiceSpec(service); err != nil { - return response, err - } - - // ensure that the image is tagged - var resolveWarning string - switch { - case service.TaskTemplate.ContainerSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - case service.TaskTemplate.PluginSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - } - - headers := http.Header{} - if versions.LessThan(cli.version, "1.30") { - // the custom "version" header was used by engine API before 20.10 - // (API 1.30) to switch between client- and server-side lookup of - // image digests. - headers["version"] = []string{cli.version} - } - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } - resp, err := cli.post(ctx, "/services/create", nil, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - if resolveWarning != "" { - response.Warnings = append(response.Warnings, resolveWarning) - } - - return response, err -} - -func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { - var warning string - if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil { - warning = digestWarning(taskSpec.ContainerSpec.Image) - } else { - taskSpec.ContainerSpec.Image = img - if len(imgPlatforms) > 0 { - if taskSpec.Placement == nil { - taskSpec.Placement = &swarm.Placement{} - } - taskSpec.Placement.Platforms = imgPlatforms - } - } - return warning -} - -func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { - var warning string - if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil { - warning = digestWarning(taskSpec.PluginSpec.Remote) - } else { - taskSpec.PluginSpec.Remote = img - if len(imgPlatforms) > 0 { - if taskSpec.Placement == nil { - taskSpec.Placement = &swarm.Placement{} - } - taskSpec.Placement.Platforms = imgPlatforms - } - } - return warning -} - -func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { - distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) - var platforms []swarm.Platform - if err != nil { - return "", nil, err - } - - imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) - - if len(distributionInspect.Platforms) > 0 { - platforms = 
make([]swarm.Platform, 0, len(distributionInspect.Platforms)) - for _, p := range distributionInspect.Platforms { - // clear architecture field for arm. This is a temporary patch to address - // https://github.com/docker/swarmkit/issues/2294. The issue is that while - // image manifests report "arm" as the architecture, the node reports - // something like "armv7l" (includes the variant), which causes arm images - // to stop working with swarm mode. This patch removes the architecture - // constraint for arm images to ensure tasks get scheduled. - arch := p.Architecture - if strings.ToLower(arch) == "arm" { - arch = "" - } - platforms = append(platforms, swarm.Platform{ - Architecture: arch, - OS: p.OS, - }) - } - } - return imageWithDigest, platforms, err -} - -// imageWithDigestString takes an image string and a digest, and updates -// the image string if it didn't originally contain a digest. It returns -// image unmodified in other situations. -func imageWithDigestString(image string, dgst digest.Digest) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { - // ensure that image gets a default tag if none is provided - img, err := reference.WithDigest(namedRef, dgst) - if err == nil { - return reference.FamiliarString(img) - } - } - } - return image -} - -// imageWithTagString takes an image string, and returns a tagged image -// string, adding a 'latest' tag if one was not provided. It returns an -// empty string if a canonical reference was provided -func imageWithTagString(image string) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - return reference.FamiliarString(reference.TagNameOnly(namedRef)) - } - return "" -} - -// digestWarning constructs a formatted warning string using the -// image name that could not be pinned by digest. The formatting -// is hardcoded, but could me made smarter in the future -func digestWarning(image string) string { - return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) -} - -func validateServiceSpec(s swarm.ServiceSpec) error { - if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { - return errors.New("must not specify both a container spec and a plugin spec in the task template") - } - if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { - return errors.New("mismatched runtime with plugin spec") - } - if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { - return errors.New("mismatched runtime with container spec") - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go deleted file mode 100644 index cee020c98..000000000 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceInspectWithRaw returns the service information and the raw data. 
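The service helpers removed here follow one pattern: build a swarm spec, validate and post it, then decode the response. A hedged sketch of creating a service and inspecting it back with defaults inserted; the service name, image, and helper function are placeholders, not code from this repository:

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func createAndInspect(ctx context.Context, cli *client.Client) (swarm.Service, error) {
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "demo-svc"}, // placeholder name
		TaskTemplate: swarm.TaskSpec{
			// ContainerSpec must be set when the runtime is the default container runtime.
			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"}, // placeholder image
		},
	}

	created, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	if err != nil {
		return swarm.Service{}, err
	}
	for _, w := range created.Warnings {
		log.Println("service create warning:", w)
	}

	// InsertDefaults asks the daemon to fill in defaulted fields in the returned spec.
	svc, _, err := cli.ServiceInspectWithRaw(ctx, created.ID, types.ServiceInspectOptions{InsertDefaults: true})
	return svc, err
}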
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { - if serviceID == "" { - return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} - } - query := url.Values{} - query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) - serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Service{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Service{}, nil, err - } - - var response swarm.Service - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go deleted file mode 100644 index f97ec75a5..000000000 --- a/vendor/github.com/docker/docker/client/service_list.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceList returns the list of services. -func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - if options.Status { - query.Set("status", "true") - } - - resp, err := cli.get(ctx, "/services", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var services []swarm.Service - err = json.NewDecoder(resp.body).Decode(&services) - return services, err -} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go deleted file mode 100644 index e9e30a2ab..000000000 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types/container" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ServiceLogs returns the logs generated by a service in an io.ReadCloser. -// It's up to the caller to close the stream. 
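A short sketch of consuming the ServiceLogs stream shown here; the service ID and helper function are placeholders, and the caller is responsible for closing the returned stream:

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func tailServiceLogs(ctx context.Context, cli *client.Client, serviceID string) error {
	rc, err := cli.ServiceLogs(ctx, serviceID, container.LogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Timestamps: true,
		Tail:       "100", // last 100 lines; "all" is also accepted
	})
	if err != nil {
		return err
	}
	defer rc.Close() // the stream is owned by the caller

	// For services without a TTY the stream interleaves stdout/stderr frames;
	// github.com/docker/docker/pkg/stdcopy can split them if needed.
	_, err = io.Copy(os.Stdout, rc)
	return err
}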
-func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go deleted file mode 100644 index 2c46326eb..000000000 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ServiceRemove kills and removes a service. -func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { - resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go deleted file mode 100644 index d2f03f02f..000000000 --- a/vendor/github.com/docker/docker/client/service_update.go +++ /dev/null @@ -1,86 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/versions" -) - -// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. -// It should be the value as set *before* the update. You can find this value in the Meta field -// of swarm.Service, which can be found using ServiceInspectWithRaw. -func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { - response := swarm.ServiceUpdateResponse{} - - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. 
- if err := cli.checkVersion(ctx); err != nil { - return response, err - } - - query := url.Values{} - if options.RegistryAuthFrom != "" { - query.Set("registryAuthFrom", options.RegistryAuthFrom) - } - - if options.Rollback != "" { - query.Set("rollback", options.Rollback) - } - - query.Set("version", version.String()) - - if err := validateServiceSpec(service); err != nil { - return response, err - } - - // ensure that the image is tagged - var resolveWarning string - switch { - case service.TaskTemplate.ContainerSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - case service.TaskTemplate.PluginSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - } - - headers := http.Header{} - if versions.LessThan(cli.version, "1.30") { - // the custom "version" header was used by engine API before 20.10 - // (API 1.30) to switch between client- and server-side lookup of - // image digests. - headers["version"] = []string{cli.version} - } - if options.EncodedRegistryAuth != "" { - headers[registry.AuthHeader] = []string{options.EncodedRegistryAuth} - } - resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - if resolveWarning != "" { - response.Warnings = append(response.Warnings, resolveWarning) - } - - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go deleted file mode 100644 index 19f59dd58..000000000 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// SwarmGetUnlockKey retrieves the swarm's unlock key. -func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { - serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.SwarmUnlockKeyResponse{}, err - } - - var response types.SwarmUnlockKeyResponse - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go deleted file mode 100644 index da3c1637e..000000000 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInit initializes the swarm. 
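For completeness, a minimal sketch of the SwarmInit call above on a single node; both addresses and the helper function are placeholders:

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func initSwarm(ctx context.Context, cli *client.Client) error {
	// SwarmInit returns the ID of the node that became the first manager.
	nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{
		ListenAddr:    "0.0.0.0:2377", // placeholder listen address
		AdvertiseAddr: "127.0.0.1",    // placeholder advertise address
	})
	if err != nil {
		return err
	}
	fmt.Println("swarm initialised, node ID:", nodeID)
	return nil
}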
-func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return "", err - } - - var response string - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go deleted file mode 100644 index b52b67a88..000000000 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInspect inspects the swarm. -func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - serverResp, err := cli.get(ctx, "/swarm", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Swarm{}, err - } - - var response swarm.Swarm - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go deleted file mode 100644 index a1cf0455d..000000000 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmJoin joins the swarm. -func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { - resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go deleted file mode 100644 index 90ca84b36..000000000 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// SwarmLeave leaves the swarm. -func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { - query := url.Values{} - if force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go deleted file mode 100644 index d2412f7d4..000000000 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUnlock unlocks locked swarm. -func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { - serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go deleted file mode 100644 index 9fde7d75e..000000000 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUpdate updates the swarm. 
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { - query := url.Values{} - query.Set("version", version.String()) - query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) - query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) - resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go deleted file mode 100644 index dde1f6c59..000000000 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// TaskInspectWithRaw returns the task information and its raw representation. -func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - if taskID == "" { - return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} - } - serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Task{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Task{}, nil, err - } - - var response swarm.Task - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go deleted file mode 100644 index 4869b4449..000000000 --- a/vendor/github.com/docker/docker/client/task_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// TaskList returns the list of tasks. -func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/tasks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var tasks []swarm.Task - err = json.NewDecoder(resp.body).Decode(&tasks) - return tasks, err -} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go deleted file mode 100644 index b8c20e71d..000000000 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types/container" - timetypes "github.com/docker/docker/api/types/time" -) - -// TaskLogs returns the logs generated by a task in an io.ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go deleted file mode 100644 index 7f3ff44eb..000000000 --- a/vendor/github.com/docker/docker/client/utils.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "regexp" - - "github.com/docker/docker/api/types/filters" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// getDockerOS returns the operating system based on the server header from the daemon. -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - -// getFiltersQuery returns a url query with "filters" query term, based on the -// filters provided. -func getFiltersQuery(f filters.Args) (url.Values, error) { - query := url.Values{} - if f.Len() > 0 { - filterJSON, err := filters.ToJSON(f) - if err != nil { - return query, err - } - query.Set("filters", filterJSON) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go deleted file mode 100644 index 8f17ff4e8..000000000 --- a/vendor/github.com/docker/docker/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// ServerVersion returns information of the docker client and server host. -func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - return server, err -} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go deleted file mode 100644 index b3b182437..000000000 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/volume" -) - -// VolumeCreate creates a volume in the docker host. 
-func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { - var vol volume.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - defer ensureReaderClosed(resp) - if err != nil { - return vol, err - } - err = json.NewDecoder(resp.body).Decode(&vol) - return vol, err -} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go deleted file mode 100644 index b3ba4e604..000000000 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/volume" -) - -// VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { - vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return vol, err -} - -// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { - if volumeID == "" { - return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} - } - - var vol volume.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return vol, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return vol, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&vol) - return vol, body, err -} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go deleted file mode 100644 index d5ea9827c..000000000 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/volume" -) - -// VolumeList returns the volumes configured in the docker host. 
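A sketch tying the volume calls above together: create a labelled volume, then list volumes matching that label. The volume name, label, and helper function are placeholders:

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func demoVolumes(ctx context.Context, cli *client.Client) error {
	vol, err := cli.VolumeCreate(ctx, volume.CreateOptions{
		Name:   "pbm-demo",                           // placeholder volume name
		Labels: map[string]string{"purpose": "demo"}, // placeholder label
	})
	if err != nil {
		return err
	}
	fmt.Println("created volume:", vol.Name, "at", vol.Mountpoint)

	// List only volumes carrying the label set above.
	list, err := cli.VolumeList(ctx, volume.ListOptions{
		Filters: filters.NewArgs(filters.Arg("label", "purpose=demo")),
	})
	if err != nil {
		return err
	}
	for _, v := range list.Volumes {
		fmt.Println("found:", v.Name, v.Driver)
	}
	return nil
}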
-func (cli *Client) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) { - var volumes volume.ListResponse - query := url.Values{} - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - return volumes, err -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go deleted file mode 100644 index 9b09c30fa..000000000 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/volume" -) - -// VolumesPrune requests the daemon to delete unused data -func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (volume.PruneReport, error) { - var report volume.PruneReport - - if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving volume prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go deleted file mode 100644 index b8bdc5ae8..000000000 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/versions" -) - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { - query := url.Values{} - if force { - // Make sure we negotiated (if the client is configured to do so), - // as code below contains API-version specific handling of options. - // - // Normally, version-negotiation (if enabled) would not happen until - // the API request is made. - if err := cli.checkVersion(ctx); err != nil { - return err - } - if versions.GreaterThanOrEqualTo(cli.version, "1.25") { - query.Set("force", "1") - } - } - resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go deleted file mode 100644 index 151863f07..000000000 --- a/vendor/github.com/docker/docker/client/volume_update.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" -) - -// VolumeUpdate updates a volume. This only works for Cluster Volumes, and -// only some fields can be updated. 
-func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { - if err := cli.NewVersionError(ctx, "1.42", "volume update"); err != nil { - return err - } - - query := url.Values{} - query.Set("version", version.String()) - - resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index a5523c3e9..000000000 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174f..000000000 --- a/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
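// Illustrative sketch of the errdefs pattern described in the package comment
// above: wrap an error to give it a class, then test the class through the
// helper functions. The lookup function and error text are hypothetical.
package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/docker/docker/errdefs"
)

func findVolume(name string) error {
	// A hypothetical lookup whose failure is classified as "not found".
	return errdefs.NotFound(fmt.Errorf("volume %q does not exist", name))
}

func main() {
	err := findVolume("scratch")
	fmt.Println(errdefs.IsNotFound(err)) // true

	// The same class can be derived from an HTTP status code.
	httpErr := errdefs.FromStatusCode(errors.New("no such volume"), http.StatusNotFound)
	fmt.Println(errdefs.IsNotFound(httpErr)) // true
}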
-package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index 042de4b7b..000000000 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,279 +0,0 @@ -package errdefs - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound is a helper to create an error of the class with the same name from any error type -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter is a helper to create an error of the class with the same name from any error type -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict is a helper to create an error of the class with the same name from any error type -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized is a helper to create an error of the class with the same name from any error type -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() error { - return e.error -} - -// Unavailable is a helper to create an error of the class with the same name from any error type -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e errForbidden) Unwrap() error { - return e.error -} - -// Forbidden is a helper to create an error of the class with the same name from any error type -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System is a helper to create an error of the class with the same name from any error type -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -func (e errNotModified) Unwrap() error { - return e.error -} - -// 
NotModified is a helper to create an error of the class with the same name from any error type -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented is a helper to create an error of the class with the same name from any error type -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown is a helper to create an error of the class with the same name from any error type -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled is a helper to create an error of the class with the same name from any error type -func Cancelled(err error) error { - if err == nil || IsCancelled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -func (e errDeadline) Unwrap() error { - return e.error -} - -// Deadline is a helper to create an error of the class with the same name from any error type -func Deadline(err error) error { - if err == nil || IsDeadline(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -func (e errDataLoss) Unwrap() error { - return e.error -} - -// DataLoss is a helper to create an error of the class with the same name from any error type -func DataLoss(err error) error { - if err == nil || IsDataLoss(err) { - return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index ebcd78930..000000000 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,46 +0,0 @@ -package errdefs - -import ( - "net/http" -) - -// FromStatusCode creates an errdef error, based on the provided HTTP status-code -func FromStatusCode(err error, statusCode int) error { - if err == nil { - return nil - } - switch statusCode { - case http.StatusNotFound: - err = NotFound(err) - case http.StatusBadRequest: - err = InvalidParameter(err) - case http.StatusConflict: - err = Conflict(err) - case http.StatusUnauthorized: - err = Unauthorized(err) - case http.StatusServiceUnavailable: - err = Unavailable(err) - case http.StatusForbidden: - err = Forbidden(err) - case http.StatusNotModified: - err = NotModified(err) - case 
http.StatusNotImplemented: - err = NotImplemented(err) - case http.StatusInternalServerError: - if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { - err = System(err) - } - default: - switch { - case statusCode >= 200 && statusCode < 400: - // it's a client error - case statusCode >= 400 && statusCode < 500: - err = InvalidParameter(err) - case statusCode >= 500 && statusCode < 600: - err = System(err) - default: - err = Unknown(err) - } - } - return err -} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index f94034cbd..000000000 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,123 +0,0 @@ -package errdefs - -import ( - "context" - "errors" -) - -type causer interface { - Cause() error -} - -type wrapErr interface { - Unwrap() error -} - -func getImplementer(err error) error { - switch e := err.(type) { - case - ErrNotFound, - ErrInvalidParameter, - ErrConflict, - ErrUnauthorized, - ErrUnavailable, - ErrForbidden, - ErrSystem, - ErrNotModified, - ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - case wrapErr: - return getImplementer(e.Unwrap()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an ErrNotFound -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an ErrConflict -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an ErrUnauthorized -func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an ErrUnavailable -func IsUnavailable(err error) bool { - _, ok := getImplementer(err).(ErrUnavailable) - return ok -} - -// IsForbidden returns if the passed in error is an ErrForbidden -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an ErrSystem -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is a NotModified error -func IsNotModified(err error) bool { - _, ok := getImplementer(err).(ErrNotModified) - return ok -} - -// IsNotImplemented returns if the passed in error is an ErrNotImplemented -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an ErrUnknown -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an ErrCancelled -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an ErrDeadline -func IsDeadline(err error) bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an ErrDataLoss -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} - -// IsContext returns 
if the passed in error is due to context cancellation or deadline exceeded. -func IsContext(err error) bool { - return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) -} diff --git a/vendor/github.com/docker/docker/internal/multierror/multierror.go b/vendor/github.com/docker/docker/internal/multierror/multierror.go deleted file mode 100644 index cf4d6a595..000000000 --- a/vendor/github.com/docker/docker/internal/multierror/multierror.go +++ /dev/null @@ -1,46 +0,0 @@ -package multierror - -import ( - "strings" -) - -// Join is a drop-in replacement for errors.Join with better formatting. -func Join(errs ...error) error { - n := 0 - for _, err := range errs { - if err != nil { - n++ - } - } - if n == 0 { - return nil - } - e := &joinError{ - errs: make([]error, 0, n), - } - for _, err := range errs { - if err != nil { - e.errs = append(e.errs, err) - } - } - return e -} - -type joinError struct { - errs []error -} - -func (e *joinError) Error() string { - if len(e.errs) == 1 { - return strings.TrimSpace(e.errs[0].Error()) - } - stringErrs := make([]string, 0, len(e.errs)) - for _, subErr := range e.errs { - stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1)) - } - return "* " + strings.Join(stringErrs, "\n* ") -} - -func (e *joinError) Unwrap() []error { - return e.errs -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index cde64f08e..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1500 +0,0 @@ -// Package archive provides helper functions for dealing with archive files. -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "context" - "encoding/binary" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "syscall" - "time" - - "github.com/containerd/log" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/klauspost/compress/zstd" - "github.com/moby/patternmatcher" - "github.com/moby/sys/sequential" - "github.com/pkg/errors" -) - -// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a -// tar, but that do not have their own header entry. -// -// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not -// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and -// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. -// -// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is -// subject to change in Moby at any time -- image authors who require consistent or known directory permissions -// should explicitly control them by ensuring that header entries exist for any applicable path. -const ImpliedDirectoryMode = 0o755 - -type ( - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - - // TarOptions wraps the tar options. 
- TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - IDMap idtools.IdentityMapping - ChownOpts *idtools.Identity - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - // Allow unpacking to succeed in spite of failures to set extended - // attributes on the unpacked files due to the destination filesystem - // not supporting them or a lack of permissions. Extended attributes - // were probably in the archive for a reason, so set this option at - // your own peril. - BestEffortXattrs bool - } -) - -// Archiver implements the Archiver interface and allows the reuse of most utility functions of -// this package with a pluggable Untar function. Also, to facilitate the passing of specific id -// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. -type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMapping idtools.IdentityMapping -} - -// NewDefaultArchiver returns a new Archiver without any IdentityMapping -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar} -} - -// breakoutError is used to differentiate errors related to breaking out -// When testing archive breakout in the unit tests, this error is expected -// in order for the test to pass. -type breakoutError error - -const ( - Uncompressed Compression = 0 // Uncompressed represents the uncompressed. - Bzip2 Compression = 1 // Bzip2 is bzip2 compression algorithm. - Gzip Compression = 2 // Gzip is gzip compression algorithm. - Xz Compression = 3 // Xz is xz compression algorithm. - Zstd Compression = 4 // Zstd is zstd compression algorithm. -) - -const ( - AUFSWhiteoutFormat WhiteoutFormat = 0 // AUFSWhiteoutFormat is the default format for whiteouts - OverlayWhiteoutFormat WhiteoutFormat = 1 // OverlayWhiteoutFormat formats whiteout according to the overlay standard. -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - defer rdr.Close() - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -const ( - zstdMagicSkippableStart = 0x184D2A50 - zstdMagicSkippableMask = 0xFFFFFFF0 -) - -var ( - bzip2Magic = []byte{0x42, 0x5A, 0x68} - gzipMagic = []byte{0x1F, 0x8B, 0x08} - xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} - zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} -) - -type matcher = func([]byte) bool - -func magicNumberMatcher(m []byte) matcher { - return func(source []byte) bool { - return bytes.HasPrefix(source, m) - } -} - -// zstdMatcher detects zstd compression algorithm. -// Zstandard compressed data is made of one or more frames. -// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. -// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details. 
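// Illustrative sketch of the compression-detection helpers defined in this
// file; the file path is an example only.
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	path := "/tmp/layer.tar.gz" // hypothetical input

	// IsArchivePath transparently decompresses and checks for a tar header.
	fmt.Println("looks like an archive:", archive.IsArchivePath(path))

	// DetectCompression only needs the first few bytes of the stream.
	f, err := os.Open(path)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	head := make([]byte, 10)
	n, _ := f.Read(head)
	c := archive.DetectCompression(head[:n])
	fmt.Println("detected extension:", c.Extension())
}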
-func zstdMatcher() matcher { - return func(source []byte) bool { - if bytes.HasPrefix(source, zstdMagic) { - // Zstandard frame - return true - } - // skippable frame - if len(source) < 8 { - return false - } - // magic number from 0x184D2A50 to 0x184D2A5F. - if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { - return true - } - return false - } -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - compressionMap := map[Compression]matcher{ - Bzip2: magicNumberMatcher(bzip2Magic), - Gzip: magicNumberMatcher(gzipMagic), - Xz: magicNumberMatcher(xzMagic), - Zstd: zstdMatcher(), - } - for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { - fn := compressionMap[compression] - if fn(source) { - return compression - } - } - return Uncompressed -} - -func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) -} - -func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { - noPigz, err := strconv.ParseBool(noPigzEnv) - if err != nil { - log.G(ctx).WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") - } - if noPigz { - log.G(ctx).Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) - return gzip.NewReader(buf) - } - } - - unpigzPath, err := exec.LookPath("unpigz") - if err != nil { - log.G(ctx).Debugf("unpigz binary not found, falling back to go gzip library") - return gzip.NewReader(buf) - } - - log.G(ctx).Debugf("Using %s to decompress", unpigzPath) - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { - return ioutils.NewReadCloserWrapper(readBuf, func() error { - cancel() - return readBuf.Close() - }) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. 
- // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - - gzReader, err := gzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - ctx, cancel := context.WithCancel(context.Background()) - - xzReader, err := xzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - case Zstd: - zstdReader, err := zstd.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) - return readBufWrapper, nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to -// modify the contents or header of an entry in the archive. If the file already -// exists in the archive the TarModifierFunc will be called with the Header and -// a reader which will return the files content. If the file does not exist both -// header and content will be nil. -type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the -// tar stream are modified if they match any of the keys in mods. 
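// Illustrative sketch of ReplaceFileTarWrapper with a single TarModifierFunc;
// the input tar, the entry name and the replacement content are examples.
package main

import (
	"archive/tar"
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	in, err := os.Open("app.tar") // hypothetical input tar
	if err != nil {
		panic(err)
	}

	mods := map[string]archive.TarModifierFunc{
		// Rewrite (or create) etc/config.yaml inside the stream; the wrapper
		// fixes up the header size for us.
		"etc/config.yaml": func(path string, hdr *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
			if hdr == nil { // entry did not exist in the input archive
				hdr = &tar.Header{Name: path, Mode: 0o644, Typeflag: tar.TypeReg}
			}
			return hdr, []byte("log_level: debug\n"), nil
		},
	}

	out := archive.ReplaceFileTarWrapper(in, mods)
	defer out.Close()
	_, _ = io.Copy(os.Stdout, out)
}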
-func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - defer inputTarStream.Close() - defer tarWriter.Close() - - modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { - header, data, err := modifier(name, original, tarReader) - switch { - case err != nil: - return err - case header == nil: - return nil - } - - if header.Name == "" { - header.Name = name - } - header.Size = int64(len(data)) - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if len(data) != 0 { - if _, err := tarWriter.Write(data); err != nil { - return err - } - } - return nil - } - - var err error - var originalHeader *tar.Header - for { - originalHeader, err = tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - modifier, ok := mods[originalHeader.Name] - if !ok { - // No modifiers for this file, copy the header and data - if err := tarWriter.WriteHeader(originalHeader); err != nil { - pipeWriter.CloseWithError(err) - return - } - if _, err := pools.Copy(tarWriter, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - continue - } - delete(mods, originalHeader.Name) - - if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - // Apply the modifiers that haven't matched any files in the archive - for name, modifier := range mods { - if err := modify(name, nil, modifier, nil); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - pipeWriter.Close() - }() - return pipeReader -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - case Zstd: - return "tar.zst" - } - return "" -} - -// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to -// prevent tar.FileInfoHeader from introspecting it and potentially calling into -// glibc. -type nosysFileInfo struct { - os.FileInfo -} - -func (fi nosysFileInfo) Sys() interface{} { - // A Sys value of type *tar.Header is safe as it is system-independent. - // The tar.FileInfoHeader function copies the fields into the returned - // header without performing any OS lookups. - if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { - return sys - } - return nil -} - -// sysStat, if non-nil, populates hdr from system-dependent fields of fi. -var sysStat func(fi os.FileInfo, hdr *tar.Header) error - -// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. -// -// Compared to the archive/tar.FileInfoHeader function, this function is safe to -// call from a chrooted process as it does not populate fields which would -// require operating system lookups. It behaves identically to -// tar.FileInfoHeader when fi is a FileInfo value returned from -// tar.Header.FileInfo(). -// -// When fi is a FileInfo for a native file, such as returned from os.Stat() and -// os.Lstat(), the returned Header value differs from one returned from -// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not -// set as OS lookups would be required to populate them. 
The AccessTime and -// ChangeTime fields are not currently set (not yet implemented) although that -// is subject to change. Callers which require the AccessTime or ChangeTime -// fields to be zeroed should explicitly zero them out in the returned Header -// value to avoid any compatibility issues in the future. -func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) - if err != nil { - return nil, err - } - if sysStat != nil { - return hdr, sysStat(fi, hdr) - } - return hdr, nil -} - -// FileInfoHeader creates a populated Header from fi. -// -// Compared to the archive/tar package, this function fills in less information -// but is safe to call from a chrooted process. The AccessTime and ChangeTime -// fields are not set in the returned header, ModTime is truncated to one-second -// precision, and the Uname and Gname fields are only set when fi is a FileInfo -// value returned from tar.Header.FileInfo(). -func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := FileInfoHeaderNoLookups(fi, link) - if err != nil { - return nil, err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - hdr.Name = canonicalTarName(name, fi.IsDir()) - return hdr, nil -} - -const paxSchilyXattr = "SCHILY.xattr." - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - const ( - // Values based on linux/include/uapi/linux/capability.h - xattrCapsSz2 = 20 - versionOffset = 3 - vfsCapRevision2 = 2 - vfsCapRevision3 = 3 - ) - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - if capability[versionOffset] == vfsCapRevision3 { - // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no - // sense outside the user namespace the archive is built in. - capability[versionOffset] = vfsCapRevision2 - capability = capability[:xattrCapsSz2] - } - if hdr.PAXRecords == nil { - hdr.PAXRecords = make(map[string]string) - } - hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) - } - return nil -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IdentityMapping idtools.IdentityMapping - ChownOpts *idtools.Identity - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IdentityMapping: idMapping, - ChownOpts: chownOpts, - } -} - -// CanonicalTarNameForPath canonicalizes relativePath to a POSIX-style path using -// forward slashes. It is an alias for [filepath.ToSlash], which is a no-op on -// Linux and Unix. -// -// Deprecated: use [filepath.ToSlash]. 
This function will be removed in the next release. -func CanonicalTarNameForPath(relativePath string) string { - return filepath.ToSlash(relativePath) -} - -// canonicalTarName provides a platform-independent and consistent POSIX-style -// path for files and directories to be archived regardless of the platform. -func canonicalTarName(name string, isDir bool) string { - name = filepath.ToSlash(name) - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - // check whether the file is overlayfs whiteout - // if yes, skip re-mapping container ID mappings. - isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 - - // handle re-mapping container ID mappings back to host ID mappings before - // writing tar headers/files. We skip whiteout files because they were written - // by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { - fileIDPair, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use sequential file access to avoid depleting the standby list on - // Windows. On Linux, this equates to a regular os.Open. 
- file, err := sequential.Open(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, opts *TarOptions) error { - var ( - Lchown = true - inUserns, bestEffortXattrs bool - chownOpts *idtools.Identity - ) - - // TODO(thaJeztah): make opts a required argument. - if opts != nil { - Lchown = !opts.NoLchown - inUserns = opts.InUserNS // TODO(thaJeztah): consider deprecating opts.InUserNS and detect locally. - chownOpts = opts.ChownOpts - bestEffortXattrs = opts.BestEffortXattrs - } - - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg: - // Source is regular file. We use sequential file access to avoid depleting - // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. - file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - log.G(context.TODO()).WithFields(log.Fields{"path": path, "type": hdr.Typeflag}).Debug("skipping device nodes in a userns") - return nil - } - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - if inUserns && errors.Is(err, syscall.EPERM) { - // In most cases, cannot create a fifo if running in user namespace - log.G(context.TODO()).WithFields(log.Fields{"error": err, "path": path, "type": hdr.Typeflag}).Debug("creating fifo node in a userns") - return nil - } - return err - } - - case tar.TypeLink: - // #nosec G305 -- The target path is checked for path traversal. - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. 
- - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - log.G(context.TODO()).Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - msg := "failed to Lchown %q for UID %d, GID %d" - if inUserns && errors.Is(err, syscall.EINVAL) { - msg += " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" - } - return errors.Wrapf(err, msg, path, hdr.Uid, hdr.Gid) - } - } - - var xattrErrs []string - for key, value := range hdr.PAXRecords { - xattr, ok := strings.CutPrefix(key, paxSchilyXattr) - if !ok { - continue - } - if err := system.Lsetxattr(path, xattr, []byte(value), 0); err != nil { - if bestEffortXattrs && errors.Is(err, syscall.ENOTSUP) || errors.Is(err, syscall.EPERM) { - // EPERM occurs if modifying xattrs is not allowed. This can - // happen when running in userns with restrictions (ChromeOS). - xattrErrs = append(xattrErrs, err.Error()) - continue - } - return err - } - } - - if len(xattrErrs) > 0 { - log.G(context.TODO()).WithFields(log.Fields{ - "errors": xattrErrs, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. - aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. 
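// Illustrative sketch of TarWithOptions using the TarOptions fields declared
// earlier in this file; the source directory, patterns and output path are
// examples only.
package main

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	rc, err := archive.TarWithOptions("/var/lib/myapp", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"tmp/*", "*.log"},
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	out, err := os.Create("/tmp/myapp.tar.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}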
-func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - tb, err := NewTarballer(srcPath, options) - if err != nil { - return nil, err - } - go tb.Do() - return tb.Reader(), nil -} - -// Tarballer is a lower-level interface to TarWithOptions which gives the caller -// control over which goroutine the archiving operation executes on. -type Tarballer struct { - srcPath string - options *TarOptions - pm *patternmatcher.PatternMatcher - pipeReader *io.PipeReader - pipeWriter *io.PipeWriter - compressWriter io.WriteCloser - whiteoutConverter tarWhiteoutConverter -} - -// NewTarballer constructs a new tarballer. The arguments are the same as for -// TarWithOptions. -func NewTarballer(srcPath string, options *TarOptions) (*Tarballer, error) { - pm, err := patternmatcher.New(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - return &Tarballer{ - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath: addLongPathPrefix(srcPath), - options: options, - pm: pm, - pipeReader: pipeReader, - pipeWriter: pipeWriter, - compressWriter: compressWriter, - whiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), - }, nil -} - -// Reader returns the reader for the created archive. -func (t *Tarballer) Reader() io.ReadCloser { - return t.pipeReader -} - -// Do performs the archiving operation in the background. The resulting archive -// can be read from t.Reader(). Do should only be called once on each Tarballer -// instance. -func (t *Tarballer) Do() { - ta := newTarAppender( - t.options.IDMap, - t.compressWriter, - t.options.ChownOpts, - ) - ta.WhiteoutConverter = t.whiteoutConverter - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - log.G(context.TODO()).Errorf("Can't close tar writer: %s", err) - } - if err := t.compressWriter.Close(); err != nil { - log.G(context.TODO()).Errorf("Can't close compress writer: %s", err) - } - if err := t.pipeWriter.Close(); err != nil { - log.G(context.TODO()).Errorf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(t.srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. 
- if len(t.options.IncludeFiles) > 0 { - log.G(context.TODO()).Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(t.srcPath) - t.srcPath = dir - t.options.IncludeFiles = []string{base} - } - - if len(t.options.IncludeFiles) == 0 { - t.options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range t.options.IncludeFiles { - rebaseName := t.options.RebaseNames[include] - - var ( - parentMatchInfo []patternmatcher.MatchInfo - parentDirs []string - ) - - walkRoot := getWalkRoot(t.srcPath, include) - filepath.WalkDir(walkRoot, func(filePath string, f os.DirEntry, err error) error { - if err != nil { - log.G(context.TODO()).Errorf("Tar: Can't stat file %s to tar: %s", t.srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(t.srcPath, filePath) - if err != nil || (!t.options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if t.options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - for len(parentDirs) != 0 { - lastParentDir := parentDirs[len(parentDirs)-1] - if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { - break - } - parentDirs = parentDirs[:len(parentDirs)-1] - parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] - } - - var matchInfo patternmatcher.MatchInfo - if len(parentMatchInfo) != 0 { - skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) - } else { - skip, matchInfo, err = t.pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) - } - if err != nil { - log.G(context.TODO()).Errorf("Error matching %s: %v", relFilePath, err) - return err - } - - if f.IsDir() { - parentDirs = append(parentDirs, relFilePath) - parentMatchInfo = append(parentMatchInfo, matchInfo) - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !t.pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range t.pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. 
- replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - log.G(context.TODO()).Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } -} - -// Unpack unpacks the decompressedArchive to dest with options. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // ignore XGlobalHeader early to avoid creating parent directories for them - if hdr.Typeflag == tar.TypeXGlobalHeader { - log.G(context.TODO()).Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) - continue - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // Ensure that the parent directory exists. - err = createImpliedDirectories(dest, hdr, options) - if err != nil { - return err - } - - // #nosec G305 -- The joined path is checked for path traversal. - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." 
{ - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - if err := remapIDs(options.IDMap, hdr); err != nil { - return err - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, trBuf, options); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do -// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is -// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus -// we most both create them and choose metadata like permissions. -// -// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS -// on which the daemon is running. This precondition is required because this function assumes a OS-specific path -// separator when checking that a path is not the root. -func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error { - // Not the root directory, ensure that the parent directory exists - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some - // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche - // usage that reduces the portability of an image. - rootIDs := options.IDMap.RootPair() - - err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs) - if err != nil { - return err - } - } - } - - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. 
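// Illustrative counterpart for extraction: Untar detects and undoes the
// compression itself, so the same call handles .tar, .tar.gz, .tar.bz2 and
// .tar.xz input. Paths are examples only.
package main

import (
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/myapp.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	opts := &archive.TarOptions{
		NoLchown:             true, // keep extracted files owned by the caller
		NoOverwriteDirNonDir: true, // refuse dir <-> non-dir replacements
	}
	if err := archive.Untar(f, "/tmp/myapp-restore", opts); err != nil {
		panic(err)
	}
}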
-func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - IDMap: archiver.IDMapping, - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - IDMap: archiver.IDMapping, - } - return archiver.Untar(archive, dst, options) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this Archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootIDs := archiver.IDMapping.RootPair() - // Create dst, copy src's content into it - if err := idtools.MkdirAllAndChownNew(dst, 0o755, rootIDs); err != nil { - return err - } - return archiver.TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. 
- if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0o700); err != nil { - return err - } - - r, w := io.Pipe() - errC := make(chan error, 1) - - go func() { - defer close(errC) - - errC <- func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := FileInfoHeaderNoLookups(srcSt, "") - if err != nil { - return err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - if err := remapIDs(archiver.IDMapping, hdr); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }() - }() - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { - return archiver.IDMapping -} - -func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error { - ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. -func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - - // Ensure the command has exited before we clean anything up - done := make(chan struct{}) - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - close(done) - }() - - return ioutils.NewReadCloserWrapper(pipeR, func() error { - // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as - // cmd.Wait waits for any non-file stdout/stderr/stdin to close. - err := pipeR.Close() - <-done - return err - }), nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -// -// Deprecated: NewTempArchive is only used in tests and will be removed in the next release. -func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := os.CreateTemp(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. 
The archive can only be read once - as soon as reading completes, -// the file will be deleted. -// -// Deprecated: TempArchive is only used in tests and will be removed in the next release. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go deleted file mode 100644 index 45ac2aa6c..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ /dev/null @@ -1,105 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/containerd/containerd/pkg/userns" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0o600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - opaqueXattrName := "trusted.overlay.opaque" - if userns.RunningInUserNS() { - opaqueXattrName = "user.overlay.opaque" - } - - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, opaqueXattrName) - if err != nil { - return nil, err - } - if len(opaque) == 1 && opaque[0] == 'y' { - delete(hdr.PAXRecords, paxSchilyXattr+opaqueXattrName) - - // create a header for the whiteout file - // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), // #nosec G305 -- An archive is being created, not extracted. 
- Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - } - } - - return -} - -func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - opaqueXattrName := "trusted.overlay.opaque" - if userns.RunningInUserNS() { - opaqueXattrName = "user.overlay.opaque" - } - - err := unix.Setxattr(dir, opaqueXattrName, []byte{'y'}, 0) - if err != nil { - return false, errors.Wrapf(err, "setxattr(%q, %s=y)", dir, opaqueXattrName) - } - // don't write the file itself - return false, err - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath) - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go deleted file mode 100644 index 7dee1f7a4..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index f559a3056..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,128 +0,0 @@ -//go:build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func init() { - sysStat = statUnix -} - -// addLongPathPrefix adds the Windows long path prefix to the path provided if -// it does not already have it. It is a no-op on platforms other than Windows. -func addLongPathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -// statUnix populates hdr from system-dependent fields of fi without performing -// any OS lookups. 
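The two converters in this deleted file translate between the AUFS-style whiteouts stored in image layers (a regular .wh.<name> file, plus the .wh..wh..opq marker for opaque directories) and the overlay representation (a 0/0 character device and a trusted.overlay.opaque, or user.overlay.opaque, xattr). The unprivileged sketch below only classifies entry names and reports what the mknod/setxattr path above would do; the constants mirror the whiteout names used throughout the deleted package. Note that the opaque marker itself starts with the .wh. prefix, so it has to be checked first:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Whiteout markers as used in the layer format (mirroring the constants in
// the deleted package).
const (
	whiteoutPrefix    = ".wh."
	whiteoutOpaqueDir = ".wh..wh..opq"
)

// classifyWhiteout reports what an overlay-backed reader would do for a
// layer entry at path: delete a shadowed file, mark a directory opaque, or
// extract the entry normally.
func classifyWhiteout(path string) string {
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	switch {
	case base == whiteoutOpaqueDir:
		return fmt.Sprintf("mark %q opaque (setxattr trusted.overlay.opaque=y)", dir)
	case strings.HasPrefix(base, whiteoutPrefix):
		original := filepath.Join(dir, strings.TrimPrefix(base, whiteoutPrefix))
		return fmt.Sprintf("whiteout: %q was deleted (mknod 0/0 char device)", original)
	default:
		return fmt.Sprintf("extract %q as-is", path)
	}
}

func main() {
	for _, p := range []string{
		"etc/.wh.hosts",          // a deleted file
		"var/cache/.wh..wh..opq", // an opaque directory marker
		"usr/bin/ls",             // a regular entry
	} {
		fmt.Println(classifyWhiteout(p))
	}
}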
-func statUnix(fi os.FileInfo, hdr *tar.Header) error { - // Devmajor and Devminor are only needed for special devices. - - // In FreeBSD, RDev for regular files is -1 (unless overridden by FS): - // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 - // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). - - // ZFS in particular does not override the default: - // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 - - // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). - // Such large values cannot be encoded in a tar header. - if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { - return nil - } - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - - hdr.Uid = int(s.Uid) - hdr.Gid = int(s.Gid) - - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert - } - - return nil -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo. -// -// Creating device nodes is not supported when running in a user namespace, -// produces a [syscall.EPERM] in most cases. -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 0o7777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index e25c64b41..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/idtools" -) - -// longPathPrefix is the longpath prefix for Windows file paths. -const longPathPrefix = `\\?\` - -// addLongPathPrefix adds the Windows long path prefix to the path provided if -// it does not already have it. It is a no-op on platforms other than Windows. -// -// addLongPathPrefix is a copy of [github.com/docker/docker/pkg/longpath.AddPrefix]. 
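The Windows variant of addLongPathPrefix that follows prepends the \\?\ long-path prefix so archiving is not limited by MAX_PATH, with UNC paths gaining an extra UNC segment. A small sketch of the expected input/output pairs; it is pure string handling, so it runs on any OS:

package main

import (
	"fmt"
	"strings"
)

const longPathPrefix = `\\?\`

// addLongPathPrefix mirrors the behaviour of the deleted Windows helper:
// already-prefixed paths pass through, UNC paths gain a "UNC" segment.
func addLongPathPrefix(p string) string {
	if strings.HasPrefix(p, longPathPrefix) {
		return p
	}
	if strings.HasPrefix(p, `\\`) {
		return longPathPrefix + `UNC` + p[1:]
	}
	return longPathPrefix + p
}

func main() {
	fmt.Println(addLongPathPrefix(`C:\images\layer`))     // \\?\C:\images\layer
	fmt.Println(addLongPathPrefix(`\\server\share\blob`)) // \\?\UNC\server\share\blob
	fmt.Println(addLongPathPrefix(`\\?\C:\already\ok`))   // unchanged
}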
-func addLongPathPrefix(srcPath string) string { - if strings.HasPrefix(srcPath, longPathPrefix) { - return srcPath - } - if strings.HasPrefix(srcPath, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - return longPathPrefix + `UNC` + srcPath[1:] - } - return longPathPrefix + srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - // Remove group- and world-writable bits. - perm &= 0o755 - - // Add the x bit: make everything +x on Windows - return perm | 0o111 -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - // do nothing. no notion of Rdev, Nlink in stat on Windows - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { - // no notion of file ownership mapping yet on Windows - return idtools.Identity{UID: 0, GID: 0}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index 5f12ca401..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,440 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "context" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/containerd/log" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - ChangeModify = 0 // ChangeModify represents the modify operation. - ChangeAdd = 1 // ChangeAdd represents the add operation. - ChangeDelete = 2 // ChangeDelete represents the delete operation. -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar doesn't have sub-second mtime precision. 
The go tar -// writer (1.10+) does when using PAX format, but we round times to seconds -// to ensure archives have the same hashes for backwards compatibility. -// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. -// -// Non-sub-second is problematic when we apply changes via tar -// files. We handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a.Equal(b) || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type ( - skipChange func(string) (bool, error) - deleteChange func(string, string, os.FileInfo) (string, error) -) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. 
- // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - !bytes.Equal(oldChild.capability, newChild.capability) { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. 
- // As this runs on the daemon side, file paths are OS specific. - if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var oldRoot, newRoot *FileInfo - if oldDir == "" { - emptyDir, err := os.MkdirTemp("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - log.G(context.TODO()).Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { - reader, writer := io.Pipe() - go func() { - ta := newTarAppender(idMap, writer, nil) - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. 
a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - log.G(context.TODO()).Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - log.G(context.TODO()).Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - log.G(context.TODO()).Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - log.G(context.TODO()).Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go deleted file mode 100644 index 81fcbc5ba..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ /dev/null @@ -1,286 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. -func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. 
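For ChangeDelete entries, the ExportChanges goroutine above cannot archive the now-missing file, so it records the deletion as an empty .wh.<name> header that a later UnpackLayer will interpret as "remove this path". A reduced sketch of just that step, writing into an in-memory tar and hard-coding the whiteout prefix:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"path/filepath"
	"time"
)

const whiteoutPrefix = ".wh."

// writeWhiteout records the deletion of changePath (an absolute path inside
// the layer, e.g. "/etc/hosts") as an empty whiteout entry.
func writeWhiteout(tw *tar.Writer, changePath string) error {
	dir, base := filepath.Split(changePath)
	whiteout := filepath.Join(dir, whiteoutPrefix+base)
	now := time.Now()
	hdr := &tar.Header{
		Name:       whiteout[1:], // strip the leading "/" for the archive name
		Typeflag:   tar.TypeReg,
		Size:       0,
		ModTime:    now,
		AccessTime: now,
		ChangeTime: now,
	}
	return tw.WriteHeader(hdr)
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	if err := writeWhiteout(tw, "/etc/hosts"); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("layer tar with whiteout entry: %d bytes\n", buf.Len())
}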
-func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of unix.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - name := string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index 28f741a25..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,101 +0,0 @@ -//go:build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.WalkDir(sourceDir, func(path string, _ os.DirEntry, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. - if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - stat: s, - } - - // system.Lgetxattr is only implemented on Linux and produces an error - // on other platforms. This code is intentionally left commented-out - // as a reminder to include this code if this would ever be implemented - // on other platforms. 
- // info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go deleted file mode 100644 index 853c73ee8..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - "syscall" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size or modification time for dirs, its not a good - // measure of change. See https://github.com/moby/moby/issues/9874 - // for a description of the issue with modification time, and - // https://github.com/moby/moby/pull/11422 for the change. - // (Note that in the Windows implementation of this function, - // modification time IS taken as a change). See - // https://github.com/moby/moby/pull/37982 for more information. - (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return fi.Sys().(*syscall.Stat_t).Ino -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index 9906685e4..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Note there is slight difference between the Linux and Windows - // implementations here. Due to https://github.com/moby/moby/issues/9874, - // and the fix at https://github.com/moby/moby/pull/11422, Linux does not - // consider a change to the directory time as a change. Windows on NTFS - // does. See https://github.com/moby/moby/pull/37982 for more information. 
- - if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go deleted file mode 100644 index 01eadc30d..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,486 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "context" - "errors" - "io" - "os" - "path/filepath" - "strings" - - "github.com/containerd/log" - "github.com/docker/docker/pkg/system" -) - -// Errors used or returned by this file. -var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string { - // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && path[len(path)-1] == filepath.Separator -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. 
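filepath.Clean drops the trailing / and /. that the copy code uses to distinguish "copy the directory" from "copy the directory's contents", which is why PreserveTrailingDotOrSeparator above re-appends them after cleaning. A short sketch of the effect on a few sample paths, simplified to a plain / separator and doing no filesystem access:

package main

import (
	"fmt"
	"path/filepath"
)

// preserveTrailingDot re-applies a trailing "/." or "/" that filepath.Clean
// removed, following the same rules as the deleted helper (simplified to a
// "/" separator).
func preserveTrailingDot(cleaned, original string) string {
	endsWithDot := func(p string) bool { return filepath.Base(p) == "." }
	endsWithSep := func(p string) bool { return len(p) > 0 && p[len(p)-1] == '/' }

	if !endsWithDot(cleaned) && endsWithDot(original) {
		if !endsWithSep(cleaned) {
			cleaned += "/"
		}
		cleaned += "."
	}
	if !endsWithSep(cleaned) && endsWithSep(original) {
		cleaned += "/"
	}
	return cleaned
}

func main() {
	for _, orig := range []string{"/data/src/.", "/data/src/", "/data/src"} {
		cleaned := filepath.Clean(orig)
		fmt.Printf("%-12q clean=%-11q preserved=%q\n", orig, cleaned, preserveTrailingDot(cleaned, orig))
	}
}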
-func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(filepath.FromSlash(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(os.PathSeparator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { - sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. - sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - opts := TarResourceRebaseOpts(sourceBase, rebaseName) - - log.G(context.TODO()).Debugf("copying %q from %q", sourceBase, sourceDir) - return TarWithOptions(sourceDir, opts) -} - -// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase -// parameters to be sent to TarWithOptions (the TarOptions struct) -func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { - filter := []string{sourceBase} - return &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - } -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symbol link - // we will use the target file instead of the symbol link if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path. 
-func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Stat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, io.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. 
The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. It this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - // srcContent tar stream, as served by TarWithOptions(), is - // definitely in PAX format, but tar.Next() mistakenly guesses it - // as USTAR, which creates a problem: if the newBase is >100 - // characters long, WriteHeader() returns an error like - // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...". - // - // To fix, set the format to PAX here. See docker/for-linux issue #484. - hdr.Format = tar.FormatPAX - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - if hdr.Typeflag == tar.TypeLink { - hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) - } - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - // Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433 - // and https://cure53.de/pentest-report_opa.pdf, which recommends to - // replace io.Copy with io.CopyN7. The latter allows to specify the - // maximum number of bytes that should be read. 
By properly defining - // the limit, it can be assured that a GZip compression bomb cannot - // easily cause a Denial-of-Service. - // After reviewing with @tonistiigi and @cpuguy83, this should not - // affect us, because here we do not read into memory, hence should - // not be vulnerable to this code consuming memory. - //nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec) - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists. - dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides real path need to be copied with parameters such as -// whether to follow symbol link or not, if followLink is true, resolvedPath will return -// link target of any symbol link file, else it will only resolve symlink of directory -// but return symbol link file itself without resolving. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not follow symbol link, then resolve symbol link of parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. 
- resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path) && - filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, -// return completed resolved path and rebased file name -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && - !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path) && - !hasTrailingPathSeparator(resolvedPath) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index 065bd4add..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index a878d1bac..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index e080e310a..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,263 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "context" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/containerd/log" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. 
- for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - log.G(context.TODO()).Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Ensure that the parent directory exists. - err = createImpliedDirectories(dest, hdr, options) - if err != nil { - return 0, err - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = os.MkdirTemp(dest, "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, options); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - // #nosec G305 -- The joined path is guarded against path traversal. - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.WalkDir(dir, func(path string, info os.DirEntry, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - return os.RemoveAll(path) - } - return nil - }) - if err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exits we almost always just want to remove and replace it. 
- // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := remapIDs(options.IDMap, srcHdr); err != nil { - return 0, err - } - - if err := createTarFile(path, dest, srcHdr, srcData, options); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// IsEmpty checks if the tar archive is empty (doesn't contain any entries). 
-func IsEmpty(rd io.Reader) (bool, error) { - decompRd, err := DecompressStream(rd) - if err != nil { - return true, fmt.Errorf("failed to decompress archive: %v", err) - } - defer decompRd.Close() - - tarReader := tar.NewReader(decompRd) - if _, err := tarReader.Next(); err != nil { - if err == io.EOF { - return true, nil - } - return false, fmt.Errorf("failed to read next archive header: %v", err) - } - - return false, nil -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - restore := overrideUmask(0) - defer restore() - - if decompress { - decompLayer, err := DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompLayer.Close() - layer = decompLayer - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go deleted file mode 100644 index 7216f2f4f..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !windows - -package archive - -import "golang.org/x/sys/unix" - -// overrideUmask sets current process's file mode creation mask to newmask -// and returns a function to restore it. -// -// WARNING for readers stumbling upon this code. Changing umask in a multi- -// threaded environment isn't safe. Don't use this without understanding the -// risks, and don't export this function for others to use (we shouldn't even -// be using this ourself). -// -// FIXME(thaJeztah): we should get rid of these hacks if possible. -func overrideUmask(newMask int) func() { - oldMask := unix.Umask(newMask) - return func() { - unix.Umask(oldMask) - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go deleted file mode 100644 index d28f5b2df..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package archive - -// overrideUmask is a no-op on windows. -func overrideUmask(newmask int) func() { - return func() {} -} diff --git a/vendor/github.com/docker/docker/pkg/archive/path.go b/vendor/github.com/docker/docker/pkg/archive/path.go deleted file mode 100644 index 888a69758..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/path.go +++ /dev/null @@ -1,20 +0,0 @@ -package archive - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. -// On Linux: this is a no-op. -// On Windows: this does the following> -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with -// a Windows long-path which doesn't support drive-letters. 
Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return checkSystemDriveAndRemoveDriveLetter(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/path_unix.go b/vendor/github.com/docker/docker/pkg/archive/path_unix.go deleted file mode 100644 index 390264bf8..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/path_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !windows - -package archive - -// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation -// of CheckSystemDriveAndRemoveDriveLetter -func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/path_windows.go b/vendor/github.com/docker/docker/pkg/archive/path_windows.go deleted file mode 100644 index 7e18c8e44..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/path_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package archive - -import ( - "fmt" - "path/filepath" - "strings" -) - -// checkSystemDriveAndRemoveDriveLetter is the Windows implementation -// of CheckSystemDriveAndRemoveDriveLetter -func checkSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("no relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("the specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go deleted file mode 100644 index 797143ee8..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = (1 << 30) - 2 - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go deleted file mode 100644 index 14c4ceb1d..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go deleted file mode 100644 index 4c072a87e..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go +++ /dev/null @@ -1,23 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. 
If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." - -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index 032db82ce..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "io" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// - ./foo.txt with content "hello world" -// - ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (io.Reader, error) { - files := parseStringPairs(input...) - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return buf, nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index 79d682c69..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,229 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. 
-type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName = "/etc/subuid" - subgidFileName = "/etc/subgid" -) - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership and permissions. -func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, true, true) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership and permissions. -// Note that unlike os.Mkdir(), this function does not return IsExist error -// in case path already exists. -func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, false, true) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership or permissions will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, true, false) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - uid, err := toHost(0, uidMap) - if err != nil { - return -1, -1, err - } - gid, err := toHost(0, gidMap) - if err != nil { - return -1, -1, err - } - return uid, gid, nil -} - -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// Identity is either a UID and GID pair or a SID (but not both) -type Identity struct { - UID int - GID int - SID string -} - -// Chown changes the numeric uid and gid of the named file to id.UID and id.GID. -func (id Identity) Chown(name string) error { - return os.Chown(name, id.UID, id.GID) -} - -// IdentityMapping contains a mappings of UIDs and GIDs. 
-// The zero value represents an empty mapping. -type IdentityMapping struct { - UIDMaps []IDMap `json:"UIDMaps"` - GIDMaps []IDMap `json:"GIDMaps"` -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i IdentityMapping) RootPair() Identity { - uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps) - return Identity{UID: uid, GID: gid} -} - -// ToHost returns the host UID and GID for the container uid, gid. -// Remapping is only performed if the ids aren't already the remapped root ids -func (i IdentityMapping) ToHost(pair Identity) (Identity, error) { - var err error - target := i.RootPair() - - if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.UIDMaps) - if err != nil { - return target, err - } - } - - if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.GIDMaps) - } - return target, err -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) { - uid, err := toContainer(pair.UID, i.UIDMaps) - if err != nil { - return -1, -1, err - } - gid, err := toContainer(pair.GID, i.GIDMaps) - return uid, gid, err -} - -// Empty returns true if there are no id mappings -func (i IdentityMapping) Empty() bool { - return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - -// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) -// and return all found ranges for a specified username. 
If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - - return rangeList, s.Err() -} - -// CurrentIdentity returns the identity of the current process -func CurrentIdentity() Identity { - return Identity{UID: os.Getuid(), GID: os.Getegid()} -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go deleted file mode 100644 index cd621bdcc..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,278 +0,0 @@ -//go:build !windows - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "strconv" - "syscall" - - "github.com/moby/sys/user" -) - -func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { - path, err := filepath.Abs(path) - if err != nil { - return err - } - - stat, err := os.Stat(path) - if err == nil { - if !stat.IsDir() { - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - if !chownExisting { - return nil - } - - // short-circuit -- we were called with an existing directory and chown was requested - return setPermissions(path, mode, owner, stat) - } - - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if os.IsNotExist(err) { - paths = []string{path} - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err = os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err = os.MkdirAll(path, mode); err != nil { - return err - } - } else if err = os.Mkdir(path, mode); err != nil { - return err - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err = setPermissions(pathComponent, mode, owner, nil); err != nil { - return err - } - } - return nil -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(name string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(name) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(name) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(strconv.Itoa(uid)) -} - -func getentUser(name string) (user.User, error) { - reader, err := callGetent("passwd", name) - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswd(reader) - if err != nil { - return user.User{}, err - } - if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) - } - return users[0], nil -} - -// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(name string) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(name) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(name) -} - -// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGID(gid int) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGid(gid) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(strconv.Itoa(gid)) -} - -func getentGroup(name string) (user.Group, error) { - reader, err := callGetent("group", name) - if err != nil { - return 
user.Group{}, err - } - groups, err := user.ParseGroup(reader) - if err != nil { - return user.Group{}, err - } - if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) - } - return groups[0], nil -} - -func callGetent(database, key string) (io.Reader, error) { - getentCmd, err := resolveBinary("getent") - // if no `getent` command within the execution environment, can't do anything else - if err != nil { - return nil, fmt.Errorf("unable to find getent command: %w", err) - } - command := exec.Command(getentCmd, database, key) - // we run getent within container filesystem, but without /dev so /dev/null is not available for exec to mock stdin - command.Stdin = io.NopCloser(bytes.NewReader(nil)) - out, err := command.CombinedOutput() - if err != nil { - exitCode, errC := getExitCode(err) - if errC != nil { - return nil, err - } - switch exitCode { - case 1: - return nil, fmt.Errorf("getent reported invalid parameters/database unknown") - case 2: - return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) - case 3: - return nil, fmt.Errorf("getent database doesn't support enumeration") - default: - return nil, err - } - } - return bytes.NewReader(out), nil -} - -// getExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func getExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} - -// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested -// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the -// dir is on an NFS share, so don't call chown unless we absolutely must. -// Likewise for setting permissions. 
-func setPermissions(p string, mode os.FileMode, owner Identity, stat os.FileInfo) error { - if stat == nil { - var err error - stat, err = os.Stat(p) - if err != nil { - return err - } - } - if stat.Mode().Perm() != mode.Perm() { - if err := os.Chmod(p, mode.Perm()); err != nil { - return err - } - } - ssi := stat.Sys().(*syscall.Stat_t) - if ssi.Uid == uint32(owner.UID) && ssi.Gid == uint32(owner.GID) { - return nil - } - return os.Chown(p, owner.UID, owner.GID) -} - -// LoadIdentityMapping takes a requested username and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func LoadIdentityMapping(name string) (IdentityMapping, error) { - usr, err := LookupUser(name) - if err != nil { - return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %v", name, err) - } - - subuidRanges, err := lookupSubUIDRanges(usr) - if err != nil { - return IdentityMapping{}, err - } - subgidRanges, err := lookupSubGIDRanges(usr) - if err != nil { - return IdentityMapping{}, err - } - - return IdentityMapping{ - UIDMaps: subuidRanges, - GIDMaps: subgidRanges, - }, nil -} - -func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { - rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) - if err != nil { - return nil, err - } - if len(rangeList) == 0 { - rangeList, err = parseSubuid(usr.Name) - if err != nil { - return nil, err - } - } - if len(rangeList) == 0 { - return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name) - } - return createIDMap(rangeList), nil -} - -func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { - rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) - if err != nil { - return nil, err - } - if len(rangeList) == 0 { - rangeList, err = parseSubgid(usr.Name) - if err != nil { - return nil, err - } - } - if len(rangeList) == 0 { - return nil, fmt.Errorf("no subgid ranges found for user %q", usr.Name) - } - return createIDMap(rangeList), nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index 32953f456..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -const ( - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" -) - -const ( - ContainerAdministratorSidString = "S-1-5-93-2-1" - ContainerUserSidString = "S-1-5-93-2-2" -) - -// This is currently a wrapper around MkdirAll, however, since currently -// permissions aren't set through this path, the identity isn't utilized. -// Ownership is handled elsewhere, but in the future could be support here -// too. 
-func mkdirAs(path string, _ os.FileMode, _ Identity, _, _ bool) error { - return system.MkdirAll(path, 0) -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index f0c075e20..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,166 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "fmt" - "os/exec" - "regexp" - "sort" - "strconv" - "strings" - "sync" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand string - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) -) - -const ( - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 -) - -// AddNamespaceRangesUser takes a username and uses the standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. -func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("error adding user %q: %v", name, err) - } - - // Query the system for the created uid and gid pair - out, err := exec.Command("id", name).CombinedOutput() - if err != nil { - return -1, -1, fmt.Errorf("error trying to find uid/gid for new user %q: %v", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %v", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %v", err) - } - return uid, gid, nil -} - -func addUser(name string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) - var args []string - switch userCommand { - case "adduser": - args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name} - case "useradd": - args = []string{"-r", "-s", "/bin/false", name} - default: - return fmt.Errorf("cannot add user; no useradd/adduser binary found") - } - - if out, err := exec.Command(userCommand, args...).CombinedOutput(); err != nil { - return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := parseSubuid(name) - if err != nil { - return 
fmt.Errorf("error while looking for subuid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("can't find available subuid range: %v", err) - } - idRange := fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1) - out, err := exec.Command("usermod", "-v", idRange, name).CombinedOutput() - if err != nil { - return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) - } - } - - ranges, err = parseSubgid(name) - if err != nil { - return fmt.Errorf("error while looking for subgid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("can't find available subgid range: %v", err) - } - idRange := fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1) - out, err := exec.Command("usermod", "-w", idRange, name).CombinedOutput() - if err != nil { - return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := parseSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, error) { - ranges, err := parseSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index 6a9311c4a..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !linux - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. 
-func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go deleted file mode 100644 index 517a2f52c..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "fmt" - "os/exec" - "path/filepath" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - // only return no error if the final resolved binary basename - // matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go deleted file mode 100644 index 466f79294..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index c1cfa62fd..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,187 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. 
-type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. - readBlock bool // check read BytesPipe is Wait() or not -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - defer bp.mu.Unlock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - if bp.readBlock { - bp.wait.Broadcast() - } - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. 
-func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - return 0, bp.closeErr - } - bp.readBlock = true - bp.wait.Wait() - bp.readBlock = false - if bp.bufLen == 0 && bp.closeErr != nil { - return 0, bp.closeErr - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go deleted file mode 100644 index 05da97b0e..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,163 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. -// NOTE: umask is not considered for the file's permissions. -func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename and with the specified permission bits. -// NOTE: umask is not considered for the file's permissions. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. 
-// Must be committed to a new directory. -type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := os.MkdirTemp(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. -func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. -func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index e03d3fee7..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,172 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "context" - "io" - "runtime/debug" - "sync/atomic" - - // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered - // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. - _ "crypto/sha256" - _ "crypto/sha512" - - "github.com/containerd/log" -) - -// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser -// It calls the given callback function when closed. It should be constructed -// with NewReadCloserWrapper -type ReadCloserWrapper struct { - io.Reader - closer func() error - closed atomic.Bool -} - -// Close calls back the passed closer function -func (r *ReadCloserWrapper) Close() error { - if !r.closed.CompareAndSwap(false, true) { - subsequentCloseWarn("ReadCloserWrapper") - return nil - } - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. 
-func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &ReadCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter - closed atomic.Bool -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. 
-func (p *cancelReadCloser) Close() error { - if !p.closed.CompareAndSwap(false, true) { - subsequentCloseWarn("cancelReadCloser") - return nil - } - p.closeWithError(io.EOF) - return nil -} - -func subsequentCloseWarn(name string) { - log.G(context.TODO()).Error("subsequent attempt to close " + name) - if log.GetLevel() >= log.DebugLevel { - log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack())) - } -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 91b8d1826..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 1f50602f2..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,74 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "sync/atomic" -) - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. 
-func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error - closed atomic.Bool -} - -func (r *writeCloserWrapper) Close() error { - if !r.closed.CompareAndSwap(false, true) { - subsequentCloseWarn("WriteCloserWrapper") - return nil - } - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index 035160c83..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,307 +0,0 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - units "github.com/docker/go-units" - "github.com/moby/term" - "github.com/morikuni/aec" -) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time isalways the same number of characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// JSONError wraps a concrete Code and Message, Code is -// an integer error code, Message is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a progress message in a JSON stream. -type JSONProgress struct { - // Current is the current status and value of the progress made towards Total. - Current int64 `json:"current,omitempty"` - // Total is the end value describing when we made 100% progress for an operation. - Total int64 `json:"total,omitempty"` - // Start is the initial value for the operation. - Start int64 `json:"start,omitempty"` - // HideCounts. if true, hides the progress count indicator (xB/yB). - HideCounts bool `json:"hidecounts,omitempty"` - // Units is the unit to print for progress. It defaults to "bytes" if empty. - Units string `json:"units,omitempty"` - - // terminalFd is the fd of the current terminal, if any. It is used - // to get the terminal width. - terminalFd uintptr - - // nowFunc is used to override the current time in tests. - nowFunc func() time.Time - - // winSize is used to override the terminal width in tests. 
- winSize int -} - -func (p *JSONProgress) String() string { - var ( - width = p.width() - pbBox string - numbersBox string - timeLeftBox string - ) - if p.Current <= 0 && p.Total <= 0 { - return "" - } - if p.Total <= 0 { - switch p.Units { - case "": - return fmt.Sprintf("%8v", units.HumanSize(float64(p.Current))) - default: - return fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - switch { - case p.HideCounts: - case p.Units == "": // no units, use bytes - current := units.HumanSize(float64(p.Current)) - total := units.HumanSize(float64(p.Total)) - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - default: - numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := p.now().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// now returns the current time in UTC, but can be overridden in tests -// by setting JSONProgress.nowFunc to a custom function. -func (p *JSONProgress) now() time.Time { - if p.nowFunc != nil { - return p.nowFunc() - } - return time.Now().UTC() -} - -// width returns the current terminal's width, but can be overridden -// in tests by setting JSONProgress.winSize to a non-zero value. -func (p *JSONProgress) width() int { - if p.winSize != 0 { - return p.winSize - } - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - return int(ws.Width) - } - return 200 -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated - // Aux contains out-of-band data, such as digests for push signing and image id after building. - Aux *json.RawMessage `json:"aux,omitempty"` -} - -func clearLine(out io.Writer) { - eraseMode := aec.EraseModes.All - cl := aec.EraseLine(eraseMode) - fmt.Fprint(out, cl) -} - -func cursorUp(out io.Writer, l uint) { - fmt.Fprint(out, aec.Up(l)) -} - -func cursorDown(out io.Writer, l uint) { - fmt.Fprint(out, aec.Down(l)) -} - -// Display prints the JSONMessage to out. If isTerminal is true, it erases -// the entire current line when displaying the progressbar. 
It returns an -// error if the [JSONMessage.Error] field is non-nil. -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - clearLine(out) - endl = "\r" - fmt.Fprint(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { // deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream reads a JSON message stream from in, and writes -// each [JSONMessage] to out. It returns an error if an invalid JSONMessage -// is received, or if a JSONMessage containers a non-zero [JSONMessage.Error]. -// -// Presentation of the JSONMessage depends on whether a terminal is attached, -// and on the terminal width. Progress bars ([JSONProgress]) are suppressed -// on narrower terminals (< 110 characters). -// -// - isTerminal describes if out is a terminal, in which case it prints -// a newline ("\n") at the end of each line and moves the cursor while -// displaying. -// - terminalFd is the fd of the current terminal (if any), and used -// to get the terminal width. -// - auxCallback allows handling the [JSONMessage.Aux] field. It is -// called if a JSONMessage contains an Aux field, in which case -// DisplayJSONMessagesStream does not present the JSONMessage. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]uint) - ) - - for { - var diff uint - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = uint(len(ids)) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } - diff = uint(len(ids)) - line - if isTerminal { - cursorUp(out, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). 
- ids = make(map[string]uint) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - cursorDown(out, diff) - } - if err != nil { - return err - } - } - return nil -} - -// Stream is an io.Writer for output with utilities to get the output's file -// descriptor and to detect wether it's a terminal. -// -// it is subset of the streams.Out type in -// https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out -type Stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output Stream. It is -// used by the Docker CLI to print JSONMessage streams. -func DisplayJSONMessagesToStream(in io.Reader, stream Stream, auxCallback func(JSONMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 3792c67a9..000000000 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,137 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools // import "github.com/docker/docker/pkg/pools" - -import ( - "bufio" - "io" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -const buffer32K = 32 * 1024 - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) - buffer32KPool = newBufferPoolWithSize(buffer32K) -) - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - return &BufioReaderPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -type bufferPool struct { - pool sync.Pool -} - -func newBufferPoolWithSize(size int) *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() interface{} { s := make([]byte, size); return &s }, - }, - } -} - -func (bp *bufferPool) Get() *[]byte { - return bp.pool.Get().(*[]byte) -} - -func (bp *bufferPool) Put(b *[]byte) { - bp.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. 
-func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := buffer32KPool.Get() - written, err = io.CopyBuffer(dst, src, *buf) - buffer32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - return &BufioWriterPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go deleted file mode 100644 index 8f6e0a737..000000000 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ /dev/null @@ -1,190 +0,0 @@ -package stdcopy // import "github.com/docker/docker/pkg/stdcopy" - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" -) - -// StdType is the type of standard stream -// a writer can multiplex to. -type StdType byte - -const ( - // Stdin represents standard input stream type. - Stdin StdType = iota - // Stdout represents standard output stream type. - Stdout - // Stderr represents standard error steam type. - Stderr - // Systemerr represents errors originating from the system that make it - // into the multiplexed stream. - Systemerr - - stdWriterPrefixLen = 8 - stdWriterFdIndex = 0 - stdWriterSizeIndex = 4 - - startingBufLen = 32*1024 + stdWriterPrefixLen + 1 -) - -var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} - -// stdWriter is wrapper of io.Writer with extra customized info. -type stdWriter struct { - io.Writer - prefix byte -} - -// Write sends the buffer to the underneath writer. -// It inserts the prefix header before the buffer, -// so stdcopy.StdCopy knows where to multiplex the output. -// It makes stdWriter to implement io.Writer. 
-func (w *stdWriter) Write(p []byte) (n int, err error) { - if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") - } - if p == nil { - return 0, nil - } - - header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} - binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) - buf := bufPool.Get().(*bytes.Buffer) - buf.Write(header[:]) - buf.Write(p) - - n, err = w.Writer.Write(buf.Bytes()) - n -= stdWriterPrefixLen - if n < 0 { - n = 0 - } - - buf.Reset() - bufPool.Put(buf) - return -} - -// NewStdWriter instantiates a new Writer. -// Everything written to it will be encapsulated using a custom format, -// and written to the underlying `w` stream. -// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. -// `t` indicates the id of the stream to encapsulate. -// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) io.Writer { - return &stdWriter{ - Writer: w, - prefix: byte(t), - } -} - -// StdCopy is a modified version of io.Copy. -// -// StdCopy will demultiplex `src`, assuming that it contains two streams, -// previously multiplexed together using a StdWriter instance. -// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. -// -// StdCopy will read until it hits EOF on `src`. It will then return a nil error. -// In other words: if `err` is non nil, it indicates a real underlying error. -// -// `written` will hold the total number of bytes written to `dstout` and `dsterr`. -func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, startingBufLen) - bufLen = len(buf) - nr, nw int - er, ew error - out io.Writer - frameSize int - ) - - for { - // Make sure we have at least a full header - for nr < stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - stream := StdType(buf[stdWriterFdIndex]) - // Check the first byte to know where to write - switch stream { - case Stdin: - fallthrough - case Stdout: - // Write on stdout - out = dstout - case Stderr: - // Write on stderr - out = dsterr - case Systemerr: - // If we're on Systemerr, we won't write anywhere. - // NB: if this code changes later, make sure you don't try to write - // to outstream if Systemerr is the stream - out = nil - default: - return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) - } - - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary. - if frameSize+stdWriterPrefixLen > bufLen { - buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) - bufLen = len(buf) - } - - // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - // we might have an error from the source mixed up in our multiplexed - // stream. if we do, return it. 
- if stream == Systemerr { - return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen])) - } - - // Write the retrieved frame (without header) - nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) - if ew != nil { - return 0, ew - } - - // If the frame has not been fully written: error - if nw != frameSize { - return 0, io.ErrShortWrite - } - written += int64(nw) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+stdWriterPrefixLen:]) - // Move the index - nr -= frameSize + stdWriterPrefixLen - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/args_windows.go b/vendor/github.com/docker/docker/pkg/system/args_windows.go deleted file mode 100644 index b7c9487a0..000000000 --- a/vendor/github.com/docker/docker/pkg/system/args_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "strings" - - "golang.org/x/sys/windows" -) - -// EscapeArgs makes a Windows-style escaped command line from a set of arguments -func EscapeArgs(args []string) string { - escapedArgs := make([]string, len(args)) - for i, a := range args { - escapedArgs[i] = windows.EscapeArg(a) - } - return strings.Join(escapedArgs, " ") -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go deleted file mode 100644 index 6a6bca43e..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ /dev/null @@ -1,48 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" - "time" - "unsafe" -) - -// Used by Chtimes -var unixEpochTime, unixMaxTime time.Time - -func init() { - unixEpochTime = time.Unix(0, 0) - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - // - // Note that this intentionally sets nsec (not sec), which sets both sec - // and nsec internally in time.Unix(); - // https://github.com/golang/go/blob/go1.19.2/src/time/time.go#L1364-L1380 - unixMaxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - unixMaxTime = time.Unix(1<<31-1, 0) - } -} - -// Chtimes changes the access time and modified time of a file at the given path. -// If the modified time is prior to the Unix Epoch (unixMinTime), or after the -// end of Unix Time (unixEpochTime), os.Chtimes has undefined behavior. In this -// case, Chtimes defaults to Unix Epoch, just in case. -func Chtimes(name string, atime time.Time, mtime time.Time) error { - if atime.Before(unixEpochTime) || atime.After(unixMaxTime) { - atime = unixEpochTime - } - - if mtime.Before(unixEpochTime) || mtime.After(unixMaxTime) { - mtime = unixEpochTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - // Take platform specific action for setting create time. - return setCTime(name, mtime) -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go deleted file mode 100644 index 92ff02097..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "time" -) - -// setCTime will set the create time on a file. On Unix, the create -// time is updated as a side effect of setting the modified time, so -// no action is required. 
-func setCTime(path string, ctime time.Time) error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go deleted file mode 100644 index ab478f5c3..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "time" - - "golang.org/x/sys/windows" -) - -// setCTime will set the create time on a file. On Windows, this requires -// calling SetFileTime and explicitly including the create time. -func setCTime(path string, ctime time.Time) error { - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return err - } - h, err := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if err != nil { - return err - } - defer windows.Close(h) - c := windows.NsecToFiletime(ctime.UnixNano()) - return windows.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index f4bbcce74..000000000 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,6 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "errors" - -// ErrNotSupportedPlatform means the platform is not supported. -var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index ce5990c91..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,19 +0,0 @@ -package system - -import ( - "os" - "path/filepath" - "strings" -) - -// IsAbs is a platform-agnostic wrapper for filepath.IsAbs. -// -// On Windows, golang filepath.IsAbs does not consider a path \windows\system32 -// as absolute as it doesn't start with a drive-letter/colon combination. However, -// in docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon). This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go deleted file mode 100644 index f01f9385e..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import "os" - -// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return os.MkdirAll(path, perm) -} - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. 
-func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index 92e972ea2..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,135 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "regexp" - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System. -const SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - -// volumePath is a regular expression to check if a path is a Windows -// volume path (e.g., "\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}" -// or "\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}\"). -var volumePath = regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}\\?$`) - -// MkdirAllWithACL is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// an appropriate SDDL defined ACL. -func MkdirAllWithACL(path string, _ os.FileMode, sddl string) error { - sa, err := makeSecurityAttributes(sddl) - if err != nil { - return &os.PathError{Op: "mkdirall", Path: path, Err: err} - } - return mkdirall(path, sa) -} - -// MkdirAll is a custom version of os.MkdirAll that is volume path aware for -// Windows. It can be used as a drop-in replacement for os.MkdirAll. -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, nil) -} - -// mkdirall is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// a DACL. -func mkdirall(path string, perm *windows.SecurityAttributes) error { - if volumePath.MatchString(path) { - return nil - } - - // The rest of this method is largely copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent. - err = mkdirall(fixRootDirectory(path[:j-1]), perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = mkdirWithACL(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// mkdirWithACL creates a new directory. If there is an error, it will be of -// type *PathError. . -// -// This is a modified and combined version of os.Mkdir and windows.Mkdir -// in golang to cater for creating a directory am ACL permitting full -// access, with inheritance, to any subfolder/file for Built-in Administrators -// and Local System. 
-func mkdirWithACL(name string, sa *windows.SecurityAttributes) error { - if sa == nil { - return os.Mkdir(name, 0) - } - - namep, err := windows.UTF16PtrFromString(name) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - - err = windows.CreateDirectory(namep, sa) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return nil -} - -// fixRootDirectory fixes a reference to a drive's root directory to -// have the required trailing slash. -func fixRootDirectory(p string) string { - if len(p) == len(`\\?\c:`) { - if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' { - return p + `\` - } - } - return p -} - -func makeSecurityAttributes(sddl string) (*windows.SecurityAttributes, error) { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - var err error - sa.SecurityDescriptor, err = windows.SecurityDescriptorFromString(sddl) - if err != nil { - return nil, err - } - return &sa, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go deleted file mode 100644 index 7603efbbd..000000000 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// containerdRuntimeSupported determines if containerd should be the runtime. -var containerdRuntimeSupported = false - -// InitContainerdRuntime sets whether to use containerd for runtime on Windows. -func InitContainerdRuntime(cdPath string) { - if len(cdPath) > 0 { - containerdRuntimeSupported = true - } -} - -// ContainerdRuntimeSupported returns true if the use of containerd runtime is supported. -func ContainerdRuntimeSupported() bool { - return containerdRuntimeSupported -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go deleted file mode 100644 index 5e29a6b3b..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 359c791d9..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "os" - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. 
-func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return fromStatT(&fi) -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index 2a62237a4..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(unix.Mkdev(uint32(major), uint32(minor))) -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go deleted file mode 100644 index e218e742d..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, uint64(dev)) -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go deleted file mode 100644 index 34df0b923..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !freebsd && !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. 
-func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, dev) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go deleted file mode 100644 index 435b776ee..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build freebsd || netbsd - -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec, - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go deleted file mode 100644 index e0b629df0..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ /dev/null @@ -1,15 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec, - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_illumos.go b/vendor/github.com/docker/docker/pkg/system/stat_illumos.go deleted file mode 100644 index 851374e5d..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_illumos.go +++ /dev/null @@ -1,15 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim, - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 4309d42b9..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,22 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - // the type is 32bit on mips - rdev: uint64(s.Rdev), //nolint: unconvert - mtim: s.Mtim, - }, nil -} - -// FromStatT converts a syscall.Stat_t type to a system.Stat_t type -// This is exposed on Linux as pkg/archive/changes uses it. 
-func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go deleted file mode 100644 index 851374e5d..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,15 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim, - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go deleted file mode 100644 index 205e54677..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// IsDir reports whether s describes a directory. -func (s StatT) IsDir() bool { - return s.mode&syscall.S_IFDIR != 0 -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, &os.PathError{Op: "Stat", Path: path, Err: err} - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 10876cd73..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,50 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like permission, size, etc about a file. -type StatT struct { - mode os.FileMode - size int64 - mtim time.Time -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() time.Time { - return s.mtim -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - return fromStatT(&fi) -} - -// fromStatT converts a os.FileInfo type to a system.StatT type -func fromStatT(fi *os.FileInfo) (*StatT, error) { - return &StatT{ - size: (*fi).Size(), - mode: (*fi).Mode(), - mtim: (*fi).ModTime(), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go deleted file mode 100644 index f3a079f88..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build linux || freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - uts := []unix.Timespec{ - unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])), - unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])), - } - err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW) - if err != nil && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go deleted file mode 100644 index 7c19d5915..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !linux && !freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// LUtimesNano is only supported on linux and freebsd. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs.go b/vendor/github.com/docker/docker/pkg/system/xattrs.go deleted file mode 100644 index b3f4e8a21..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs.go +++ /dev/null @@ -1,18 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -type XattrError struct { - Op string - Attr string - Path string - Err error -} - -func (e *XattrError) Error() string { return e.Op + " " + e.Attr + " " + e.Path + ": " + e.Err.Error() } - -func (e *XattrError) Unwrap() error { return e.Err } - -// Timeout reports whether this error represents a timeout. -func (e *XattrError) Timeout() bool { - t, ok := e.Err.(interface{ Timeout() bool }) - return ok && t.Timeout() -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go deleted file mode 100644 index facfbb312..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// It will returns a nil slice and nil error if the xattr is not set. 
-func Lgetxattr(path string, attr string) ([]byte, error) { - sysErr := func(err error) ([]byte, error) { - return nil, &XattrError{Op: "lgetxattr", Attr: attr, Path: path, Err: err} - } - - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Lgetxattr(path, attr, []byte{}) - if errno != nil { - return sysErr(errno) - } - dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) - } - - switch { - case errno == unix.ENODATA: - return nil, nil - case errno != nil: - return sysErr(errno) - } - - return dest[:sz], nil -} - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - err := unix.Lsetxattr(path, attr, data, flags) - if err != nil { - return &XattrError{Op: "lsetxattr", Attr: attr, Path: path, Err: err} - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 2a3698f12..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !linux - -package system // import "github.com/docker/docker/pkg/system" - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- 8. Limitation of Liability.
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index 4049d780c..000000000 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,240 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := ParsePort(portStr) - return port -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp", "sctp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -func splitParts(rawport string) (string, string, string) { - parts := strings.Split(rawport, ":") - n := len(parts) - containerPort := parts[n-1] - - switch n { - case 1: - return "", "", containerPort - case 2: - return "", parts[0], containerPort - case 3: - return parts[0], parts[1], containerPort - default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerPort - } -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { - var proto string - ip, hostPort, containerPort := splitParts(rawPort) - proto, containerPort = SplitProtoPort(containerPort) - - if ip != "" && ip[0] == '[' { - // Strip [] from IPV6 addresses - rawIP, _, err := net.SplitHostPort(ip + ":") - if err != nil { - return nil, fmt.Errorf("invalid IP address %v: %w", ip, err) - } - ip = rawIP - } - if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("invalid IP address: %s", ip) - } - if containerPort == "" { - return nil, fmt.Errorf("no port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: ip, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index e4b53e8a3..000000000 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,33 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("empty string specified for ports") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("invalid range specified for port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index b6eed145e..000000000 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter 
struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// Less sorts the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. -func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok && len(binding) > 0 { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go deleted file mode 100644 index 99846ffdd..000000000 --- a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go +++ /dev/null @@ -1,81 +0,0 @@ -package sockets - -import ( - "errors" - "net" - "sync" -) - -var errClosed = errors.New("use of closed network connection") - -// InmemSocket implements net.Listener using in-memory only connections. 
-type InmemSocket struct { - chConn chan net.Conn - chClose chan struct{} - addr string - mu sync.Mutex -} - -// dummyAddr is used to satisfy net.Addr for the in-mem socket -// it is just stored as a string and returns the string for all calls -type dummyAddr string - -// NewInmemSocket creates an in-memory only net.Listener -// The addr argument can be any string, but is used to satisfy the `Addr()` part -// of the net.Listener interface -func NewInmemSocket(addr string, bufSize int) *InmemSocket { - return &InmemSocket{ - chConn: make(chan net.Conn, bufSize), - chClose: make(chan struct{}), - addr: addr, - } -} - -// Addr returns the socket's addr string to satisfy net.Listener -func (s *InmemSocket) Addr() net.Addr { - return dummyAddr(s.addr) -} - -// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. -func (s *InmemSocket) Accept() (net.Conn, error) { - select { - case conn := <-s.chConn: - return conn, nil - case <-s.chClose: - return nil, errClosed - } -} - -// Close closes the listener. It will be unavailable for use once closed. -func (s *InmemSocket) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - select { - case <-s.chClose: - default: - close(s.chClose) - } - return nil -} - -// Dial is used to establish a connection with the in-mem server -func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { - srvConn, clientConn := net.Pipe() - select { - case s.chConn <- srvConn: - case <-s.chClose: - return nil, errClosed - } - - return clientConn, nil -} - -// Network returns the addr string, satisfies net.Addr -func (a dummyAddr) Network() string { - return string(a) -} - -// String returns the string form -func (a dummyAddr) String() string { - return string(a) -} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go deleted file mode 100644 index c897cb02a..000000000 --- a/vendor/github.com/docker/go-connections/sockets/proxy.go +++ /dev/null @@ -1,28 +0,0 @@ -package sockets - -import ( - "net" - "os" - "strings" -) - -// GetProxyEnv allows access to the uppercase and the lowercase forms of -// proxy-related variables. See the Go specification for details on these -// variables. https://golang.org/pkg/net/http/ -func GetProxyEnv(key string) string { - proxyValue := os.Getenv(strings.ToUpper(key)) - if proxyValue == "" { - return os.Getenv(strings.ToLower(key)) - } - return proxyValue -} - -// DialerFromEnvironment was previously used to configure a net.Dialer to route -// connections through a SOCKS proxy. -// DEPRECATED: SOCKS proxies are now supported by configuring only -// http.Transport.Proxy, and no longer require changing http.Transport.Dial. -// Therefore, only sockets.ConfigureTransport() needs to be called, and any -// sockets.DialerFromEnvironment() calls can be dropped. -func DialerFromEnvironment(direct *net.Dialer) (*net.Dialer, error) { - return direct, nil -} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go deleted file mode 100644 index b0eae239d..000000000 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ /dev/null @@ -1,37 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP sockets. 
-package sockets - -import ( - "errors" - "net" - "net/http" - "time" -) - -const defaultTimeout = 10 * time.Second - -// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. -var ErrProtocolNotAvailable = errors.New("protocol not available") - -// ConfigureTransport configures the specified [http.Transport] according to the specified proto -// and addr. -// -// If the proto is unix (using a unix socket to communicate) or npipe the compression is disabled. -// For other protos, compression is enabled. If you want to manually enable/disable compression, -// make sure you do it _after_ any subsequent calls to ConfigureTransport is made against the same -// [http.Transport]. -func ConfigureTransport(tr *http.Transport, proto, addr string) error { - switch proto { - case "unix": - return configureUnixTransport(tr, proto, addr) - case "npipe": - return configureNpipeTransport(tr, proto, addr) - default: - tr.Proxy = http.ProxyFromEnvironment - tr.DisableCompression = false - tr.DialContext = (&net.Dialer{ - Timeout: defaultTimeout, - }).DialContext - } - return nil -} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go deleted file mode 100644 index 78a34a980..000000000 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !windows - -package sockets - -import ( - "context" - "fmt" - "net" - "net/http" - "syscall" - "time" -) - -const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) - -func configureUnixTransport(tr *http.Transport, proto, addr string) error { - if len(addr) > maxUnixSocketPathSize { - return fmt.Errorf("unix socket path %q is too long", addr) - } - // No need for compression in local communications. - tr.DisableCompression = true - dialer := &net.Dialer{ - Timeout: defaultTimeout, - } - tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { - return dialer.DialContext(ctx, proto, addr) - } - return nil -} - -func configureNpipeTransport(tr *http.Transport, proto, addr string) error { - return ErrProtocolNotAvailable -} - -// DialPipe connects to a Windows named pipe. -// This is not supported on other OSes. -func DialPipe(_ string, _ time.Duration) (net.Conn, error) { - return nil, syscall.EAFNOSUPPORT -} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go deleted file mode 100644 index 7acafc5a2..000000000 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -package sockets - -import ( - "context" - "net" - "net/http" - "time" - - "github.com/Microsoft/go-winio" -) - -func configureUnixTransport(tr *http.Transport, proto, addr string) error { - return ErrProtocolNotAvailable -} - -func configureNpipeTransport(tr *http.Transport, proto, addr string) error { - // No need for compression in local communications. - tr.DisableCompression = true - tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { - return winio.DialPipeContext(ctx, addr) - } - return nil -} - -// DialPipe connects to a Windows named pipe. 
-func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { - return winio.DialPipe(addr, &timeout) -} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go deleted file mode 100644 index 53cbb6c79..000000000 --- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP sockets. -package sockets - -import ( - "crypto/tls" - "net" -) - -// NewTCPSocket creates a TCP socket listener with the specified address and -// the specified tls configuration. If TLSConfig is set, will encapsulate the -// TCP listener inside a TLS one. -func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - if tlsConfig != nil { - tlsConfig.NextProtos = []string{"http/1.1"} - l = tls.NewListener(l, tlsConfig) - } - return l, nil -} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go deleted file mode 100644 index b9233521e..000000000 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ /dev/null @@ -1,126 +0,0 @@ -//go:build !windows - -/* -Package sockets is a simple unix domain socket wrapper. - -# Usage - -For example: - - import( - "fmt" - "net" - "os" - "github.com/docker/go-connections/sockets" - ) - - func main() { - l, err := sockets.NewUnixSocketWithOpts("/path/to/sockets", - sockets.WithChown(0,0),sockets.WithChmod(0660)) - if err != nil { - panic(err) - } - echoStr := "hello" - - go func() { - for { - conn, err := l.Accept() - if err != nil { - return - } - conn.Write([]byte(echoStr)) - conn.Close() - } - }() - - conn, err := net.Dial("unix", path) - if err != nil { - t.Fatal(err) - } - - buf := make([]byte, 5) - if _, err := conn.Read(buf); err != nil { - panic(err) - } else if string(buf) != echoStr { - panic(fmt.Errorf("msg may lost")) - } - } -*/ -package sockets - -import ( - "net" - "os" - "syscall" -) - -// SockOption sets up socket file's creating option -type SockOption func(string) error - -// WithChown modifies the socket file's uid and gid -func WithChown(uid, gid int) SockOption { - return func(path string) error { - if err := os.Chown(path, uid, gid); err != nil { - return err - } - return nil - } -} - -// WithChmod modifies socket file's access mode. -func WithChmod(mask os.FileMode) SockOption { - return func(path string) error { - if err := os.Chmod(path, mask); err != nil { - return err - } - return nil - } -} - -// NewUnixSocketWithOpts creates a unix socket with the specified options. -// By default, socket permissions are 0000 (i.e.: no access for anyone); pass -// WithChmod() and WithChown() to set the desired ownership and permissions. -// -// This function temporarily changes the system's "umask" to 0777 to work around -// a race condition between creating the socket and setting its permissions. While -// this should only be for a short duration, it may affect other processes that -// create files/directories during that period. -func NewUnixSocketWithOpts(path string, opts ...SockOption) (net.Listener, error) { - if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { - return nil, err - } - - // net.Listen does not allow for permissions to be set. 
As a result, when - // specifying custom permissions ("WithChmod()"), there is a short time - // between creating the socket and applying the permissions, during which - // the socket permissions are Less restrictive than desired. - // - // To work around this limitation of net.Listen(), we temporarily set the - // umask to 0777, which forces the socket to be created with 000 permissions - // (i.e.: no access for anyone). After that, WithChmod() must be used to set - // the desired permissions. - // - // We don't use "defer" here, to reset the umask to its original value as soon - // as possible. Ideally we'd be able to detect if WithChmod() was passed as - // an option, and skip changing umask if default permissions are used. - origUmask := syscall.Umask(0o777) - l, err := net.Listen("unix", path) - syscall.Umask(origUmask) - if err != nil { - return nil, err - } - - for _, op := range opts { - if err := op(path); err != nil { - _ = l.Close() - return nil, err - } - } - - return l, nil -} - -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path string, gid int) (net.Listener, error) { - return NewUnixSocketWithOpts(path, WithChown(0, gid), WithChmod(0o660)) -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool.go deleted file mode 100644 index f84c624ba..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool.go +++ /dev/null @@ -1,16 +0,0 @@ -package tlsconfig - -import ( - "crypto/x509" - "runtime" -) - -// SystemCertPool returns a copy of the system cert pool, -// returns an error if failed to load or empty pool on windows. -func SystemCertPool() (*x509.CertPool, error) { - certpool, err := x509.SystemCertPool() - if err != nil && runtime.GOOS == "windows" { - return x509.NewCertPool(), nil - } - return certpool, err -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go deleted file mode 100644 index 606c98a38..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ /dev/null @@ -1,261 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "os" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. - CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType - // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS - // creds will include exclusively the roots in that CA file. If no CA file is provided, - // the system pool will be used. - ExclusiveRootPools bool - MinVersion uint16 - // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted. 
- // - // Deprecated: Use of encrypted TLS private keys has been deprecated, and - // will be removed in a future release. Golang has deprecated support for - // legacy PEM encryption (as specified in RFC 1423), as it is insecure by - // design (see https://go-review.googlesource.com/c/go/+/264159). - Passphrase string -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, -} - -// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) - -// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. -func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsConfig := &tls.Config{ - // Avoid fallback by default to SSL protocols < TLS1.2 - MinVersion: tls.VersionTLS12, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, - } - - for _, op := range ops { - op(tlsConfig) - } - - return tlsConfig -} - -// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. -func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsConfig := &tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, - } - - for _, op := range ops { - op(tlsConfig) - } - - return tlsConfig -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - var ( - certPool *x509.CertPool - err error - ) - if exclusivePool { - certPool = x509.NewCertPool() - } else { - certPool, err = SystemCertPool() - if err != nil { - return nil, fmt.Errorf("failed to read system certificates: %v", err) - } - } - pemData, err := os.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pemData) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - return certPool, nil -} - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, - tls.VersionTLS13: {}, -} - -// isValidMinVersion checks that the input value is a valid tls minimum version -func isValidMinVersion(version uint16) bool { - _, ok := allTLSVersions[version] - return ok -} - -// adjustMinVersion sets the MinVersion on `config`, the input configuration. -// It assumes the current MinVersion on the `config` is the lowest allowed. -func adjustMinVersion(options Options, config *tls.Config) error { - if options.MinVersion > 0 { - if !isValidMinVersion(options.MinVersion) { - return fmt.Errorf("invalid minimum TLS version: %x", options.MinVersion) - } - if options.MinVersion < config.MinVersion { - return fmt.Errorf("requested minimum TLS version is too low. 
Should be at-least: %x", config.MinVersion) - } - config.MinVersion = options.MinVersion - } - - return nil -} - -// IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when trying to decrypt a TLS private key. -// -// Deprecated: Use of encrypted TLS private keys has been deprecated, and -// will be removed in a future release. Golang has deprecated support for -// legacy PEM encryption (as specified in RFC 1423), as it is insecure by -// design (see https://go-review.googlesource.com/c/go/+/264159). -func IsErrEncryptedKey(err error) bool { - return errors.Is(err, x509.IncorrectPasswordError) -} - -// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. -// If the private key is encrypted, 'passphrase' is used to decrypted the -// private key. -func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { - // this section makes some small changes to code from notary/tuf/utils/x509.go - pemBlock, _ := pem.Decode(keyBytes) - if pemBlock == nil { - return nil, fmt.Errorf("no valid private key found") - } - - var err error - if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // Ignore SA1019 (IsEncryptedPEMBlock is deprecated) - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) //nolint:staticcheck // Ignore SA1019 (DecryptPEMBlock is deprecated) - if err != nil { - return nil, fmt.Errorf("private key is encrypted, but could not decrypt it: %w", err) - } - keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) - } - - return keyBytes, nil -} - -// getCert returns a Certificate from the CertFile and KeyFile in 'options', -// if the key is encrypted, the Passphrase in 'options' will be used to -// decrypt it. -func getCert(options Options) ([]tls.Certificate, error) { - if options.CertFile == "" && options.KeyFile == "" { - return nil, nil - } - - cert, err := os.ReadFile(options.CertFile) - if err != nil { - return nil, err - } - - prKeyBytes, err := os.ReadFile(options.KeyFile) - if err != nil { - return nil, err - } - - prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) - if err != nil { - return nil, err - } - - tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) - if err != nil { - return nil, err - } - - return []tls.Certificate{tlsCert}, nil -} - -// Client returns a TLS configuration meant to be used by a client. -func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault() - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify && options.CAFile != "" { - CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - tlsCerts, err := getCert(options) - if err != nil { - return nil, fmt.Errorf("could not load X509 key pair: %w", err) - } - tlsConfig.Certificates = tlsCerts - - if err := adjustMinVersion(options, tlsConfig); err != nil { - return nil, err - } - - return tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. 
-func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault() - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("error reading X509 key pair - make sure the key is not encrypted (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { - CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - - if err := adjustMinVersion(options, tlsConfig); err != nil { - return nil, err - } - - return tlsConfig, nil -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index a82f9fa52..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 4aac7c741..000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "akihiro.suda.cz@hco.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e13..000000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index af9d60552..000000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get golang.org/x/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index 48dd8744d..000000000 --- a/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index c245a8951..000000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,154 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[byte]int64 - -var ( - decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} - binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} -) - -var ( - decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} -) - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. 
-func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - // TODO: rewrite to use strings.Cut if there's a space - // once Go < 1.18 is deprecated. - sep := strings.LastIndexAny(sizeStr, "01234567890. ") - if sep == -1 { - // There should be at least a digit. - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - var num, sfx string - if sizeStr[sep] != ' ' { - num = sizeStr[:sep+1] - sfx = sizeStr[sep+1:] - } else { - // Omit the space separator. - num = sizeStr[:sep] - sfx = sizeStr[sep+1:] - } - - size, err := strconv.ParseFloat(num, 64) - if err != nil { - return -1, err - } - // Backward compatibility: reject negative sizes. - if size < 0 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - if len(sfx) == 0 { - return int64(size), nil - } - - // Process the suffix. - - if len(sfx) > 3 { // Too long. - goto badSuffix - } - sfx = strings.ToLower(sfx) - // Trivial case: b suffix. - if sfx[0] == 'b' { - if len(sfx) > 1 { // no extra characters allowed after b. - goto badSuffix - } - return int64(size), nil - } - // A suffix from the map. - if mul, ok := uMap[sfx[0]]; ok { - size *= float64(mul) - } else { - goto badSuffix - } - - // The suffix may have extra "b" or "ib" (e.g. KiB or MB). - switch { - case len(sfx) == 2 && sfx[1] != 'b': - goto badSuffix - case len(sfx) == 3 && sfx[1:] != "ib": - goto badSuffix - } - - return int64(size), nil - -badSuffix: - return -1, fmt.Errorf("invalid suffix: '%s'", sfx) -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index fca0400cc..000000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,123 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if *hard != -1 { - if soft == -1 { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) - } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt deleted file mode 100644 index e028b46a9..000000000 --- a/vendor/github.com/felixge/httpsnoop/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile deleted file mode 100644 index 4e12afdd9..000000000 --- a/vendor/github.com/felixge/httpsnoop/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -.PHONY: ci generate clean - -ci: clean generate - go test -race -v ./... - -generate: - go generate . 
- -clean: - rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md deleted file mode 100644 index cf6b42f3d..000000000 --- a/vendor/github.com/felixge/httpsnoop/README.md +++ /dev/null @@ -1,95 +0,0 @@ -# httpsnoop - -Package httpsnoop provides an easy way to capture http related metrics (i.e. -response time, bytes written, and http status code) from your application's -http.Handlers. - -Doing this requires non-trivial wrapping of the http.ResponseWriter interface, -which is also exposed for users interested in a more low-level API. - -[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop) -[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml) - -## Usage Example - -```go -// myH is your app's http handler, perhaps a http.ServeMux or similar. -var myH http.Handler -// wrappedH wraps myH in order to log every request. -wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - m := httpsnoop.CaptureMetrics(myH, w, r) - log.Printf( - "%s %s (code=%d dt=%s written=%d)", - r.Method, - r.URL, - m.Code, - m.Duration, - m.Written, - ) -}) -http.ListenAndServe(":8080", wrappedH) -``` - -## Why this package exists - -Instrumenting an application's http.Handler is surprisingly difficult. - -However if you google for e.g. "capture ResponseWriter status code" you'll find -lots of advise and code examples that suggest it to be a fairly trivial -undertaking. Unfortunately everything I've seen so far has a high chance of -breaking your application. - -The main problem is that a `http.ResponseWriter` often implements additional -interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and -`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` -in your own struct that also implements the `http.ResponseWriter` interface -will hide the additional interfaces mentioned above. This has a high change of -introducing subtle bugs into any non-trivial application. - -Another approach I've seen people take is to return a struct that implements -all of the interfaces above. However, that's also problematic, because it's -difficult to fake some of these interfaces behaviors when the underlying -`http.ResponseWriter` doesn't have an implementation. It's also dangerous, -because an application may choose to operate differently, merely because it -detects the presence of these additional interfaces. - -This package solves this problem by checking which additional interfaces a -`http.ResponseWriter` implements, returning a wrapped version implementing the -exact same set of interfaces. - -Additionally this package properly handles edge cases such as `WriteHeader` not -being called, or called more than once, as well as concurrent calls to -`http.ResponseWriter` methods, and even calls happening after the wrapped -`ServeHTTP` has already returned. - -Unfortunately this package is not perfect either. It's possible that it is -still missing some interfaces provided by the go core (let me know if you find -one), and it won't work for applications adding their own interfaces into the -mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying -`http.ResponseWriter` and type-assert the result to its other interfaces. 
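The README paragraph above ends by pointing at `httpsnoop.Unwrap(w)` as the escape hatch for interfaces the wrapper cannot preserve. Below is a minimal sketch of that pattern, using only `CaptureMetrics` and `Unwrap` as defined in the vendored sources this diff removes; the `auditWriter` interface, the `X-Audit-Id` header, and the listen address are invented for illustration.

```go
package main

import (
	"net/http"

	"github.com/felixge/httpsnoop"
)

// auditWriter is a hypothetical application-specific interface. It is not one
// of the stdlib interfaces httpsnoop knows how to re-expose, so the wrapped
// writer hides it and we must unwrap before type-asserting.
type auditWriter interface {
	AuditID() string
}

func handler(w http.ResponseWriter, r *http.Request) {
	// Unwrap peels off httpsnoop's wrapper(s) until it reaches a writer that
	// is not an Unwrapper, i.e. the server's original ResponseWriter.
	if aw, ok := httpsnoop.Unwrap(w).(auditWriter); ok {
		w.Header().Set("X-Audit-Id", aw.AuditID())
	}
	w.Write([]byte("ok"))
}

func main() {
	wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Metrics (status code, duration, bytes written) are returned here.
		_ = httpsnoop.CaptureMetrics(http.HandlerFunc(handler), w, r)
	})
	http.ListenAndServe(":8080", wrapped)
}
```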
- -However, hopefully the explanation above has sufficiently scared you of rolling -your own solution to this problem. httpsnoop may still break your application, -but at least it tries to avoid it as much as possible. - -Anyway, the real problem here is that smuggling additional interfaces inside -`http.ResponseWriter` is a problematic design choice, but it probably goes as -deep as the Go language specification itself. But that's okay, I still prefer -Go over the alternatives ;). - -## Performance - -``` -BenchmarkBaseline-8 20000 94912 ns/op -BenchmarkCaptureMetrics-8 20000 95461 ns/op -``` - -As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an -overhead of ~500 ns per http request on my machine. However, the margin of -error appears to be larger than that, therefor it should be reasonable to -assume that the overhead introduced by `CaptureMetrics` is absolutely -negligible. - -## License - -MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go deleted file mode 100644 index bec7b71b3..000000000 --- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go +++ /dev/null @@ -1,86 +0,0 @@ -package httpsnoop - -import ( - "io" - "net/http" - "time" -) - -// Metrics holds metrics captured from CaptureMetrics. -type Metrics struct { - // Code is the first http response code passed to the WriteHeader func of - // the ResponseWriter. If no such call is made, a default code of 200 is - // assumed instead. - Code int - // Duration is the time it took to execute the handler. - Duration time.Duration - // Written is the number of bytes successfully written by the Write or - // ReadFrom function of the ResponseWriter. ResponseWriters may also write - // data to their underlaying connection directly (e.g. headers), but those - // are not tracked. Therefor the number of Written bytes will usually match - // the size of the response body. - Written int64 -} - -// CaptureMetrics wraps the given hnd, executes it with the given w and r, and -// returns the metrics it captured from it. -func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { - return CaptureMetricsFn(w, func(ww http.ResponseWriter) { - hnd.ServeHTTP(ww, r) - }) -} - -// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the -// resulting metrics. This is very similar to CaptureMetrics (which is just -// sugar on top of this func), but is a more usable interface if your -// application doesn't use the Go http.Handler interface. -func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { - m := Metrics{Code: http.StatusOK} - m.CaptureMetrics(w, fn) - return m -} - -// CaptureMetrics wraps w and calls fn with the wrapped w and updates -// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn, -// but allows one to customize starting Metrics object. 
-func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { - var ( - start = time.Now() - headerWritten bool - hooks = Hooks{ - WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { - return func(code int) { - next(code) - - if !(code >= 100 && code <= 199) && !headerWritten { - m.Code = code - headerWritten = true - } - } - }, - - Write: func(next WriteFunc) WriteFunc { - return func(p []byte) (int, error) { - n, err := next(p) - - m.Written += int64(n) - headerWritten = true - return n, err - } - }, - - ReadFrom: func(next ReadFromFunc) ReadFromFunc { - return func(src io.Reader) (int64, error) { - n, err := next(src) - - headerWritten = true - m.Written += n - return n, err - } - }, - } - ) - - fn(Wrap(w, hooks)) - m.Duration += time.Since(start) -} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go deleted file mode 100644 index 203c35b3c..000000000 --- a/vendor/github.com/felixge/httpsnoop/docs.go +++ /dev/null @@ -1,10 +0,0 @@ -// Package httpsnoop provides an easy way to capture http related metrics (i.e. -// response time, bytes written, and http status code) from your application's -// http.Handlers. -// -// Doing this requires non-trivial wrapping of the http.ResponseWriter -// interface, which is also exposed for users interested in a more low-level -// API. -package httpsnoop - -//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go deleted file mode 100644 index 101cedde6..000000000 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go +++ /dev/null @@ -1,436 +0,0 @@ -// +build go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT. - -package httpsnoop - -import ( - "bufio" - "io" - "net" - "net/http" -) - -// HeaderFunc is part of the http.ResponseWriter interface. -type HeaderFunc func() http.Header - -// WriteHeaderFunc is part of the http.ResponseWriter interface. -type WriteHeaderFunc func(code int) - -// WriteFunc is part of the http.ResponseWriter interface. -type WriteFunc func(b []byte) (int, error) - -// FlushFunc is part of the http.Flusher interface. -type FlushFunc func() - -// CloseNotifyFunc is part of the http.CloseNotifier interface. -type CloseNotifyFunc func() <-chan bool - -// HijackFunc is part of the http.Hijacker interface. -type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) - -// ReadFromFunc is part of the io.ReaderFrom interface. -type ReadFromFunc func(src io.Reader) (int64, error) - -// PushFunc is part of the http.Pusher interface. -type PushFunc func(target string, opts *http.PushOptions) error - -// Hooks defines a set of method interceptors for methods included in -// http.ResponseWriter as well as some others. You can think of them as -// middleware for the function calls they target. See Wrap for more details. -type Hooks struct { - Header func(HeaderFunc) HeaderFunc - WriteHeader func(WriteHeaderFunc) WriteHeaderFunc - Write func(WriteFunc) WriteFunc - Flush func(FlushFunc) FlushFunc - CloseNotify func(CloseNotifyFunc) CloseNotifyFunc - Hijack func(HijackFunc) HijackFunc - ReadFrom func(ReadFromFunc) ReadFromFunc - Push func(PushFunc) PushFunc -} - -// Wrap returns a wrapped version of w that provides the exact same interface -// as w. 
Specifically if w implements any combination of: -// -// - http.Flusher -// - http.CloseNotifier -// - http.Hijacker -// - io.ReaderFrom -// - http.Pusher -// -// The wrapped version will implement the exact same combination. If no hooks -// are set, the wrapped version also behaves exactly as w. Hooks targeting -// methods not supported by w are ignored. Any other hooks will intercept the -// method they target and may modify the call's arguments and/or return values. -// The CaptureMetrics implementation serves as a working example for how the -// hooks can be used. -func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { - rw := &rw{w: w, h: hooks} - _, i0 := w.(http.Flusher) - _, i1 := w.(http.CloseNotifier) - _, i2 := w.(http.Hijacker) - _, i3 := w.(io.ReaderFrom) - _, i4 := w.(http.Pusher) - switch { - // combination 1/32 - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - }{rw, rw} - // combination 2/32 - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Pusher - }{rw, rw, rw} - // combination 3/32 - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - io.ReaderFrom - }{rw, rw, rw} - // combination 4/32 - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw} - // combination 5/32 - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - }{rw, rw, rw} - // combination 6/32 - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - http.Pusher - }{rw, rw, rw, rw} - // combination 7/32 - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 8/32 - case !i0 && !i1 && i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 9/32 - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - }{rw, rw, rw} - // combination 10/32 - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{rw, rw, rw, rw} - // combination 11/32 - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 12/32 - case !i0 && i1 && !i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 13/32 - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw} - // combination 14/32 - case !i0 && i1 && i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 15/32 - case !i0 && i1 && i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 16/32 - case !i0 && i1 && i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw} - // combination 17/32 - case i0 && !i1 && !i2 && !i3 && !i4: - 
return struct { - Unwrapper - http.ResponseWriter - http.Flusher - }{rw, rw, rw} - // combination 18/32 - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Pusher - }{rw, rw, rw, rw} - // combination 19/32 - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 20/32 - case i0 && !i1 && !i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 21/32 - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - }{rw, rw, rw, rw} - // combination 22/32 - case i0 && !i1 && i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 23/32 - case i0 && !i1 && i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 24/32 - case i0 && !i1 && i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw} - // combination 25/32 - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - }{rw, rw, rw, rw} - // combination 26/32 - case i0 && i1 && !i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Pusher - }{rw, rw, rw, rw, rw} - // combination 27/32 - case i0 && i1 && !i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 28/32 - case i0 && i1 && !i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw} - // combination 29/32 - case i0 && i1 && i2 && !i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw, rw} - // combination 30/32 - case i0 && i1 && i2 && !i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - http.Pusher - }{rw, rw, rw, rw, rw, rw} - // combination 31/32 - case i0 && i1 && i2 && i3 && !i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw, rw} - // combination 32/32 - case i0 && i1 && i2 && i3 && i4: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - http.Pusher - }{rw, rw, rw, rw, rw, rw, rw} - } - panic("unreachable") -} - -type rw struct { - w http.ResponseWriter - h Hooks -} - -func (w *rw) Unwrap() http.ResponseWriter { - return w.w -} - -func (w *rw) Header() http.Header { - f := w.w.(http.ResponseWriter).Header - if w.h.Header != nil { - f = w.h.Header(f) - } - return f() -} - -func (w *rw) WriteHeader(code int) { - f := w.w.(http.ResponseWriter).WriteHeader - if w.h.WriteHeader != nil { - f = w.h.WriteHeader(f) - } - f(code) -} - -func (w *rw) Write(b []byte) (int, error) { - f := w.w.(http.ResponseWriter).Write - if w.h.Write != nil { - f = w.h.Write(f) - } - return f(b) -} - -func (w *rw) Flush() { - f := w.w.(http.Flusher).Flush - if 
w.h.Flush != nil { - f = w.h.Flush(f) - } - f() -} - -func (w *rw) CloseNotify() <-chan bool { - f := w.w.(http.CloseNotifier).CloseNotify - if w.h.CloseNotify != nil { - f = w.h.CloseNotify(f) - } - return f() -} - -func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { - f := w.w.(http.Hijacker).Hijack - if w.h.Hijack != nil { - f = w.h.Hijack(f) - } - return f() -} - -func (w *rw) ReadFrom(src io.Reader) (int64, error) { - f := w.w.(io.ReaderFrom).ReadFrom - if w.h.ReadFrom != nil { - f = w.h.ReadFrom(f) - } - return f(src) -} - -func (w *rw) Push(target string, opts *http.PushOptions) error { - f := w.w.(http.Pusher).Push - if w.h.Push != nil { - f = w.h.Push(f) - } - return f(target, opts) -} - -type Unwrapper interface { - Unwrap() http.ResponseWriter -} - -// Unwrap returns the underlying http.ResponseWriter from within zero or more -// layers of httpsnoop wrappers. -func Unwrap(w http.ResponseWriter) http.ResponseWriter { - if rw, ok := w.(Unwrapper); ok { - // recurse until rw.Unwrap() returns a non-Unwrapper - return Unwrap(rw.Unwrap()) - } else { - return w - } -} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go deleted file mode 100644 index e0951df15..000000000 --- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go +++ /dev/null @@ -1,278 +0,0 @@ -// +build !go1.8 -// Code generated by "httpsnoop/codegen"; DO NOT EDIT. - -package httpsnoop - -import ( - "bufio" - "io" - "net" - "net/http" -) - -// HeaderFunc is part of the http.ResponseWriter interface. -type HeaderFunc func() http.Header - -// WriteHeaderFunc is part of the http.ResponseWriter interface. -type WriteHeaderFunc func(code int) - -// WriteFunc is part of the http.ResponseWriter interface. -type WriteFunc func(b []byte) (int, error) - -// FlushFunc is part of the http.Flusher interface. -type FlushFunc func() - -// CloseNotifyFunc is part of the http.CloseNotifier interface. -type CloseNotifyFunc func() <-chan bool - -// HijackFunc is part of the http.Hijacker interface. -type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) - -// ReadFromFunc is part of the io.ReaderFrom interface. -type ReadFromFunc func(src io.Reader) (int64, error) - -// Hooks defines a set of method interceptors for methods included in -// http.ResponseWriter as well as some others. You can think of them as -// middleware for the function calls they target. See Wrap for more details. -type Hooks struct { - Header func(HeaderFunc) HeaderFunc - WriteHeader func(WriteHeaderFunc) WriteHeaderFunc - Write func(WriteFunc) WriteFunc - Flush func(FlushFunc) FlushFunc - CloseNotify func(CloseNotifyFunc) CloseNotifyFunc - Hijack func(HijackFunc) HijackFunc - ReadFrom func(ReadFromFunc) ReadFromFunc -} - -// Wrap returns a wrapped version of w that provides the exact same interface -// as w. Specifically if w implements any combination of: -// -// - http.Flusher -// - http.CloseNotifier -// - http.Hijacker -// - io.ReaderFrom -// -// The wrapped version will implement the exact same combination. If no hooks -// are set, the wrapped version also behaves exactly as w. Hooks targeting -// methods not supported by w are ignored. Any other hooks will intercept the -// method they target and may modify the call's arguments and/or return values. -// The CaptureMetrics implementation serves as a working example for how the -// hooks can be used. 
-func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { - rw := &rw{w: w, h: hooks} - _, i0 := w.(http.Flusher) - _, i1 := w.(http.CloseNotifier) - _, i2 := w.(http.Hijacker) - _, i3 := w.(io.ReaderFrom) - switch { - // combination 1/16 - case !i0 && !i1 && !i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - }{rw, rw} - // combination 2/16 - case !i0 && !i1 && !i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - io.ReaderFrom - }{rw, rw, rw} - // combination 3/16 - case !i0 && !i1 && i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - }{rw, rw, rw} - // combination 4/16 - case !i0 && !i1 && i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 5/16 - case !i0 && i1 && !i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - }{rw, rw, rw} - // combination 6/16 - case !i0 && i1 && !i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 7/16 - case !i0 && i1 && i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw} - // combination 8/16 - case !i0 && i1 && i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 9/16 - case i0 && !i1 && !i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - }{rw, rw, rw} - // combination 10/16 - case i0 && !i1 && !i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{rw, rw, rw, rw} - // combination 11/16 - case i0 && !i1 && i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - }{rw, rw, rw, rw} - // combination 12/16 - case i0 && !i1 && i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 13/16 - case i0 && i1 && !i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - }{rw, rw, rw, rw} - // combination 14/16 - case i0 && i1 && !i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - io.ReaderFrom - }{rw, rw, rw, rw, rw} - // combination 15/16 - case i0 && i1 && i2 && !i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - }{rw, rw, rw, rw, rw} - // combination 16/16 - case i0 && i1 && i2 && i3: - return struct { - Unwrapper - http.ResponseWriter - http.Flusher - http.CloseNotifier - http.Hijacker - io.ReaderFrom - }{rw, rw, rw, rw, rw, rw} - } - panic("unreachable") -} - -type rw struct { - w http.ResponseWriter - h Hooks -} - -func (w *rw) Unwrap() http.ResponseWriter { - return w.w -} - -func (w *rw) Header() http.Header { - f := w.w.(http.ResponseWriter).Header - if w.h.Header != nil { - f = w.h.Header(f) - } - return f() -} - -func (w *rw) WriteHeader(code int) { - f := w.w.(http.ResponseWriter).WriteHeader - if w.h.WriteHeader != nil { - f = w.h.WriteHeader(f) - } - f(code) -} - -func (w *rw) Write(b []byte) (int, error) { - f := w.w.(http.ResponseWriter).Write - if w.h.Write != nil { - f = w.h.Write(f) - } - return f(b) -} - -func (w *rw) Flush() { - f := w.w.(http.Flusher).Flush - if w.h.Flush != nil { - f = w.h.Flush(f) - } - f() -} - -func (w *rw) CloseNotify() <-chan bool { - f := 
w.w.(http.CloseNotifier).CloseNotify - if w.h.CloseNotify != nil { - f = w.h.CloseNotify(f) - } - return f() -} - -func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { - f := w.w.(http.Hijacker).Hijack - if w.h.Hijack != nil { - f = w.h.Hijack(f) - } - return f() -} - -func (w *rw) ReadFrom(src io.Reader) (int64, error) { - f := w.w.(io.ReaderFrom).ReadFrom - if w.h.ReadFrom != nil { - f = w.h.ReadFrom(f) - } - return f(src) -} - -type Unwrapper interface { - Unwrap() http.ResponseWriter -} - -// Unwrap returns the underlying http.ResponseWriter from within zero or more -// layers of httpsnoop wrappers. -func Unwrap(w http.ResponseWriter) http.ResponseWriter { - if rw, ok := w.(Unwrapper); ok { - // recurse until rw.Unwrap() returns a non-Unwrapper - return Unwrap(rw.Unwrap()) - } else { - return w - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml deleted file mode 100644 index ffc7b992b..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ /dev/null @@ -1,13 +0,0 @@ -freebsd_task: - name: 'FreeBSD' - freebsd_instance: - image_family: freebsd-13-2 - install_script: - - pkg update -f - - pkg install -y go - test_script: - # run tests as user "cirrus" instead of root - - pw useradd cirrus -m - - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore deleted file mode 100644 index 391cc076b..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -# go test -c output -*.test -*.test.exe - -# Output of go build ./cmd/fsnotify -/fsnotify -/fsnotify.exe diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap deleted file mode 100644 index a04f2907f..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.mailmap +++ /dev/null @@ -1,2 +0,0 @@ -Chris Howey -Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md deleted file mode 100644 index e0e575754..000000000 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ /dev/null @@ -1,541 +0,0 @@ -# Changelog - -Unreleased ----------- -Nothing yet. - -1.7.0 - 2023-10-22 ------------------- -This version of fsnotify needs Go 1.17. - -### Additions - -- illumos: add FEN backend to support illumos and Solaris. 
([#371]) - -- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful - in cases where you can't control the kernel buffer and receive a large number - of events in bursts. ([#550], [#572]) - -- all: add `AddWith()`, which is identical to `Add()` but allows passing - options. ([#521]) - -- windows: allow setting the ReadDirectoryChangesW() buffer size with - `fsnotify.WithBufferSize()`; the default of 64K is the highest value that - works on all platforms and is enough for most purposes, but in some cases a - highest buffer is needed. ([#521]) - -### Changes and fixes - -- inotify: remove watcher if a watched path is renamed ([#518]) - - After a rename the reported name wasn't updated, or even an empty string. - Inotify doesn't provide any good facilities to update it, so just remove the - watcher. This is already how it worked on kqueue and FEN. - - On Windows this does work, and remains working. - -- windows: don't listen for file attribute changes ([#520]) - - File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API, - with no way to see if they're a file write or attribute change, so would show - up as a fsnotify.Write event. This is never useful, and could result in many - spurious Write events. - -- windows: return `ErrEventOverflow` if the buffer is full ([#525]) - - Before it would merely return "short read", making it hard to detect this - error. - -- kqueue: make sure events for all files are delivered properly when removing a - watched directory ([#526]) - - Previously they would get sent with `""` (empty string) or `"."` as the path - name. - -- kqueue: don't emit spurious Create events for symbolic links ([#524]) - - The link would get resolved but kqueue would "forget" it already saw the link - itself, resulting on a Create for every Write event for the directory. - -- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516]) - -- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in - `backend_other.go`, making it easier to use on unsupported platforms such as - WASM, AIX, etc. ([#528]) - -- other: use the `backend_other.go` no-op if the `appengine` build tag is set; - Google AppEngine forbids usage of the unsafe package so the inotify backend - won't compile there. - -[#371]: https://github.com/fsnotify/fsnotify/pull/371 -[#516]: https://github.com/fsnotify/fsnotify/pull/516 -[#518]: https://github.com/fsnotify/fsnotify/pull/518 -[#520]: https://github.com/fsnotify/fsnotify/pull/520 -[#521]: https://github.com/fsnotify/fsnotify/pull/521 -[#524]: https://github.com/fsnotify/fsnotify/pull/524 -[#525]: https://github.com/fsnotify/fsnotify/pull/525 -[#526]: https://github.com/fsnotify/fsnotify/pull/526 -[#528]: https://github.com/fsnotify/fsnotify/pull/528 -[#537]: https://github.com/fsnotify/fsnotify/pull/537 -[#550]: https://github.com/fsnotify/fsnotify/pull/550 -[#572]: https://github.com/fsnotify/fsnotify/pull/572 - -1.6.0 - 2022-10-13 ------------------- -This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, -but not documented). It also increases the minimum Linux version to 2.6.32. - -### Additions - -- all: add `Event.Has()` and `Op.Has()` ([#477]) - - This makes checking events a lot easier; for example: - - if event.Op&Write == Write && !(event.Op&Remove == Remove) { - } - - Becomes: - - if event.Has(Write) && !event.Has(Remove) { - } - -- all: add cmd/fsnotify ([#463]) - - A command-line utility for testing and some examples. 
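The 1.7.0 entries above describe `NewBufferedWatcher()`, `AddWith()`, and `WithBufferSize()`. A small sketch of how they fit together, using only the identifiers named in those entries; the watched path and buffer sizes are arbitrary placeholders, and this is a sketch of the described API rather than an excerpt from the library.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// NewBufferedWatcher gives the Events channel a capacity, which helps
	// when the kernel delivers large bursts of events.
	w, err := fsnotify.NewBufferedWatcher(4096)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// AddWith is Add plus options; WithBufferSize tunes the Windows
	// ReadDirectoryChangesW buffer described in the entry above.
	if err := w.AddWith("/var/log", fsnotify.WithBufferSize(128*1024)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			log.Println("event:", ev)
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```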
- -### Changes and fixes - -- inotify: don't ignore events for files that don't exist ([#260], [#470]) - - Previously the inotify watcher would call `os.Lstat()` to check if a file - still exists before emitting events. - - This was inconsistent with other platforms and resulted in inconsistent event - reporting (e.g. when a file is quickly removed and re-created), and generally - a source of confusion. It was added in 2013 to fix a memory leak that no - longer exists. - -- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's - not watched ([#460]) - -- inotify: replace epoll() with non-blocking inotify ([#434]) - - Non-blocking inotify was not generally available at the time this library was - written in 2014, but now it is. As a result, the minimum Linux version is - bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. - -- kqueue: don't check for events every 100ms ([#480]) - - The watcher would wake up every 100ms, even when there was nothing to do. Now - it waits until there is something to do. - -- macos: retry opening files on EINTR ([#475]) - -- kqueue: skip unreadable files ([#479]) - - kqueue requires a file descriptor for every file in a directory; this would - fail if a file was unreadable by the current user. Now these files are simply - skipped. - -- windows: fix renaming a watched directory if the parent is also watched ([#370]) - -- windows: increase buffer size from 4K to 64K ([#485]) - -- windows: close file handle on Remove() ([#288]) - -- kqueue: put pathname in the error if watching a file fails ([#471]) - -- inotify, windows: calling Close() more than once could race ([#465]) - -- kqueue: improve Close() performance ([#233]) - -- all: various documentation additions and clarifications. - -[#233]: https://github.com/fsnotify/fsnotify/pull/233 -[#260]: https://github.com/fsnotify/fsnotify/pull/260 -[#288]: https://github.com/fsnotify/fsnotify/pull/288 -[#370]: https://github.com/fsnotify/fsnotify/pull/370 -[#434]: https://github.com/fsnotify/fsnotify/pull/434 -[#460]: https://github.com/fsnotify/fsnotify/pull/460 -[#463]: https://github.com/fsnotify/fsnotify/pull/463 -[#465]: https://github.com/fsnotify/fsnotify/pull/465 -[#470]: https://github.com/fsnotify/fsnotify/pull/470 -[#471]: https://github.com/fsnotify/fsnotify/pull/471 -[#475]: https://github.com/fsnotify/fsnotify/pull/475 -[#477]: https://github.com/fsnotify/fsnotify/pull/477 -[#479]: https://github.com/fsnotify/fsnotify/pull/479 -[#480]: https://github.com/fsnotify/fsnotify/pull/480 -[#485]: https://github.com/fsnotify/fsnotify/pull/485 - -## [1.5.4] - 2022-04-25 - -* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447) -* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444) -* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443) - -## [1.5.3] - 2022-04-22 - -* This version is retracted. 
An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445) - -## [1.5.2] - 2022-04-21 - -* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374) -* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361) -* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424) -* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406) -* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416) - -## [1.5.1] - 2021-08-24 - -* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394) - -## [1.5.0] - 2021-08-20 - -* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) -* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) -* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) -* CI: Use GitHub Actions for CI and cover go 1.12-1.17 - [#378](https://github.com/fsnotify/fsnotify/pull/378) - [#381](https://github.com/fsnotify/fsnotify/pull/381) - [#385](https://github.com/fsnotify/fsnotify/pull/385) -* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) - -## [1.4.9] - 2020-03-11 - -* Move example usage to the readme #329. This may resolve #328. - -## [1.4.8] - 2020-03-10 - -* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) -* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) -* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) -* CI: Less verbosity (@nathany #267) -* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) -* Tests: Check if channels are closed in the example (@alexeykazakov #244) -* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) -* CI: Add windows to travis matrix (@cpuguy83 #284) -* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) -* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) -* Linux: open files with close-on-exec (@linxiulei #273) -* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) -* Project: Add go.mod (@nathany #309) -* Project: Revise editor config (@nathany #309) -* Project: Update copyright for 2019 (@nathany #309) -* CI: Drop go1.8 from CI matrix (@nathany #309) -* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) - -## [1.4.7] - 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## [1.4.2] - 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec 
[#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## [1.4.1] - 2016-10-04 - -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## [1.4.0] - 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## [1.3.1] - 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## [1.3.0] - 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## [1.2.10] - 2016-03-02 - -* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## [1.2.9] - 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## [1.2.8] - 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test -* enable race detection for continuous integration (Linux, Mac, Windows) - -## [1.2.5] - 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## [1.2.1] - 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## [1.2.0] - 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## [1.1.1] - 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## [1.1.0] - 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [1.0.4] - 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. 
[#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## [1.0.3] - 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## [1.0.2] - 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## [1.0.0] - 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. - -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## [0.9.3] - 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [0.9.2] - 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## [0.9.1] - 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## [0.9.0] - 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
- -## [0.8.12] - 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## [0.8.11] - 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) - -## [0.8.10] - 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## [0.8.9] - 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## [0.8.8] - 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## [0.8.7] - 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## [0.8.6] - 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## [0.8.5] - 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## [0.8.4] - 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## [0.8.3] - 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## [0.8.2] - 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## [0.8.1] - 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## [0.8.0] - 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## [0.7.4] - 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## [0.7.3] - 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## [0.7.2] - 2012-09-01 - -* kqueue: events for created directories - -## [0.7.1] - 2012-07-14 - -* [Fix] for renaming files - -## [0.7.0] - 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## [0.6.0] - 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## [0.5.1] - 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## [0.5.0] - 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## [0.4.0] - 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## [0.3.0] - 2012-02-19 - -* kqueue: add files when watch directory 
- -## [0.2.0] - 2011-12-30 - -* update to latest Go weekly code - -## [0.1.0] - 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md deleted file mode 100644 index ea379759d..000000000 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -Thank you for your interest in contributing to fsnotify! We try to review and -merge PRs in a reasonable timeframe, but please be aware that: - -- To avoid "wasted" work, please discus changes on the issue tracker first. You - can just send PRs, but they may end up being rejected for one reason or the - other. - -- fsnotify is a cross-platform library, and changes must work reasonably well on - all supported platforms. - -- Changes will need to be compatible; old code should still compile, and the - runtime behaviour can't change in ways that are likely to lead to problems for - users. - -Testing -------- -Just `go test ./...` runs all the tests; the CI runs this on all supported -platforms. Testing different platforms locally can be done with something like -[goon] or [Vagrant], but this isn't super-easy to set up at the moment. - -Use the `-short` flag to make the "stress test" run faster. - - -[goon]: https://github.com/arp242/goon -[Vagrant]: https://www.vagrantup.com/ -[integration_test.go]: /integration_test.go diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index fb03ade75..000000000 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright © 2012 The Go Authors. All rights reserved. -Copyright © fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. -* Neither the name of Google Inc. nor the names of its contributors may be used - to endorse or promote products derived from this software without specific - prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index e480733d1..000000000 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,184 +0,0 @@ -fsnotify is a Go library to provide cross-platform filesystem notifications on -Windows, Linux, macOS, BSD, and illumos. - -Go 1.17 or newer is required; the full documentation is at -https://pkg.go.dev/github.com/fsnotify/fsnotify - ---- - -Platform support: - -| Backend | OS | Status | -| :-------------------- | :--------- | :------------------------------------------------------------------------ | -| inotify | Linux | Supported | -| kqueue | BSD, macOS | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FEN | illumos | Supported | -| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | -| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | -| USN Journals | Windows | [Needs support in x/sys/windows][usn] | -| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | - -Linux and illumos should include Android and Solaris, but these are currently -untested. - -[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 -[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 - -Usage ------ -A basic example: - -```go -package main - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - // Create new watcher. - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - // Start listening for events. - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Has(fsnotify.Write) { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - // Add a path. - err = watcher.Add("/tmp") - if err != nil { - log.Fatal(err) - } - - // Block main goroutine forever. - <-make(chan struct{}) -} -``` - -Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be -run with: - - % go run ./cmd/fsnotify - -Further detailed documentation can be found in godoc: -https://pkg.go.dev/github.com/fsnotify/fsnotify - -FAQ ---- -### Will a file still be watched when it's moved to another directory? -No, not unless you are watching the location it was moved to. - -### Are subdirectories watched? 
-No, you must add watches for any directory you want to watch (a recursive -watcher is on the roadmap: [#18]). - -[#18]: https://github.com/fsnotify/fsnotify/issues/18 - -### Do I have to watch the Error and Event channels in a goroutine? -Yes. You can read both channels in the same goroutine using `select` (you don't -need a separate goroutine for both channels; see the example). - -### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys? -fsnotify requires support from underlying OS to work. The current NFS and SMB -protocols does not provide network level support for file notifications, and -neither do the /proc and /sys virtual filesystems. - -This could be fixed with a polling watcher ([#9]), but it's not yet implemented. - -[#9]: https://github.com/fsnotify/fsnotify/issues/9 - -### Why do I get many Chmod events? -Some programs may generate a lot of attribute changes; for example Spotlight on -macOS, anti-virus programs, backup applications, and some others are known to do -this. As a rule, it's typically best to ignore Chmod events. They're often not -useful, and tend to cause problems. - -Spotlight indexing on macOS can result in multiple events (see [#15]). A -temporary workaround is to add your folder(s) to the *Spotlight Privacy -settings* until we have a native FSEvents implementation (see [#11]). - -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#15]: https://github.com/fsnotify/fsnotify/issues/15 - -### Watching a file doesn't work well -Watching individual files (rather than directories) is generally not recommended -as many programs (especially editors) update files atomically: it will write to -a temporary file which is then moved to to destination, overwriting the original -(or some variant thereof). The watcher on the original file is now lost, as that -no longer exists. - -The upshot of this is that a power failure or crash won't leave a half-written -file. - -Watch the parent directory and use `Event.Name` to filter out files you're not -interested in. There is an example of this in `cmd/fsnotify/file.go`. - -Platform-specific notes ------------------------ -### Linux -When a file is removed a REMOVE event won't be emitted until all file -descriptors are closed; it will emit a CHMOD instead: - - fp := os.Open("file") - os.Remove("file") // CHMOD - fp.Close() // REMOVE - -This is the event that inotify sends, so not much can be changed about this. - -The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for -the number of watches per user, and `fs.inotify.max_user_instances` specifies -the maximum number of inotify instances per user. Every Watcher you create is an -"instance", and every path you add is a "watch". - -These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and -`/proc/sys/fs/inotify/max_user_instances` - -To increase them you can use `sysctl` or write the value to proc file: - - # The default values on Linux 5.18 - sysctl fs.inotify.max_user_watches=124983 - sysctl fs.inotify.max_user_instances=128 - -To make the changes persist on reboot edit `/etc/sysctl.conf` or -`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your -distro's documentation): - - fs.inotify.max_user_watches=124983 - fs.inotify.max_user_instances=128 - -Reaching the limit will result in a "no space left on device" or "too many open -files" error. 
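The deleted FAQ above recommends watching the parent directory and filtering on `Event.Name` rather than watching a single file that editors may replace atomically. A minimal sketch of that pattern, independent of this vendored copy (the target path and file name are placeholders), could look like:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Hypothetical file we care about; many editors replace it atomically,
	// so we watch its parent directory instead of the file itself.
	target := "/etc/myapp/config.yaml"

	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Watch the directory, not the file.
	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return
			}
			// Filter out events for other files in the same directory.
			if filepath.Clean(ev.Name) != filepath.Clean(target) {
				continue
			}
			if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
				log.Println("config changed:", ev)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```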
- -### kqueue (macOS, all BSD systems) -kqueue requires opening a file descriptor for every file that's being watched; -so if you're watching a directory with five files then that's six file -descriptors. You will run in to your system's "max open files" limit faster on -these platforms. - -The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to -control the maximum number of open files. diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go deleted file mode 100644 index 28497f1dd..000000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ /dev/null @@ -1,640 +0,0 @@ -//go:build solaris -// +build solaris - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - mu sync.Mutex - port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
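The doc comment above describes when a buffered Events channel is worthwhile. A short usage sketch, assuming the fsnotify module is available (the buffer size and watched path are arbitrary examples, and errors are drained in a separate goroutine so the reader never blocks):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	// Buffer up to 4096 events in userspace. As the doc comment notes, an
	// unbuffered watcher (fsnotify.NewWatcher) is usually the better default.
	w, err := fsnotify.NewBufferedWatcher(4096)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/tmp"); err != nil { // placeholder path
		log.Fatal(err)
	}

	// Drain errors so event delivery is never stalled on the Errors channel.
	go func() {
		for err := range w.Errors {
			log.Println("error:", err)
		}
	}()

	for ev := range w.Events {
		log.Println("event:", ev)
	}
}
```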
-func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), - done: make(chan struct{}), - } - - var err error - w.port, err = unix.NewEventPort() - if err != nil { - return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) - } - - go w.readEvents() - return w, nil -} - -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { - select { - case w.Events <- Event{Name: name, Op: op}: - return true - case <-w.done: - return false - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { - return nil - } - close(w.done) - return w.port.Close() -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). 
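For context, `AddWith` together with the `WithBufferSize` option mentioned above might be used roughly as follows; the option only affects the Windows backend (it is a no-op elsewhere), and the directory path is purely illustrative:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Ask for a 256 KiB ReadDirectoryChangesW buffer instead of the 64 KiB
	// default; per the comment above this only matters on Windows.
	// The directory is a placeholder.
	err = w.AddWith(`C:\data\incoming`, fsnotify.WithBufferSize(256*1024))
	if err != nil {
		log.Fatal(err)
	}

	for ev := range w.Events {
		log.Println(ev)
	}
}
```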
-func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - if w.port.PathIsWatched(name) { - return nil - } - - _ = getOptions(opts...) - - // Currently we resolve symlinks that were explicitly requested to be - // watched. Otherwise we would use LStat here. - stat, err := os.Stat(name) - if err != nil { - return err - } - - // Associate all files in the directory. - if stat.IsDir() { - err := w.handleDirectory(name, stat, true, w.associateFile) - if err != nil { - return err - } - - w.mu.Lock() - w.dirs[name] = struct{}{} - w.mu.Unlock() - return nil - } - - err = w.associateFile(name, stat, true) - if err != nil { - return err - } - - w.mu.Lock() - w.watches[name] = struct{}{} - w.mu.Unlock() - return nil -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - if !w.port.PathIsWatched(name) { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - // The user has expressed an intent. Immediately remove this name from - // whichever watch list it might be in. If it's not in there the delete - // doesn't cause harm. - w.mu.Lock() - delete(w.watches, name) - delete(w.dirs, name) - w.mu.Unlock() - - stat, err := os.Stat(name) - if err != nil { - return err - } - - // Remove associations for every file in the directory. - if stat.IsDir() { - err := w.handleDirectory(name, stat, false, w.dissociateFile) - if err != nil { - return err - } - return nil - } - - err = w.port.DissociatePath(name) - if err != nil { - return err - } - - return nil -} - -// readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { - // If this function returns, the watcher has been closed and we can close - // these channels - defer func() { - close(w.Errors) - close(w.Events) - }() - - pevents := make([]unix.PortEvent, 8) - for { - count, err := w.port.Get(pevents, 1, nil) - if err != nil && err != unix.ETIME { - // Interrupted system call (count should be 0) ignore and continue - if errors.Is(err, unix.EINTR) && count == 0 { - continue - } - // Get failed because we called w.Close() - if errors.Is(err, unix.EBADF) && w.isClosed() { - return - } - // There was an error not caused by calling w.Close() - if !w.sendError(err) { - return - } - } - - p := pevents[:count] - for _, pevent := range p { - if pevent.Source != unix.PORT_SOURCE_FILE { - // Event from unexpected source received; should never happen. - if !w.sendError(errors.New("Event from unexpected source received")) { - return - } - continue - } - - err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } - } - } - } -} - -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { - files, err := os.ReadDir(path) - if err != nil { - return err - } - - // Handle all children of the directory. - for _, entry := range files { - finfo, err := entry.Info() - if err != nil { - return err - } - err = handler(filepath.Join(path, finfo.Name()), finfo, false) - if err != nil { - return err - } - } - - // And finally handle the directory itself. 
- return handler(path, stat, follow) -} - -// handleEvent might need to emit more than one fsnotify event if the events -// bitmap matches more than one event type (e.g. the file was both modified and -// had the attributes changed between when the association was created and the -// when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { - var ( - events = event.Events - path = event.Path - fmode = event.Cookie.(os.FileMode) - reRegister = true - ) - - w.mu.Lock() - _, watchedDir := w.dirs[path] - _, watchedPath := w.watches[path] - w.mu.Unlock() - isWatched := watchedDir || watchedPath - - if events&unix.FILE_DELETE != 0 { - if !w.sendEvent(path, Remove) { - return nil - } - reRegister = false - } - if events&unix.FILE_RENAME_FROM != 0 { - if !w.sendEvent(path, Rename) { - return nil - } - // Don't keep watching the new file name - reRegister = false - } - if events&unix.FILE_RENAME_TO != 0 { - // We don't report a Rename event for this case, because Rename events - // are interpreted as referring to the _old_ name of the file, and in - // this case the event would refer to the new name of the file. This - // type of rename event is not supported by fsnotify. - - // inotify reports a Remove event in this case, so we simulate this - // here. - if !w.sendEvent(path, Remove) { - return nil - } - // Don't keep watching the file that was removed - reRegister = false - } - - // The file is gone, nothing left to do. - if !reRegister { - if watchedDir { - w.mu.Lock() - delete(w.dirs, path) - w.mu.Unlock() - } - if watchedPath { - w.mu.Lock() - delete(w.watches, path) - w.mu.Unlock() - } - return nil - } - - // If we didn't get a deletion the file still exists and we're going to have - // to watch it again. Let's Stat it now so that we can compare permissions - // and have what we need to continue watching the file - - stat, err := os.Lstat(path) - if err != nil { - // This is unexpected, but we should still emit an event. This happens - // most often on "rm -r" of a subdirectory inside a watched directory We - // get a modify event of something happening inside, but by the time we - // get here, the sudirectory is already gone. Clearly we were watching - // this path but now it is gone. Let's tell the user that it was - // removed. - if !w.sendEvent(path, Remove) { - return nil - } - // Suppress extra write events on removed directories; they are not - // informative and can be confusing. - return nil - } - - // resolve symlinks that were explicitly watched as we would have at Add() - // time. this helps suppress spurious Chmod events on watched symlinks - if isWatched { - stat, err = os.Stat(path) - if err != nil { - // The symlink still exists, but the target is gone. Report the - // Remove similar to above. 
- if !w.sendEvent(path, Remove) { - return nil - } - // Don't return the error - } - } - - if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } - } - } else { - if !w.sendEvent(path, Write) { - return nil - } - } - } - if events&unix.FILE_ATTRIB != 0 && stat != nil { - // Only send Chmod if perms changed - if stat.Mode().Perm() != fmode.Perm() { - if !w.sendEvent(path, Chmod) { - return nil - } - } - } - - if stat != nil { - // If we get here, it means we've hit an event above that requires us to - // continue watching the file or directory - return w.associateFile(path, stat, isWatched) - } - return nil -} - -func (w *Watcher) updateDirectory(path string) error { - // The directory was modified, so we must find unwatched entities and watch - // them. If something was removed from the directory, nothing will happen, - // as everything else should still be watched. - files, err := os.ReadDir(path) - if err != nil { - return err - } - - for _, entry := range files { - path := filepath.Join(path, entry.Name()) - if w.port.PathIsWatched(path) { - continue - } - - finfo, err := entry.Info() - if err != nil { - return err - } - err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } - } - if !w.sendEvent(path, Create) { - return nil - } - } - return nil -} - -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { - if w.isClosed() { - return ErrClosed - } - // This is primarily protecting the call to AssociatePath but it is - // important and intentional that the call to PathIsWatched is also - // protected by this mutex. Without this mutex, AssociatePath has been seen - // to error out that the path is already associated. - w.mu.Lock() - defer w.mu.Unlock() - - if w.port.PathIsWatched(path) { - // Remove the old association in favor of this one If we get ENOENT, - // then while the x/sys/unix wrapper still thought that this path was - // associated, the underlying event port did not. This call will have - // cleared up that discrepancy. The most likely cause is that the event - // has fired but we haven't processed it yet. - err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { - return err - } - } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB - } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) -} - -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { - if !w.port.PathIsWatched(path) { - return nil - } - return w.port.DissociatePath(path) -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
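A brief sketch of inspecting the current watch set with `WatchList`, as documented above (the added paths are placeholders):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Placeholder paths; only paths added explicitly appear in WatchList.
	for _, p := range []string{"/tmp", "/var/log"} {
		if err := w.Add(p); err != nil {
			log.Printf("add %q: %v", p, err)
		}
	}

	log.Println("currently watching:", w.WatchList())
}
```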
-func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)+len(w.dirs)) - for pathname := range w.dirs { - entries = append(entries, pathname) - } - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go deleted file mode 100644 index 921c1c1e4..000000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ /dev/null @@ -1,594 +0,0 @@ -//go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - // Store fd here as os.File.Read() will no longer return on close after - // calling Fd(). See: https://github.com/golang/go/issues/26439 - fd int - inotifyFile *os.File - watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex - doneResp chan struct{} // Channel to respond to Close -} - -type ( - watches struct { - mu sync.RWMutex - wd map[uint32]*watch // wd → watch - path map[string]uint32 // pathname → wd - } - watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. 
- } -) - -func newWatches() *watches { - return &watches{ - wd: make(map[uint32]*watch), - path: make(map[string]uint32), - } -} - -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) - delete(w.wd, wd) -} - -func (w *watches) removePath(path string) (uint32, bool) { - w.mu.Lock() - defer w.mu.Unlock() - - wd, ok := w.path[path] - if !ok { - return 0, false - } - - delete(w.path, path) - delete(w.wd, wd) - - return wd, true -} - -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] -} - -func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - - var existing *watch - wd, ok := w.path[path] - if ok { - existing = w.wd[wd] - } - - upd, err := f(existing) - if err != nil { - return err - } - if upd != nil { - w.wd[upd.wd] = upd - w.path[upd.path] = upd.wd - - if upd.wd != wd { - delete(w.wd, wd) - } - } - - return nil -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - // Need to set nonblocking mode for SetDeadline to work, otherwise blocking - // I/O operations won't terminate on close. - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) - if fd == -1 { - return nil, errno - } - - w := &Watcher{ - fd: fd, - inotifyFile: os.NewFile(uintptr(fd), ""), - watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - return false - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() - if w.isClosed() { - w.closeMu.Unlock() - return nil - } - close(w.done) - w.closeMu.Unlock() - - // Causes any blocking reads to return with an error, provided the file - // still supports deadline operations. - err := w.inotifyFile.Close() - if err != nil { - return err - } - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts monitoring the path for changes. 
-// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - - name = filepath.Clean(name) - _ = getOptions(opts...) - - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { - if existing != nil { - flags |= existing.flags | unix.IN_MASK_ADD - } - - wd, err := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return nil, err - } - - if existing == nil { - return &watch{ - wd: uint32(wd), - path: name, - flags: flags, - }, nil - } - - existing.wd = uint32(wd) - existing.flags = flags - return existing, nil - }) -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - return w.remove(filepath.Clean(name)) -} - -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. 
- // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno - } - return nil -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() - for pathname := range w.watches.path { - entries = append(entries, pathname) - } - w.watches.mu.RUnlock() - - return entries -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - defer func() { - close(w.doneResp) - close(w.Errors) - close(w.Events) - }() - - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) - for { - // See if we have been closed. - if w.isClosed() { - return - } - - n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: - if !w.sendError(err) { - return - } - continue - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - err = io.EOF // If EOF is received. This should really never happen. - } else if n < 0 { - err = errno // If an error occurred while reading. - } else { - err = errors.New("notify: short read in readEvents()") // Read was too short. - } - if !w.sendError(err) { - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - ) - - if mask&unix.IN_Q_OVERFLOW != 0 { - if !w.sendError(ErrEventOverflow) { - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - watch := w.watches.byWd(uint32(raw.Wd)) - - // inotify will automatically remove the watch on deletes; just need - // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - w.watches.remove(watch.wd) - } - // We can't really update the state when a watched path is moved; - // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove - // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { - err := w.remove(watch.path) - if err != nil && !errors.Is(err, ErrNonExistentWatch) { - if !w.sendError(err) { - return - } - } - } - - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := w.newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go deleted file mode 100644 index 063a0915a..000000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ /dev/null @@ -1,782 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "sync" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. 
-// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. 
- fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - kq, closepipe, err := newKqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// newKqueue creates a new kernel event queue and returns a descriptor. -// -// This registers a new event on closepipe, which will trigger an event when -// it's closed. This way we can use kevent() without timeout/polling; without -// the closepipe, it would block forever and we wouldn't be able to stop it at -// all. -func newKqueue() (kq int, closepipe [2]int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, closepipe, err - } - - // Register the close pipe. - err = unix.Pipe(closepipe[:]) - if err != nil { - unix.Close(kq) - return kq, closepipe, err - } - - // Register changes to listen on the closepipe. - changes := make([]unix.Kevent_t, 1) - // SetKevent converts int to the platform-specific types. - unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, - unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) - - ok, err := unix.Kevent(kq, changes, nil, nil) - if ok == -1 { - unix.Close(kq) - unix.Close(closepipe[0]) - unix.Close(closepipe[1]) - return kq, closepipe, err - } - return kq, closepipe, nil -} - -// Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { - select { - case w.Events <- e: - return true - case <-w.done: - return false - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.done: - return false - } -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks - for _, name := range pathsToRemove { - w.Remove(name) - } - - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) - close(w.done) - - return nil -} - -// Add starts monitoring the path for changes. 
-// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) - - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - return w.remove(name, true) -} - -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) - if err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } - - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. 
- if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error to - // the user, as that will just confuse them with an error about a - // path they did not explicitly watch themselves. - w.Remove(name) - } - } - return nil -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { - return nil - } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// addWatch adds name to the watched file set; the flags are interpreted as -// described in kevent(2). -// -// Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", ErrClosed - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets or named pipes - if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { - return "", nil - } - - // Follow Symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - link, err := os.Readlink(name) - if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - - if alreadyWatching { - // Add to watches so we don't get spurious Create events later - // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} - return link, nil - } - - name = link - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - // Retry on EINTR; open() can return EINTR in practice on macOS. - // See #354, and Go issues 11180 and 39237. 
- for { - watchfd, err = unix.Open(name, openMode, 0) - if err == nil { - break - } - if errors.Is(err, unix.EINTR) { - continue - } - - return "", err - } - - isDir = fi.IsDir() - } - - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) - if err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - defer func() { - close(w.Events) - close(w.Errors) - _ = unix.Close(w.kq) - unix.Close(w.closepipe[0]) - }() - - eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { - kevents, err := w.read(eventBuffer) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true - } - continue - } - - // Flush the events we received to the Events channel - for _, kevent := range kevents { - var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) - ) - - // Shut down the loop when the pipe is closed, but only after all - // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue - } - - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - - event := w.newEvent(path.name, mask) - - if event.Has(Rename) || event.Has(Remove) { - w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } - } - - if event.Has(Remove) { - // Look for a file that may have overwritten this; for example, - // mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } - } - } - } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } - } - } - } - } - } - } -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. 
-func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - // No point sending a write and delete event at the same time: if it's gone, - // then it's gone. - if e.Op.Has(Write) && e.Op.Has(Remove) { - e.Op &^= Write - } - return e -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := os.ReadDir(dirPath) - if err != nil { - return err - } - - for _, f := range files { - path := filepath.Join(dirPath, f.Name()) - - fi, err := f.Info() - if err != nil { - return fmt.Errorf("%q: %w", path, err) - } - - cleanPath, err := w.internalWatch(path, fi) - if err != nil { - // No permission to read the file; that's not a problem: just skip. - // But do add it to w.fileExists to prevent it from being picked up - // as a "new" file later (it still shows up in the directory - // listing). - switch { - case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): - cleanPath = filepath.Clean(path) - default: - return fmt.Errorf("%q: %w", path, err) - } - } - - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() - } - - return nil -} - -// Search the directory for new files and send an event for them. -// -// This functionality is to have the BSD watcher match the inotify, which sends -// a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { - files, err := os.ReadDir(dir) - if err != nil { - // Directory no longer exists: we can ignore this safely. kqueue will - // still give us the correct events. - if errors.Is(err, os.ErrNotExist) { - return nil - } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - - for _, f := range files { - fi, err := f.Info() - if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) - if err != nil { - // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { - return nil - } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) - } - } - return nil -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { - if fi.IsDir() { - // mimic Linux providing delete events for subdirectories, but preserve - // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - for i, fd := range fds { - // SetKevent converts int to the platform-specific types. - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // Register the events. - success, err := unix.Kevent(w.kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(w.kq, nil, events, nil) - if err != nil { - return nil, err - } - return events[0:n], nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go deleted file mode 100644 index d34a23c01..000000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ /dev/null @@ -1,205 +0,0 @@ -//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import "errors" - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
- Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("fsnotify not supported on the current platform") -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. 
-// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go deleted file mode 100644 index 9bc91e5d6..000000000 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ /dev/null @@ -1,827 +0,0 @@ -//go:build windows -// +build windows - -// Windows backend based on ReadDirectoryChangesW() -// -// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. 
If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. - Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. - Errors chan error - - port windows.Handle // Handle to completion port - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error - - mu sync.Mutex // Protects access to watches, closed - watches watchMap // Map of watches (key: i-number) - closed bool // Set to true when Close() is first called -} - -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) -} - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { - port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) - if err != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", err) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - w.mu.Lock() - defer w.mu.Unlock() - return w.closed -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - - event := w.newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: - return true - case <-w.quit: - } - return false -} - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - w.mu.Lock() - w.closed = true - w.mu.Unlock() - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - if w.isClosed() { - return ErrClosed - } - - with := getOptions(opts...) 
- if with.bufsize < 4096 { - return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") - } - - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - bufsize: with.bufsize, - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { - if w.isClosed() { - return nil - } - - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - if w.isClosed() { - return nil - } - - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -// These options are from the old golang.org/x/exp/winfsnotify, where you could -// add various options to the watch. This has long since been removed. -// -// The "sys" in the name is misleading as they're not part of any "system". -// -// This should all be removed at some point, and just use windows.FILE_NOTIFY_* -const ( - sysFSALLEVENTS = 0xfff - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - sysFSIGNORED = 0x8000 -) - -func (w *Watcher) newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - bufsize int - reply chan error -} - -type inode struct { - handle windows.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov windows.Overlapped - ino *inode // i-number - recurse bool // Recursive watch? 
- path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf []byte // buffer, allocated later -} - -type ( - indexMap map[uint64]*watch - watchMap map[uint32]indexMap -) - -func (w *Watcher) wakeupReader() error { - err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if err != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", err) - } - return nil -} - -func (w *Watcher) getDir(pathname string) (dir string, err error) { - attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) - if err != nil { - return "", os.NewSyscallError("GetFileAttributes", err) - } - if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func (w *Watcher) getIno(path string) (ino *inode, err error) { - h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), - windows.FILE_LIST_DIRECTORY, - windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, - nil, windows.OPEN_EXISTING, - windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) - if err != nil { - return nil, os.NewSyscallError("CreateFile", err) - } - - var fi windows.ByHandleFileInformation - err = windows.GetFileInformationByHandle(h, &fi) - if err != nil { - windows.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", err) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false - - dir, err := w.getDir(pathname) - if err != nil { - return err - } - - ino, err := w.getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) - if err != nil { - windows.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", err) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - recurse: recurse, - buf: make([]byte, bufsize), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - windows.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - - err = w.startRead(watchEntry) - if err != nil { - return err - } - - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. 
-func (w *Watcher) remWatch(pathname string) error { - pathname, recurse := recursivePath(pathname) - - dir, err := w.getDir(pathname) - if err != nil { - return err - } - ino, err := w.getIno(dir) - if err != nil { - return err - } - - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - - if recurse && !watch.recurse { - return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname) - } - - err = windows.CloseHandle(ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CloseHandle", err)) - } - if watch == nil { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { - err := windows.CancelIo(watch.ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CancelIo", err)) - w.deleteWatch(watch) - } - mask := w.toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= w.toWindowsFlags(m) - } - if mask == 0 { - err := windows.CloseHandle(watch.ino.handle) - if err != nil { - w.sendError(os.NewSyscallError("CloseHandle", err)) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - - // We need to pass the array, rather than the slice. - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf)) - rdErr := windows.ReadDirectoryChanges(watch.ino.handle, - (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len), - watch.recurse, mask, nil, &watch.ov, 0) - if rdErr != nil { - err := os.NewSyscallError("ReadDirectoryChanges", rdErr) - if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n uint32 - key uintptr - ov *windows.Overlapped - ) - runtime.LockOSThread() - - for { - // This error is handled after the watch == nil check below. 
- qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) - - watch := (*watch)(unsafe.Pointer(ov)) - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - - err := windows.CloseHandle(w.port) - if err != nil { - err = os.NewSyscallError("CloseHandle", err) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch qErr { - case nil: - // No error - case windows.ERROR_MORE_DATA: - if watch == nil { - w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. - n = uint32(unsafe.Sizeof(watch.buf)) - } - case windows.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case windows.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) - continue - } - - var offset uint32 - for { - if n == 0 { - w.sendError(ErrEventOverflow) - break - } - - // Point "raw" to the event in the buffer - raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - - // Create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := windows.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case windows.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case windows.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case windows.FILE_ACTION_RENAMED_NEW_NAME: - // Update saved path of all sub-watches. 
- old := filepath.Join(watch.path, watch.rename) - w.mu.Lock() - for _, watchMap := range w.watches { - for _, ww := range watchMap { - if strings.HasPrefix(ww.path, old) { - ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) - } - } - } - w.mu.Unlock() - - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } - if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) - if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! - if offset >= n { - //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) - break - } - } - - if err := w.startRead(watch); err != nil { - w.sendError(err) - } - } -} - -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSMODIFY != 0 { - m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { - switch action { - case windows.FILE_ACTION_ADDED: - return sysFSCREATE - case windows.FILE_ACTION_REMOVED: - return sysFSDELETE - case windows.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case windows.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case windows.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 24c99cc49..000000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,146 +0,0 @@ -// Package fsnotify provides a cross-platform interface for file system -// notifications. -// -// Currently supported systems: -// -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN -package fsnotify - -import ( - "errors" - "fmt" - "path/filepath" - "strings" -) - -// Event represents a file system notification. -type Event struct { - // Path to the file or directory. - // - // Paths are relative to the input; for example with Add("dir") the Name - // will be set to "dir/file" if you create that file, but if you use - // Add("/path/to/dir") it will be "/path/to/dir/file". - Name string - - // File operation that triggered the event. - // - // This is a bitmask and some systems may send multiple operations at once. - // Use the Event.Has() method instead of comparing with ==. - Op Op -} - -// Op describes a set of file operations. -type Op uint32 - -// The operations fsnotify can trigger; see the documentation on [Watcher] for a -// full description, and check them with [Event.Has]. -const ( - // A new pathname was created. - Create Op = 1 << iota - - // The pathname was written to; this does *not* mean the write has finished, - // and a write can be followed by more writes. 
- Write - - // The path was removed; any watches on it will be removed. Some "remove" - // operations may trigger a Rename if the file is actually moved (for - // example "remove to trash" is often a rename). - Remove - - // The path was renamed to something else; any watched on it will be - // removed. - Rename - - // File attributes were changed. - // - // It's generally not recommended to take action on this event, as it may - // get triggered very frequently by some software. For example, Spotlight - // indexing on macOS, anti-virus software, backup software, etc. - Chmod -) - -// Common errors that can be reported. -var ( - ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") -) - -func (o Op) String() string { - var b strings.Builder - if o.Has(Create) { - b.WriteString("|CREATE") - } - if o.Has(Remove) { - b.WriteString("|REMOVE") - } - if o.Has(Write) { - b.WriteString("|WRITE") - } - if o.Has(Rename) { - b.WriteString("|RENAME") - } - if o.Has(Chmod) { - b.WriteString("|CHMOD") - } - if b.Len() == 0 { - return "[no events]" - } - return b.String()[1:] -} - -// Has reports if this operation has the given operation. -func (o Op) Has(h Op) bool { return o&h != 0 } - -// Has reports if this event has the given operation. -func (e Event) Has(op Op) bool { return e.Op.Has(op) } - -// String returns a string representation of the event with their path. -func (e Event) String() string { - return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) -} - -type ( - addOpt func(opt *withOpts) - withOpts struct { - bufsize int - } -) - -var defaultOpts = withOpts{ - bufsize: 65536, // 64K -} - -func getOptions(opts ...addOpt) withOpts { - with := defaultOpts - for _, o := range opts { - o(&with) - } - return with -} - -// WithBufferSize sets the [ReadDirectoryChangesW] buffer size. -// -// This only has effect on Windows systems, and is a no-op for other backends. -// -// The default value is 64K (65536 bytes) which is the highest value that works -// on all filesystems and should be enough for most applications, but if you -// have a large burst of events it may not be enough. You can increase it if -// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]). -// -// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -func WithBufferSize(bytes int) addOpt { - return func(opt *withOpts) { opt.bufsize = bytes } -} - -// Check if this path is recursive (ends with "/..." or "\..."), and return the -// path with the /... stripped. -func recursivePath(path string) (string, bool) { - if filepath.Base(path) == "..." { - return filepath.Dir(path), true - } - return path, false -} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae65..000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. 
- -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go deleted file mode 100644 index 4322b0b88..000000000 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go deleted file mode 100644 index 5da5ffa78..000000000 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build darwin -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml deleted file mode 100644 index 0cffafa7b..000000000 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ /dev/null @@ -1,26 +0,0 @@ -run: - timeout: 1m - tests: true - -linters: - disable-all: true - enable: - - asciicheck - - errcheck - - forcetypeassert - - gocritic - - gofmt - - goimports - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - typecheck - - unused - -issues: - exclude-use-default: false - max-issues-per-linter: 0 - max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md deleted file mode 100644 index c35696004..000000000 --- a/vendor/github.com/go-logr/logr/CHANGELOG.md +++ /dev/null @@ -1,6 +0,0 @@ -# CHANGELOG - -## v1.0.0-rc1 - -This is the first logged release. Major changes (including breaking changes) -have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md deleted file mode 100644 index 5d37e294c..000000000 --- a/vendor/github.com/go-logr/logr/CONTRIBUTING.md +++ /dev/null @@ -1,17 +0,0 @@ -# Contributing - -Logr is open to pull-requests, provided they fit within the intended scope of -the project. Specifically, this library aims to be VERY small and minimalist, -with no external dependencies. - -## Compatibility - -This project intends to follow [semantic versioning](http://semver.org) and -is very strict about compatibility. Any proposed changes MUST follow those -rules. - -## Performance - -As a logging library, logr must be as light-weight as possible. Any proposed -code change must include results of running the [benchmark](./benchmark) -before and after the change. 
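The CONTRIBUTING.md removed just above asks that any performance-relevant change come with benchmark results taken before and after the change. As a rough illustration only (this is not part of logr's own `./benchmark` suite; the package and benchmark names are invented for the sketch), such a before/after measurement against a discarding sink might look like this:

```
package logr_benchmark_test

import (
	"testing"

	"github.com/go-logr/logr"
)

// BenchmarkDiscardInfo measures the overhead of a single Info call when the
// sink discards everything, isolating the cost of logr's own API surface.
func BenchmarkDiscardInfo(b *testing.B) {
	logger := logr.Discard()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		logger.Info("ping", "iteration", i)
	}
}
```

Running `go test -bench=. -benchmem` on the tree before and after a proposed change, and comparing the two outputs, is the kind of evidence the removed guide asks for.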
diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/go-logr/logr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md deleted file mode 100644 index 8969526a6..000000000 --- a/vendor/github.com/go-logr/logr/README.md +++ /dev/null @@ -1,406 +0,0 @@ -# A minimal logging API for Go - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) -[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) - -logr offers an(other) opinion on how Go programs and libraries can do logging -without becoming coupled to a particular logging implementation. This is not -an implementation of logging - it is an API. In fact it is two APIs with two -different sets of users. - -The `Logger` type is intended for application and library authors. It provides -a relatively small API which can be used everywhere you want to emit logs. It -defers the actual act of writing logs (to files, to stdout, or whatever) to the -`LogSink` interface. - -The `LogSink` interface is intended for logging library implementers. It is a -pure interface which can be implemented by logging frameworks to provide the actual logging -functionality. - -This decoupling allows application and library developers to write code in -terms of `logr.Logger` (which has very low dependency fan-out) while the -implementation of logging is managed "up stack" (e.g. in or near `main()`.) -Application developers can then switch out implementations as necessary. - -Many people assert that libraries should not be logging, and as such efforts -like this are pointless. Those people are welcome to convince the authors of -the tens-of-thousands of libraries that *DO* write logs that they are all -wrong. In the meantime, logr takes a more practical approach. - -## Typical usage - -Somewhere, early in an application's life, it will make a decision about which -logging library (implementation) it actually wants to use. Something like: - -``` - func main() { - // ... other setup code ... - - // Create the "root" logger. We have chosen the "logimpl" implementation, - // which takes some initial parameters and returns a logr.Logger. - logger := logimpl.New(param1, param2) - - // ... other setup code ... -``` - -Most apps will call into other libraries, create structures to govern the flow, -etc. The `logr.Logger` object can be passed to these other libraries, stored -in structs, or even used as a package-global variable, if needed. For example: - -``` - app := createTheAppObject(logger) - app.Run() -``` - -Outside of this early setup, no other packages need to know about the choice of -implementation. They write logs in terms of the `logr.Logger` that they -received: - -``` - type appObject struct { - // ... other fields ... - logger logr.Logger - // ... other fields ... - } - - func (app *appObject) Run() { - app.logger.Info("starting up", "timestamp", time.Now()) - - // ... app code ... 
-``` - -## Background - -If the Go standard library had defined an interface for logging, this project -probably would not be needed. Alas, here we are. - -When the Go developers started developing such an interface with -[slog](https://github.com/golang/go/issues/56345), they adopted some of the -logr design but also left out some parts and changed others: - -| Feature | logr | slog | -|---------|------|------| -| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | -| Low-level API | `LogSink` | `Handler` | -| Stack unwinding | done by `LogSink` | done by `Logger` | -| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | -| Generating a value for logging on demand | `Marshaler` | `LogValuer` | -| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | -| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | -| Passing logger via context | `NewContext`, `FromContext` | no API | -| Adding a name to a logger | `WithName` | no API | -| Modify verbosity of log entries in a call chain | `V` | no API | -| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | -| Pass context for extracting additional values | no API | API variants like `InfoCtx` | - -The high-level slog API is explicitly meant to be one of many different APIs -that can be layered on top of a shared `slog.Handler`. logr is one such -alternative API, with [interoperability](#slog-interoperability) provided by -some conversion functions. - -### Inspiration - -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what -he has to say, and it largely aligns with our own experiences. - -### Differences from Dave's ideas - -The main differences are: - -1. Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. We disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. This -package restricts the logging API to just 2 types of logs: info and error. - -Info logs are things you want to tell the user which are not errors. Error -logs are, well, errors. If your code receives an `error` from a subordinate -function call and is logging that `error` *and not returning it*, use error -logs. - -2. Verbosity-levels on info logs. This gives developers a chance to indicate -arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug." Superficially this -may feel very similar, but the primary difference is the lack of semantics. -Because verbosity is a numerical value, it's safe to assume that an app running -with higher verbosity means more (and less important) logs will be generated. 
- -## Implementations (non-exhaustive) - -There are implementations for the following logging libraries: - -- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) -- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) -- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) -- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) -- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) -- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) -- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) -- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) -- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) -- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) -- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) - -## slog interoperability - -Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` -and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and -`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. -As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level -slog API. - -### Using a `logr.LogSink` as backend for slog - -Ideally, a logr sink implementation should support both logr and slog by -implementing both the normal logr interface(s) and `SlogSink`. Because -of a conflict in the parameters of the common `Enabled` method, it is [not -possible to implement both slog.Handler and logr.Sink in the same -type](https://github.com/golang/go/issues/59110). - -If both are supported, log calls can go from the high-level APIs to the backend -without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can -convert back and forth without adding additional wrappers, with one exception: -when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then -`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future -log calls. - -Such an implementation should also support values that implement specific -interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, -`slog.GroupValue`). logr does not convert those. - -Not supporting slog has several drawbacks: -- Recording source code locations works correctly if the handler gets called - through `slog.Logger`, but may be wrong in other cases. That's because a - `logr.Sink` does its own stack unwinding instead of using the program counter - provided by the high-level API. -- slog levels <= 0 can be mapped to logr levels by negating the level without a - loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as - used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink - because logr does not support "more important than info" levels. -- The slog group concept is supported by prefixing each key in a key/value - pair with the group names, separated by a dot. 
For structured output like - JSON it would be better to group the key/value pairs inside an object. -- Special slog values and interfaces don't work as expected. -- The overhead is likely to be higher. - -These drawbacks are severe enough that applications using a mixture of slog and -logr should switch to a different backend. - -### Using a `slog.Handler` as backend for logr - -Using a plain `slog.Handler` without support for logr works better than the -other direction: -- All logr verbosity levels can be mapped 1:1 to their corresponding slog level - by negating them. -- Stack unwinding is done by the `SlogSink` and the resulting program - counter is passed to the `slog.Handler`. -- Names added via `Logger.WithName` are gathered and recorded in an additional - attribute with `logger` as key and the names separated by slash as value. -- `Logger.Error` is turned into a log record with `slog.LevelError` as level - and an additional attribute with `err` as key, if an error was provided. - -The main drawback is that `logr.Marshaler` will not be supported. Types should -ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility -with logr implementations without slog support is not important, then -`slog.Valuer` is sufficient. - -### Context support for slog - -Storing a logger in a `context.Context` is not supported by -slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be -used to fill this gap. They store and retrieve a `slog.Logger` pointer -under the same context key that is also used by `NewContext` and -`FromContext` for `logr.Logger` value. - -When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will -automatically convert the `slog.Logger` to a -`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction. - -With this approach, binaries which use either slog or logr are as efficient as -possible with no unnecessary allocations. This is also why the API stores a -`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` -on retrieval would need to allocate one. - -The downside is that switching back and forth needs more allocations. Because -logr is the API that is already in use by different packages, in particular -Kubernetes, the recommendation is to use the `logr.Logger` API in code which -uses contextual logging. - -An alternative to adding values to a logger and storing that logger in the -context is to store the values in the context and to configure a logging -backend to extract those values when emitting log entries. This only works when -log calls are passed the context, which is not supported by the logr API. - -With the slog API, it is possible, but not -required. https://github.com/veqryn/slog-context is a package for slog which -provides additional support code for this approach. It also contains wrappers -for the context functions in logr, so developers who prefer to not use the logr -APIs directly can use those instead and the resulting code will still be -interoperable with logr. - -## FAQ - -### Conceptual - -#### Why structured logging? - -- **Structured logs are more easily queryable**: Since you've got - key-value pairs, it's much easier to query your structured logs for - particular values by filtering on the contents of a particular key -- - think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc. 
- -- **Structured logging makes it easier to have cross-referenceable logs**: - Similarly to searchability, if you maintain conventions around your - keys, it becomes easy to gather all log lines related to a particular - concept. - -- **Structured logs allow better dimensions of filtering**: if you have - structure to your logs, you've got more precise control over how much - information is logged -- you might choose in a particular configuration - to log certain keys but not others, only log lines where a certain key - matches a certain value, etc., instead of just having v-levels and names - to key off of. - -- **Structured logs better represent structured data**: sometimes, the - data that you want to log is inherently structured (think tuple-link - objects.) Structured logs allow you to preserve that structure when - outputting. - -#### Why V-levels? - -**V-levels give operators an easy way to control the chattiness of log -operations**. V-levels provide a way for a given package to distinguish -the relative importance or verbosity of a given log message. Then, if -a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. - -#### Why not named levels, like Info/Warning/Error? - -Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences -from Dave's ideas](#differences-from-daves-ideas). - -#### Why not allow format strings, too? - -**Format strings negate many of the benefits of structured logs**: - -- They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc. - -- They don't store structured data well, since contents are flattened into - a string. - -- They're not cross-referenceable. - -- They don't compress easily, since the message is not constant. - -(Unless you turn positional parameters into key-value pairs with numerical -keys, at which point you've gotten key-value logging with meaningless -keys.) - -### Practical - -#### Why key-value pairs, and not a map? - -Key-value pairs are *much* easier to optimize, especially around -allocations. Zap (a structured logger that inspired logr's interface) has -[performance measurements](https://github.com/uber-go/zap#performance) -that show this quite nicely. - -While the interface ends up being a little less obvious, you get -potentially better performance, plus avoid making users type -`map[string]string{}` every time they want to log. - -#### What if my V-levels differ between libraries? - -That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` method to pass different loggers to different libraries. - -Generally, you should take care to ensure that you have relatively -consistent V-levels within a given logger, however, as this makes deciding -on what verbosity of logs to request easier. - -#### But I really want to use a format string! - -That's not actually a question. Assuming your question is "how do -I convert my mental model of logging with format strings to logging with -constant messages": - -1. Figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message. - -2. For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair. 
- -For instance, consider the following examples (all taken from spots in the -Kubernetes codebase): - -- `klog.V(4).Infof("Client is returning errors: code %v, error %v", - responseCode, err)` becomes `logger.Error(err, "client returned an - error", "code", responseCode)` - -- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", - seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after - response when requesting url", "attempt", retries, "after - seconds", seconds, "url", url)` - -If you *really* must use a format string, use it in a key's value, and -call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to -reflect over type %T")` becomes `logger.Info("unable to reflect over -type", "type", fmt.Sprintf("%T"))`. In general though, the cases where -this is necessary should be few and far between. - -#### How do I choose my V-levels? - -This is basically the only hard constraint: increase V-levels to denote -more verbose or more debug-y logs. - -Otherwise, you can start out with `0` as "you always want to see this", -`1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack." - -Then gradually choose levels in between as you need them, working your way -down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). For reference, slog pre-defines -4 for debug logs -(corresponds to 4 in logr), which matches what is -[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). - -#### How do I choose my keys? - -Keys are fairly flexible, and can hold more or less any string -value. For best compatibility with implementations and consistency -with existing code in other projects, there are a few conventions you -should consider. - -- Make your keys human-readable. -- Constant keys are generally a good idea. -- Be consistent across your codebase. -- Keys should naturally match parts of the message string. -- Use lower case for simple keys and - [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for - more complex ones. Kubernetes is one example of a project that has - [adopted that - convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). - -While key names are mostly unrestricted (and spaces are acceptable), -it's generally a good idea to stick to printable ascii characters, or at -least match the general character set of your log lines. - -#### Why should keys be constant values? - -The point of structured logging is to make later log processing easier. Your -keys are, effectively, the schema of each log message. If you use different -keys across instances of the same log line, you will make your structured logs -much harder to use. `Sprintf()` is for values, not for keys! - -#### Why is this not a pure interface? - -The Logger type is implemented as a struct in order to allow the Go compiler to -optimize things like high-V `Info` logs that are not triggered. Not all of -these implementations are implemented yet, but this structure was suggested as -a way to ensure they *can* be implemented. All of the real work is behind the -`LogSink` interface. 
- -[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md deleted file mode 100644 index 1ca756fc7..000000000 --- a/vendor/github.com/go-logr/logr/SECURITY.md +++ /dev/null @@ -1,18 +0,0 @@ -# Security Policy - -If you have discovered a security vulnerability in this project, please report it -privately. **Do not disclose it as a public issue.** This gives us time to work with you -to fix the issue before public exposure, reducing the chance that the exploit will be -used before a patch is released. - -You may submit the report in the following ways: - -- send an email to go-logr-security@googlegroups.com -- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) - -Please provide the following information in your report: - -- A description of the vulnerability and its impact -- How to reproduce the issue - -We ask that you give us 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go deleted file mode 100644 index de8bcc3ad..000000000 --- a/vendor/github.com/go-logr/logr/context.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -// contextKey is how we find Loggers in a context.Context. With Go < 1.21, -// the value is always a Logger value. With Go >= 1.21, the value can be a -// Logger value or a slog.Logger pointer. -type contextKey struct{} - -// notFoundError exists to carry an IsNotFound method. -type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go deleted file mode 100644 index f012f9a18..000000000 --- a/vendor/github.com/go-logr/logr/context_noslog.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !go1.21 -// +build !go1.21 - -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" -) - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// FromContextOrDiscard returns a Logger from ctx. 
If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go deleted file mode 100644 index 065ef0b82..000000000 --- a/vendor/github.com/go-logr/logr/context_slog.go +++ /dev/null @@ -1,83 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" - "fmt" - "log/slog" -) - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - v := ctx.Value(contextKey{}) - if v == nil { - return Logger{}, notFoundError{} - } - - switch v := v.(type) { - case Logger: - return v, nil - case *slog.Logger: - return FromSlogHandler(v.Handler()), nil - default: - // Not reached. - panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) - } -} - -// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. -func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { - v := ctx.Value(contextKey{}) - if v == nil { - return nil - } - - switch v := v.(type) { - case Logger: - return slog.New(ToSlogHandler(v)) - case *slog.Logger: - return v - default: - // Not reached. - panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) - } -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if logger, err := FromContext(ctx); err == nil { - return logger - } - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - -// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the -// provided slog.Logger. -func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go deleted file mode 100644 index 99fe8be93..000000000 --- a/vendor/github.com/go-logr/logr/discard.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2020 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -// Discard returns a Logger that discards all messages logged to it. It can be -// used whenever the caller is not interested in the logs. Logger instances -// produced by this function always compare as equal. -func Discard() Logger { - return New(nil) -} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go deleted file mode 100644 index fb2f866f4..000000000 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ /dev/null @@ -1,911 +0,0 @@ -/* -Copyright 2021 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package funcr implements formatting of structured log messages and -// optionally captures the call site and timestamp. -// -// The simplest way to use it is via its implementation of a -// github.com/go-logr/logr.LogSink with output through an arbitrary -// "write" function. See New and NewJSON for details. -// -// # Custom LogSinks -// -// For users who need more control, a funcr.Formatter can be embedded inside -// your own custom LogSink implementation. This is useful when the LogSink -// needs to implement additional methods, for example. -// -// # Formatting -// -// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for -// values which are being logged. When rendering a struct, funcr will use Go's -// standard JSON tags (all except "string"). -package funcr - -import ( - "bytes" - "encoding" - "encoding/json" - "fmt" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/go-logr/logr" -) - -// New returns a logr.Logger which is implemented by an arbitrary function. -func New(fn func(prefix, args string), opts Options) logr.Logger { - return logr.New(newSink(fn, NewFormatter(opts))) -} - -// NewJSON returns a logr.Logger which is implemented by an arbitrary function -// and produces JSON output. -func NewJSON(fn func(obj string), opts Options) logr.Logger { - fnWrapper := func(_, obj string) { - fn(obj) - } - return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) -} - -// Underlier exposes access to the underlying logging function. Since -// callers only have a logr.Logger, they have to know which -// implementation is in use, so this interface is less of an -// abstraction and more of a way to test type conversion. -type Underlier interface { - GetUnderlying() func(prefix, args string) -} - -func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { - l := &fnlogger{ - Formatter: formatter, - write: fn, - } - // For skipping fnlogger.Info and fnlogger.Error. 
- l.Formatter.AddCallDepth(1) - return l -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // LogCaller tells funcr to add a "caller" key to some or all log lines. - // This has some overhead, so some users might not want it. - LogCaller MessageClass - - // LogCallerFunc tells funcr to also log the calling function name. This - // has no effect if caller logging is not enabled (see Options.LogCaller). - LogCallerFunc bool - - // LogTimestamp tells funcr to add a "ts" key to log lines. This has some - // overhead, so some users might not want it. - LogTimestamp bool - - // TimestampFormat tells funcr how to render timestamps when LogTimestamp - // is enabled. If not specified, a default format will be used. For more - // details, see docs for Go's time.Layout. - TimestampFormat string - - // LogInfoLevel tells funcr what key to use to log the info level. - // If not specified, the info level will be logged as "level". - // If this is set to "", the info level will not be logged at all. - LogInfoLevel *string - - // Verbosity tells funcr which V logs to produce. Higher values enable - // more logs. Info logs at or below this level will be written, while logs - // above this level will be discarded. - Verbosity int - - // RenderBuiltinsHook allows users to mutate the list of key-value pairs - // while a log line is being rendered. The kvList argument follows logr - // conventions - each pair of slice elements is comprised of a string key - // and an arbitrary value (verified and sanitized before calling this - // hook). The value returned must follow the same conventions. This hook - // can be used to audit or modify logged data. For example, you might want - // to prefix all of funcr's built-in keys with some string. This hook is - // only called for built-in (provided by funcr itself) key-value pairs. - // Equivalent hooks are offered for key-value pairs saved via - // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and - // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []any) []any - - // RenderValuesHook is the same as RenderBuiltinsHook, except that it is - // only called for key-value pairs saved via logr.Logger.WithValues. See - // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []any) []any - - // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only - // called for key-value pairs passed directly to Info and Error. See - // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []any) []any - - // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct - // that contains a struct, etc.) it may log. Every time it finds a struct, - // slice, array, or map the depth is increased by one. When the maximum is - // reached, the value will be converted to a string indicating that the max - // depth has been exceeded. If this field is not specified, a default - // value will be used. - MaxLogDepth int -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. - Error -) - -// fnlogger inherits some of its LogSink implementation from Formatter -// and just needs to add some glue code. 
-type fnlogger struct { - Formatter - write func(prefix, args string) -} - -func (l fnlogger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l fnlogger) WithValues(kvList ...any) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -func (l fnlogger) Info(level int, msg string, kvList ...any) { - prefix, args := l.FormatInfo(level, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) Error(err error, msg string, kvList ...any) { - prefix, args := l.FormatError(err, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) GetUnderlying() func(prefix, args string) { - return l.write -} - -// Assert conformance to the interfaces. -var _ logr.LogSink = &fnlogger{} -var _ logr.CallDepthLogSink = &fnlogger{} -var _ Underlier = &fnlogger{} - -// NewFormatter constructs a Formatter which emits a JSON-like key=value format. -func NewFormatter(opts Options) Formatter { - return newFormatter(opts, outputKeyValue) -} - -// NewFormatterJSON constructs a Formatter which emits strict JSON. -func NewFormatterJSON(opts Options) Formatter { - return newFormatter(opts, outputJSON) -} - -// Defaults for Options. -const defaultTimestampFormat = "2006-01-02 15:04:05.000000" -const defaultMaxLogDepth = 16 - -func newFormatter(opts Options, outfmt outputFormat) Formatter { - if opts.TimestampFormat == "" { - opts.TimestampFormat = defaultTimestampFormat - } - if opts.MaxLogDepth == 0 { - opts.MaxLogDepth = defaultMaxLogDepth - } - if opts.LogInfoLevel == nil { - opts.LogInfoLevel = new(string) - *opts.LogInfoLevel = "level" - } - f := Formatter{ - outputFormat: outfmt, - prefix: "", - values: nil, - depth: 0, - opts: &opts, - } - return f -} - -// Formatter is an opaque struct which can be embedded in a LogSink -// implementation. It should be constructed with NewFormatter. Some of -// its methods directly implement logr.LogSink. -type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - parentValuesStr string - depth int - opts *Options - group string // for slog groups - groupDepth int -} - -// outputFormat indicates which outputFormat to use. -type outputFormat int - -const ( - // outputKeyValue emits a JSON-like key=value format, but not strict JSON. - outputKeyValue outputFormat = iota - // outputJSON emits strict JSON. - outputJSON -) - -// PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []any - -// render produces a log line, ready to use. -func (f Formatter) render(builtins, args []any) string { - // Empirically bytes.Buffer is faster than strings.Builder for this. 
- buf := bytes.NewBuffer(make([]byte, 0, 1024)) - if f.outputFormat == outputJSON { - buf.WriteByte('{') // for the whole line - } - - vals := builtins - if hook := f.opts.RenderBuiltinsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape - continuing := len(builtins) > 0 - - if f.parentValuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.parentValuesStr) - continuing = true - } - - groupDepth := f.groupDepth - if f.group != "" { - if f.valuesStr != "" || len(args) != 0 { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } else { - // The group was empty - groupDepth-- - } - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - continuing = true - } - - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - - for i := 0; i < groupDepth; i++ { - buf.WriteByte('}') // for the groups - } - - if f.outputFormat == outputJSON { - buf.WriteByte('}') // for the whole line - } - - return buf.String() -} - -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. -// -// This function returns a potentially modified version of kvList, which -// ensures that there is a value for every key (adding a value if needed) and -// that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { - // This logic overlaps with sanitize() but saves one type-cast per key, - // which can be measurable. - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - copied := false - for i := 0; i < len(kvList); i += 2 { - k, ok := kvList[i].(string) - if !ok { - if !copied { - newList := make([]any, len(kvList)) - copy(newList, kvList) - kvList = newList - copied = true - } - k = f.nonStringKey(kvList[i]) - kvList[i] = k - } - v := kvList[i+1] - - if i > 0 || continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(f.comma()) - } else { - // In theory the format could be something we don't understand. In - // practice, we control it, so it won't be. - buf.WriteByte(' ') - } - } - - buf.WriteString(f.quoted(k, escapeKeys)) - buf.WriteByte(f.colon()) - buf.WriteString(f.pretty(v)) - } - return kvList -} - -func (f Formatter) quoted(str string, escape bool) string { - if escape { - return prettyString(str) - } - // this is faster - return `"` + str + `"` -} - -func (f Formatter) comma() byte { - if f.outputFormat == outputJSON { - return ',' - } - return ' ' -} - -func (f Formatter) colon() byte { - if f.outputFormat == outputJSON { - return ':' - } - return '=' -} - -func (f Formatter) pretty(value any) string { - return f.prettyWithFlags(value, 0, 0) -} - -const ( - flagRawStruct = 0x1 // do not print braces on structs -) - -// TODO: This is not fast. Most of the overhead goes here. 
-func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { - if depth > f.opts.MaxLogDepth { - return `""` - } - - // Handle types that take full control of logging. - if v, ok := value.(logr.Marshaler); ok { - // Replace the value with what the type wants to get logged. - // That then gets handled below via reflection. - value = invokeMarshaler(v) - } - - // Handle types that want to format themselves. - switch v := value.(type) { - case fmt.Stringer: - value = invokeStringer(v) - case error: - value = invokeError(v) - } - - // Handling the most common types without reflect is a small perf win. - switch v := value.(type) { - case bool: - return strconv.FormatBool(v) - case string: - return prettyString(v) - case int: - return strconv.FormatInt(int64(v), 10) - case int8: - return strconv.FormatInt(int64(v), 10) - case int16: - return strconv.FormatInt(int64(v), 10) - case int32: - return strconv.FormatInt(int64(v), 10) - case int64: - return strconv.FormatInt(int64(v), 10) - case uint: - return strconv.FormatUint(uint64(v), 10) - case uint8: - return strconv.FormatUint(uint64(v), 10) - case uint16: - return strconv.FormatUint(uint64(v), 10) - case uint32: - return strconv.FormatUint(uint64(v), 10) - case uint64: - return strconv.FormatUint(v, 10) - case uintptr: - return strconv.FormatUint(uint64(v), 10) - case float32: - return strconv.FormatFloat(float64(v), 'f', -1, 32) - case float64: - return strconv.FormatFloat(v, 'f', -1, 64) - case complex64: - return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` - case complex128: - return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` - case PseudoStruct: - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - v = f.sanitize(v) - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - for i := 0; i < len(v); i += 2 { - if i > 0 { - buf.WriteByte(f.comma()) - } - k, _ := v[i].(string) // sanitize() above means no need to check success - // arbitrary keys might need escaping - buf.WriteString(prettyString(k)) - buf.WriteByte(f.colon()) - buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - } - - buf := bytes.NewBuffer(make([]byte, 0, 256)) - t := reflect.TypeOf(value) - if t == nil { - return "null" - } - v := reflect.ValueOf(value) - switch t.Kind() { - case reflect.Bool: - return strconv.FormatBool(v.Bool()) - case reflect.String: - return prettyString(v.String()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(int64(v.Int()), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(uint64(v.Uint()), 10) - case reflect.Float32: - return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) - case reflect.Float64: - return strconv.FormatFloat(v.Float(), 'f', -1, 64) - case reflect.Complex64: - return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` - case reflect.Complex128: - return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` - case reflect.Struct: - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - printComma := false // testing i>0 is not enough because of JSON omitted fields - for i := 0; i < t.NumField(); i++ { - fld := t.Field(i) - if fld.PkgPath != "" { - // reflect says this field is only defined for non-exported fields. - continue - } - if !v.Field(i).CanInterface() { - // reflect isn't clear exactly what this means, but we can't use it. 
- continue - } - name := "" - omitempty := false - if tag, found := fld.Tag.Lookup("json"); found { - if tag == "-" { - continue - } - if comma := strings.Index(tag, ","); comma != -1 { - if n := tag[:comma]; n != "" { - name = n - } - rest := tag[comma:] - if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { - omitempty = true - } - } else { - name = tag - } - } - if omitempty && isEmpty(v.Field(i)) { - continue - } - if printComma { - buf.WriteByte(f.comma()) - } - printComma = true // if we got here, we are rendering a field - if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) - continue - } - if name == "" { - name = fld.Name - } - // field names can't contain characters which need escaping - buf.WriteString(f.quoted(name, false)) - buf.WriteByte(f.colon()) - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - case reflect.Slice, reflect.Array: - // If this is outputing as JSON make sure this isn't really a json.RawMessage. - // If so just emit "as-is" and don't pretty it as that will just print - // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want. - if f.outputFormat == outputJSON { - if rm, ok := value.(json.RawMessage); ok { - // If it's empty make sure we emit an empty value as the array style would below. - if len(rm) > 0 { - buf.Write(rm) - } else { - buf.WriteString("null") - } - return buf.String() - } - } - buf.WriteByte('[') - for i := 0; i < v.Len(); i++ { - if i > 0 { - buf.WriteByte(f.comma()) - } - e := v.Index(i) - buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) - } - buf.WriteByte(']') - return buf.String() - case reflect.Map: - buf.WriteByte('{') - // This does not sort the map keys, for best perf. - it := v.MapRange() - i := 0 - for it.Next() { - if i > 0 { - buf.WriteByte(f.comma()) - } - // If a map key supports TextMarshaler, use it. - keystr := "" - if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { - txt, err := m.MarshalText() - if err != nil { - keystr = fmt.Sprintf("", err.Error()) - } else { - keystr = string(txt) - } - keystr = prettyString(keystr) - } else { - // prettyWithFlags will produce already-escaped values - keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) - if t.Key().Kind() != reflect.String { - // JSON only does string keys. Unlike Go's standard JSON, we'll - // convert just about anything to a string. - keystr = prettyString(keystr) - } - } - buf.WriteString(keystr) - buf.WriteByte(f.colon()) - buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) - i++ - } - buf.WriteByte('}') - return buf.String() - case reflect.Ptr, reflect.Interface: - if v.IsNil() { - return "null" - } - return f.prettyWithFlags(v.Elem().Interface(), 0, depth) - } - return fmt.Sprintf(`""`, t.Kind().String()) -} - -func prettyString(s string) string { - // Avoid escaping (which does allocations) if we can. - if needsEscape(s) { - return strconv.Quote(s) - } - b := bytes.NewBuffer(make([]byte, 0, 1024)) - b.WriteByte('"') - b.WriteString(s) - b.WriteByte('"') - return b.String() -} - -// needsEscape determines whether the input string needs to be escaped or not, -// without doing any allocations. 
-func needsEscape(s string) bool { - for _, r := range s { - if !strconv.IsPrint(r) || r == '\\' || r == '"' { - return true - } - } - return false -} - -func isEmpty(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func invokeMarshaler(m logr.Marshaler) (ret any) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return m.MarshalLog() -} - -func invokeStringer(s fmt.Stringer) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return s.String() -} - -func invokeError(e error) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return e.Error() -} - -// Caller represents the original call site for a log line, after considering -// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and -// Line fields will always be provided, while the Func field is optional. -// Users can set the render hook fields in Options to examine logged key-value -// pairs, one of which will be {"caller", Caller} if the Options.LogCaller -// field is enabled for the given MessageClass. -type Caller struct { - // File is the basename of the file for this call site. - File string `json:"file"` - // Line is the line number in the file for this call site. - Line int `json:"line"` - // Func is the function name for this call site, or empty if - // Options.LogCallerFunc is not enabled. - Func string `json:"function,omitempty"` -} - -func (f Formatter) caller() Caller { - // +1 for this frame, +1 for Info/Error. - pc, file, line, ok := runtime.Caller(f.depth + 2) - if !ok { - return Caller{"", 0, ""} - } - fn := "" - if f.opts.LogCallerFunc { - if fp := runtime.FuncForPC(pc); fp != nil { - fn = fp.Name() - } - } - - return Caller{filepath.Base(file), line, fn} -} - -const noValue = "" - -func (f Formatter) nonStringKey(v any) string { - return fmt.Sprintf("", f.snippet(v)) -} - -// snippet produces a short snippet string of an arbitrary value. -func (f Formatter) snippet(v any) string { - const snipLen = 16 - - snip := f.pretty(v) - if len(snip) > snipLen { - snip = snip[:snipLen] - } - return snip -} - -// sanitize ensures that a list of key-value pairs has a value for every key -// (adding a value if needed) and that each key is a string (substituting a key -// if needed). -func (f Formatter) sanitize(kvList []any) []any { - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - for i := 0; i < len(kvList); i += 2 { - _, ok := kvList[i].(string) - if !ok { - kvList[i] = f.nonStringKey(kvList[i]) - } - } - return kvList -} - -// startGroup opens a new group scope (basically a sub-struct), which locks all -// the current saved values and starts them anew. This is needed to satisfy -// slog. -func (f *Formatter) startGroup(group string) { - // Unnamed groups are just inlined. - if group == "" { - return - } - - // Any saved values can no longer be changed. 
- buf := bytes.NewBuffer(make([]byte, 0, 1024)) - continuing := false - - if f.parentValuesStr != "" { - buf.WriteString(f.parentValuesStr) - continuing = true - } - - if f.group != "" && f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys - buf.WriteByte(f.colon()) - buf.WriteByte('{') // for the group - continuing = false - } - - if f.valuesStr != "" { - if continuing { - buf.WriteByte(f.comma()) - } - buf.WriteString(f.valuesStr) - } - - // NOTE: We don't close the scope here - that's done later, when a log line - // is actually rendered (because we have N scopes to close). - - f.parentValuesStr = buf.String() - - // Start collecting new values. - f.group = group - f.groupDepth++ - f.valuesStr = "" - f.values = nil -} - -// Init configures this Formatter from runtime info, such as the call depth -// imposed by logr itself. -// Note that this receiver is a pointer, so depth can be saved. -func (f *Formatter) Init(info logr.RuntimeInfo) { - f.depth += info.CallDepth -} - -// Enabled checks whether an info message at the given level should be logged. -func (f Formatter) Enabled(level int) bool { - return level <= f.opts.Verbosity -} - -// GetDepth returns the current depth of this Formatter. This is useful for -// implementations which do their own caller attribution. -func (f Formatter) GetDepth() int { - return f.depth -} - -// FormatInfo renders an Info log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { - args := make([]any, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Info { - args = append(args, "caller", f.caller()) - } - if key := *f.opts.LogInfoLevel; key != "" { - args = append(args, key, level) - } - args = append(args, "msg", msg) - return prefix, f.render(args, kvList) -} - -// FormatError renders an Error log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { - args := make([]any, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Error { - args = append(args, "caller", f.caller()) - } - args = append(args, "msg", msg) - var loggableErr any - if err != nil { - loggableErr = err.Error() - } - args = append(args, "error", loggableErr) - return prefix, f.render(args, kvList) -} - -// AddName appends the specified name. funcr uses '/' characters to separate -// name elements. Callers should not pass '/' in the provided name string, but -// this library does not actually enforce that. -func (f *Formatter) AddName(name string) { - if len(f.prefix) > 0 { - f.prefix += "/" - } - f.prefix += name -} - -// AddValues adds key-value pairs to the set of saved values to be logged with -// each log line. 
-func (f *Formatter) AddValues(kvList []any) { - // Three slice args forces a copy. - n := len(f.values) - f.values = append(f.values[:n:n], kvList...) - - vals := f.values - if hook := f.opts.RenderValuesHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - - // Pre-render values, so we don't have to do it on each Info/Error call. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys - f.valuesStr = buf.String() -} - -// AddCallDepth increases the number of stack-frames to skip when attributing -// the log line to a file and line. -func (f *Formatter) AddCallDepth(depth int) { - f.depth += depth -} diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go deleted file mode 100644 index 7bd84761e..000000000 --- a/vendor/github.com/go-logr/logr/funcr/slogsink.go +++ /dev/null @@ -1,105 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package funcr - -import ( - "context" - "log/slog" - - "github.com/go-logr/logr" -) - -var _ logr.SlogSink = &fnlogger{} - -const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink - -func (l fnlogger) Handle(_ context.Context, record slog.Record) error { - kvList := make([]any, 0, 2*record.NumAttrs()) - record.Attrs(func(attr slog.Attr) bool { - kvList = attrToKVs(attr, kvList) - return true - }) - - if record.Level >= slog.LevelError { - l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...) - } else { - level := l.levelFromSlog(record.Level) - l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...) - } - return nil -} - -func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { - kvList := make([]any, 0, 2*len(attrs)) - for _, attr := range attrs { - kvList = attrToKVs(attr, kvList) - } - l.AddValues(kvList) - return &l -} - -func (l fnlogger) WithGroup(name string) logr.SlogSink { - l.startGroup(name) - return &l -} - -// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups -// and other details of slog. -func attrToKVs(attr slog.Attr, kvList []any) []any { - attrVal := attr.Value.Resolve() - if attrVal.Kind() == slog.KindGroup { - groupVal := attrVal.Group() - grpKVs := make([]any, 0, 2*len(groupVal)) - for _, attr := range groupVal { - grpKVs = attrToKVs(attr, grpKVs) - } - if attr.Key == "" { - // slog says we have to inline these - kvList = append(kvList, grpKVs...) - } else { - kvList = append(kvList, attr.Key, PseudoStruct(grpKVs)) - } - } else if attr.Key != "" { - kvList = append(kvList, attr.Key, attrVal.Any()) - } - - return kvList -} - -// levelFromSlog adjusts the level by the logger's verbosity and negates it. -// It ensures that the result is >= 0. This is necessary because the result is -// passed to a LogSink and that API did not historically document whether -// levels could be negative or what that meant. 
-// -// Some example usage: -// -// logrV0 := getMyLogger() -// logrV2 := logrV0.V(2) -// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) -// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) -// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) -// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) -func (l fnlogger) levelFromSlog(level slog.Level) int { - result := -level - if result < 0 { - result = 0 // because LogSink doesn't expect negative V levels - } - return int(result) -} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go deleted file mode 100644 index b4428e105..000000000 --- a/vendor/github.com/go-logr/logr/logr.go +++ /dev/null @@ -1,520 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This design derives from Dave Cheney's blog: -// http://dave.cheney.net/2015/11/05/lets-talk-about-logging - -// Package logr defines a general-purpose logging API and abstract interfaces -// to back that API. Packages in the Go ecosystem can depend on this package, -// while callers can implement logging with whatever backend is appropriate. -// -// # Usage -// -// Logging is done using a Logger instance. Logger is a concrete type with -// methods, which defers the actual logging to a LogSink interface. The main -// methods of Logger are Info() and Error(). Arguments to Info() and Error() -// are key/value pairs rather than printf-style formatted strings, emphasizing -// "structured logging". -// -// With Go's standard log package, we might write: -// -// log.Printf("setting target value %s", targetValue) -// -// With logr's structured logging, we'd write: -// -// logger.Info("setting target", "value", targetValue) -// -// Errors are much the same. Instead of: -// -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) -// -// We'd write: -// -// logger.Error(err, "failed to open the pod bay door", "user", user) -// -// Info() and Error() are very similar, but they are separate methods so that -// LogSink implementations can choose to do things like attach additional -// information (such as stack traces) on calls to Error(). Error() messages are -// always logged, regardless of the current verbosity. If there is no error -// instance available, passing nil is valid. -// -// # Verbosity -// -// Often we want to log information only when the application in "verbose -// mode". To write log lines that are more verbose, Logger has a V() method. -// The higher the V-level of a log line, the less critical it is considered. -// Log-lines with V-levels that are not enabled (as per the LogSink) will not -// be written. Level V(0) is the default, and logger.V(0).Info() has the same -// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). -// Error messages do not have a verbosity level and are always logged. 
-// -// Where we might have written: -// -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } -// -// We can write: -// -// logger.V(2).Info("an unusual thing happened") -// -// # Logger Names -// -// Logger instances can have name strings so that all messages logged through -// that instance have additional context. For example, you might want to add -// a subsystem name: -// -// logger.WithName("compactor").Info("started", "time", time.Now()) -// -// The WithName() method returns a new Logger, which can be passed to -// constructors or other functions for further use. Repeated use of WithName() -// will accumulate name "segments". These name segments will be joined in some -// way by the LogSink implementation. It is strongly recommended that name -// segments contain simple identifiers (letters, digits, and hyphen), and do -// not contain characters that could muddle the log output or confuse the -// joining operation (e.g. whitespace, commas, periods, slashes, brackets, -// quotes, etc). -// -// # Saved Values -// -// Logger instances can store any number of key/value pairs, which will be -// logged alongside all messages logged through that instance. For example, -// you might want to create a Logger instance per managed object: -// -// With the standard log package, we might write: -// -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) -// -// With logr we'd write: -// -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) -// -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) -// -// # Best Practices -// -// Logger has very few hard rules, with the goal that LogSink implementations -// might have a lot of freedom to differentiate. There are, however, some -// things to consider. -// -// The log message consists of a constant message attached to the log line. -// This should generally be a simple description of what's occurring, and should -// never be a format string. Variable information can then be attached using -// named values. -// -// Keys are arbitrary strings, but should generally be constant values. Values -// may be any Go value, but how the value is formatted is determined by the -// LogSink implementation. -// -// Logger instances are meant to be passed around by value. Code that receives -// such a value can call its methods without having to check whether the -// instance is ready for use. -// -// The zero logger (= Logger{}) is identical to Discard() and discards all log -// entries. Code that receives a Logger by value can simply call it, the methods -// will never crash. For cases where passing a logger is optional, a pointer to Logger -// should be used. -// -// # Key Naming Conventions -// -// Keys are not strictly required to conform to any specification or regex, but -// it is recommended that they: -// - be human-readable and meaningful (not auto-generated or simple ordinals) -// - be constant (not dependent on input data) -// - contain only printable characters -// - not contain whitespace or punctuation -// - use lower case for simple keys and lowerCamelCase for more complex ones -// -// These guidelines help ensure that log data is processed properly regardless -// of the log implementation. For example, log implementations will try to -// output JSON data or will store data for later database (e.g. SQL) queries. 
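The package documentation being deleted above describes logr's structured-logging conventions (constant messages, key/value pairs, V() verbosity, WithName/WithValues). A minimal sketch of what that looks like in practice, assuming the sibling go-logr/stdr adapter (whose New and SetVerbosity helpers appear later in this diff) as the concrete sink; the openPodBayDoor helper and the "doors"/"user" names are purely illustrative:

```go
package main

import (
	"errors"
	"log"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

// openPodBayDoor is a made-up helper showing the conventions above: a constant
// message, key/value pairs for variable data, V() for verbosity, and Error()
// for failures (which are emitted regardless of verbosity).
func openPodBayDoor(logger logr.Logger, user string) error {
	logger = logger.WithName("doors").WithValues("user", user)
	logger.V(2).Info("checking permissions") // only written when verbosity >= 2
	err := errors.New("permission denied")
	logger.Error(err, "failed to open the pod bay door")
	return err
}

func main() {
	stdr.SetVerbosity(2) // global verbosity for the stdr adapter
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
	_ = openPodBayDoor(logger, "dave")
}
```

The stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) call mirrors the usage example given in the deleted stdr.go comment further down in this diff.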
-// -// While users are generally free to use key names of their choice, it's -// generally best to avoid using the following keys, as they're frequently used -// by implementations: -// - "caller": the calling information (file/line) of a particular log line -// - "error": the underlying error value in the `Error` method -// - "level": the log level -// - "logger": the name of the associated logger -// - "msg": the log message -// - "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// - "ts": the timestamp for a log line -// -// Implementations are encouraged to make use of these keys to represent the -// above concepts, when necessary (for example, in a pure-JSON output form, it -// would be necessary to represent at least message and timestamp as ordinary -// named values). -// -// # Break Glass -// -// Implementations may choose to give callers access to the underlying -// logging implementation. The recommended pattern for this is: -// -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } -// -// Logger grants access to the sink to enable type assertions like this: -// -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink().(impl.Underlier); ok { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } -// -// Custom `With*` functions can be implemented by copying the complete -// Logger struct and replacing the sink in the copy: -// -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. -// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } -// -// Don't use New to construct a new Logger with a LogSink retrieved from an -// existing Logger. Source code attribution might not work correctly and -// unexported fields in Logger get lost. -// -// Beware that the same LogSink instance may be shared by different logger -// instances. Calling functions that modify the LogSink will affect all of -// those. -package logr - -// New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. Passing a nil sink will create -// a Logger which discards all log lines. -func New(sink LogSink) Logger { - logger := Logger{} - logger.setSink(sink) - if sink != nil { - sink.Init(runtimeInfo) - } - return logger -} - -// setSink stores the sink and updates any related fields. It mutates the -// logger and thus is only safe to use for loggers that are not currently being -// used concurrently. -func (l *Logger) setSink(sink LogSink) { - l.sink = sink -} - -// GetSink returns the stored sink. -func (l Logger) GetSink() LogSink { - return l.sink -} - -// WithSink returns a copy of the logger with the new sink. -func (l Logger) WithSink(sink LogSink) Logger { - l.setSink(sink) - return l -} - -// Logger is an interface to an abstract logging implementation. This is a -// concrete type for performance reasons, but all the real work is passed on to -// a LogSink. 
Implementations of LogSink should provide their own constructors -// that return Logger, not LogSink. -// -// The underlying sink can be accessed through GetSink and be modified through -// WithSink. This enables the implementation of custom extensions (see "Break -// Glass" in the package documentation). Normally the sink should be used only -// indirectly. -type Logger struct { - sink LogSink - level int -} - -// Enabled tests whether this Logger is enabled. For example, commandline -// flags might be used to set the logging verbosity and disable some info logs. -func (l Logger) Enabled() bool { - // Some implementations of LogSink look at the caller in Enabled (e.g. - // different verbosity levels per package or file), but we only pass one - // CallDepth in (via Init). This means that all calls from Logger to the - // LogSink's Enabled, Info, and Error methods must have the same number of - // frames. In other words, Logger methods can't call other Logger methods - // which call these LogSink methods unless we do it the same in all paths. - return l.sink != nil && l.sink.Enabled(l.level) -} - -// Info logs a non-error message with the given key/value pairs as context. -// -// The msg argument should be used to add some constant description to the log -// line. The key/value pairs can then be used to add additional variable -// information. The key/value pairs must alternate string keys and arbitrary -// values. -func (l Logger) Info(msg string, keysAndValues ...any) { - if l.sink == nil { - return - } - if l.sink.Enabled(l.level) { // see comment in Enabled - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - withHelper.GetCallStackHelper()() - } - l.sink.Info(l.level, msg, keysAndValues...) - } -} - -// Error logs an error, with the given message and key/value pairs as context. -// It functions similarly to Info, but may have unique behavior, and should be -// preferred for logging errors (see the package documentations for more -// information). The log message will always be emitted, regardless of -// verbosity level. -// -// The msg argument should be used to add context to any underlying error, -// while the err argument should be used to attach the actual error that -// triggered this log line, if present. The err parameter is optional -// and nil may be passed instead of an error instance. -func (l Logger) Error(err error, msg string, keysAndValues ...any) { - if l.sink == nil { - return - } - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - withHelper.GetCallStackHelper()() - } - l.sink.Error(err, msg, keysAndValues...) -} - -// V returns a new Logger instance for a specific verbosity level, relative to -// this Logger. In other words, V-levels are additive. A higher verbosity -// level means a log message is less important. Negative V-levels are treated -// as 0. -func (l Logger) V(level int) Logger { - if l.sink == nil { - return l - } - if level < 0 { - level = 0 - } - l.level += level - return l -} - -// GetV returns the verbosity level of the logger. If the logger's LogSink is -// nil as in the Discard logger, this will always return 0. -func (l Logger) GetV() int { - // 0 if l.sink nil because of the if check in V above. - return l.level -} - -// WithValues returns a new Logger instance with additional key/value pairs. -// See Info for documentation on how key/value pairs work. 
-func (l Logger) WithValues(keysAndValues ...any) Logger { - if l.sink == nil { - return l - } - l.setSink(l.sink.WithValues(keysAndValues...)) - return l -} - -// WithName returns a new Logger instance with the specified name element added -// to the Logger's name. Successive calls with WithName append additional -// suffixes to the Logger's name. It's strongly recommended that name segments -// contain only letters, digits, and hyphens (see the package documentation for -// more information). -func (l Logger) WithName(name string) Logger { - if l.sink == nil { - return l - } - l.setSink(l.sink.WithName(name)) - return l -} - -// WithCallDepth returns a Logger instance that offsets the call stack by the -// specified number of frames when logging call site information, if possible. -// This is useful for users who have helper functions between the "real" call -// site and the actual calls to Logger methods. If depth is 0 the attribution -// should be to the direct caller of this function. If depth is 1 the -// attribution should skip 1 call frame, and so on. Successive calls to this -// are additive. -// -// If the underlying log implementation supports a WithCallDepth(int) method, -// it will be called and the result returned. If the implementation does not -// support CallDepthLogSink, the original Logger will be returned. -// -// To skip one level, WithCallStackHelper() should be used instead of -// WithCallDepth(1) because it works with implementions that support the -// CallDepthLogSink and/or CallStackHelperLogSink interfaces. -func (l Logger) WithCallDepth(depth int) Logger { - if l.sink == nil { - return l - } - if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { - l.setSink(withCallDepth.WithCallDepth(depth)) - } - return l -} - -// WithCallStackHelper returns a new Logger instance that skips the direct -// caller when logging call site information, if possible. This is useful for -// users who have helper functions between the "real" call site and the actual -// calls to Logger methods and want to support loggers which depend on marking -// each individual helper function, like loggers based on testing.T. -// -// In addition to using that new logger instance, callers also must call the -// returned function. -// -// If the underlying log implementation supports a WithCallDepth(int) method, -// WithCallDepth(1) will be called to produce a new logger. If it supports a -// WithCallStackHelper() method, that will be also called. If the -// implementation does not support either of these, the original Logger will be -// returned. -func (l Logger) WithCallStackHelper() (func(), Logger) { - if l.sink == nil { - return func() {}, l - } - var helper func() - if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { - l.setSink(withCallDepth.WithCallDepth(1)) - } - if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { - helper = withHelper.GetCallStackHelper() - } else { - helper = func() {} - } - return helper, l -} - -// IsZero returns true if this logger is an uninitialized zero value -func (l Logger) IsZero() bool { - return l.sink == nil -} - -// RuntimeInfo holds information that the logr "core" library knows which -// LogSinks might want to know. -type RuntimeInfo struct { - // CallDepth is the number of call frames the logr library adds between the - // end-user and the LogSink. LogSink implementations which choose to print - // the original logging site (e.g. file & line) should climb this many - // additional frames to find it. 
- CallDepth int -} - -// runtimeInfo is a static global. It must not be changed at run time. -var runtimeInfo = RuntimeInfo{ - CallDepth: 1, -} - -// LogSink represents a logging implementation. End-users will generally not -// interact with this type. -type LogSink interface { - // Init receives optional information about the logr library for LogSink - // implementations that need it. - Init(info RuntimeInfo) - - // Enabled tests whether this LogSink is enabled at the specified V-level. - // For example, commandline flags might be used to set the logging - // verbosity and disable some info logs. - Enabled(level int) bool - - // Info logs a non-error message with the given key/value pairs as context. - // The level argument is provided for optional logging. This method will - // only be called when Enabled(level) is true. See Logger.Info for more - // details. - Info(level int, msg string, keysAndValues ...any) - - // Error logs an error, with the given message and key/value pairs as - // context. See Logger.Error for more details. - Error(err error, msg string, keysAndValues ...any) - - // WithValues returns a new LogSink with additional key/value pairs. See - // Logger.WithValues for more details. - WithValues(keysAndValues ...any) LogSink - - // WithName returns a new LogSink with the specified name appended. See - // Logger.WithName for more details. - WithName(name string) LogSink -} - -// CallDepthLogSink represents a LogSink that knows how to climb the call stack -// to identify the original call site and can offset the depth by a specified -// number of frames. This is useful for users who have helper functions -// between the "real" call site and the actual calls to Logger methods. -// Implementations that log information about the call site (such as file, -// function, or line) would otherwise log information about the intermediate -// helper functions. -// -// This is an optional interface and implementations are not required to -// support it. -type CallDepthLogSink interface { - // WithCallDepth returns a LogSink that will offset the call - // stack by the specified number of frames when logging call - // site information. - // - // If depth is 0, the LogSink should skip exactly the number - // of call frames defined in RuntimeInfo.CallDepth when Info - // or Error are called, i.e. the attribution should be to the - // direct caller of Logger.Info or Logger.Error. - // - // If depth is 1 the attribution should skip 1 call frame, and so on. - // Successive calls to this are additive. - WithCallDepth(depth int) LogSink -} - -// CallStackHelperLogSink represents a LogSink that knows how to climb -// the call stack to identify the original call site and can skip -// intermediate helper functions if they mark themselves as -// helper. Go's testing package uses that approach. -// -// This is useful for users who have helper functions between the -// "real" call site and the actual calls to Logger methods. -// Implementations that log information about the call site (such as -// file, function, or line) would otherwise log information about the -// intermediate helper functions. -// -// This is an optional interface and implementations are not required -// to support it. Implementations that choose to support this must not -// simply implement it as WithCallDepth(1), because -// Logger.WithCallStackHelper will call both methods if they are -// present. This should only be implemented for LogSinks that actually -// need it, as with testing.T. 
-type CallStackHelperLogSink interface { - // GetCallStackHelper returns a function that must be called - // to mark the direct caller as helper function when logging - // call site information. - GetCallStackHelper() func() -} - -// Marshaler is an optional interface that logged values may choose to -// implement. Loggers with structured output, such as JSON, should -// log the object return by the MarshalLog method instead of the -// original value. -type Marshaler interface { - // MarshalLog can be used to: - // - ensure that structs are not logged as strings when the original - // value has a String method: return a different type without a - // String method - // - select which fields of a complex type should get logged: - // return a simpler struct with fewer fields - // - log unexported fields: return a different struct - // with exported fields - // - // It may return any value of any type. - MarshalLog() any -} diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go deleted file mode 100644 index 82d1ba494..000000000 --- a/vendor/github.com/go-logr/logr/sloghandler.go +++ /dev/null @@ -1,192 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" - "log/slog" -) - -type slogHandler struct { - // May be nil, in which case all logs get discarded. - sink LogSink - // Non-nil if sink is non-nil and implements SlogSink. - slogSink SlogSink - - // groupPrefix collects values from WithGroup calls. It gets added as - // prefix to value keys when handling a log record. - groupPrefix string - - // levelBias can be set when constructing the handler to influence the - // slog.Level of log records. A positive levelBias reduces the - // slog.Level value. slog has no API to influence this value after the - // handler got created, so it can only be set indirectly through - // Logger.V. - levelBias slog.Level -} - -var _ slog.Handler = &slogHandler{} - -// groupSeparator is used to concatenate WithGroup names and attribute keys. -const groupSeparator = "." - -// GetLevel is used for black box unit testing. -func (l *slogHandler) GetLevel() slog.Level { - return l.levelBias -} - -func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { - return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) -} - -func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { - if l.slogSink != nil { - // Only adjust verbosity level of log entries < slog.LevelError. - if record.Level < slog.LevelError { - record.Level -= l.levelBias - } - return l.slogSink.Handle(ctx, record) - } - - // No need to check for nil sink here because Handle will only be called - // when Enabled returned true. 
- - kvList := make([]any, 0, 2*record.NumAttrs()) - record.Attrs(func(attr slog.Attr) bool { - kvList = attrToKVs(attr, l.groupPrefix, kvList) - return true - }) - if record.Level >= slog.LevelError { - l.sinkWithCallDepth().Error(nil, record.Message, kvList...) - } else { - level := l.levelFromSlog(record.Level) - l.sinkWithCallDepth().Info(level, record.Message, kvList...) - } - return nil -} - -// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info -// are called by Handle, code in slog gets skipped. -// -// This offset currently (Go 1.21.0) works for calls through -// slog.New(ToSlogHandler(...)). There's no guarantee that the call -// chain won't change. Wrapping the handler will also break unwinding. It's -// still better than not adjusting at all.... -// -// This cannot be done when constructing the handler because FromSlogHandler needs -// access to the original sink without this adjustment. A second copy would -// work, but then WithAttrs would have to be called for both of them. -func (l *slogHandler) sinkWithCallDepth() LogSink { - if sink, ok := l.sink.(CallDepthLogSink); ok { - return sink.WithCallDepth(2) - } - return l.sink -} - -func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { - if l.sink == nil || len(attrs) == 0 { - return l - } - - clone := *l - if l.slogSink != nil { - clone.slogSink = l.slogSink.WithAttrs(attrs) - clone.sink = clone.slogSink - } else { - kvList := make([]any, 0, 2*len(attrs)) - for _, attr := range attrs { - kvList = attrToKVs(attr, l.groupPrefix, kvList) - } - clone.sink = l.sink.WithValues(kvList...) - } - return &clone -} - -func (l *slogHandler) WithGroup(name string) slog.Handler { - if l.sink == nil { - return l - } - if name == "" { - // slog says to inline empty groups - return l - } - clone := *l - if l.slogSink != nil { - clone.slogSink = l.slogSink.WithGroup(name) - clone.sink = clone.slogSink - } else { - clone.groupPrefix = addPrefix(clone.groupPrefix, name) - } - return &clone -} - -// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups -// and other details of slog. -func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { - attrVal := attr.Value.Resolve() - if attrVal.Kind() == slog.KindGroup { - groupVal := attrVal.Group() - grpKVs := make([]any, 0, 2*len(groupVal)) - prefix := groupPrefix - if attr.Key != "" { - prefix = addPrefix(groupPrefix, attr.Key) - } - for _, attr := range groupVal { - grpKVs = attrToKVs(attr, prefix, grpKVs) - } - kvList = append(kvList, grpKVs...) - } else if attr.Key != "" { - kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) - } - - return kvList -} - -func addPrefix(prefix, name string) string { - if prefix == "" { - return name - } - if name == "" { - return prefix - } - return prefix + groupSeparator + name -} - -// levelFromSlog adjusts the level by the logger's verbosity and negates it. -// It ensures that the result is >= 0. This is necessary because the result is -// passed to a LogSink and that API did not historically document whether -// levels could be negative or what that meant. 
-// -// Some example usage: -// -// logrV0 := getMyLogger() -// logrV2 := logrV0.V(2) -// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) -// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) -// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) -// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) -func (l *slogHandler) levelFromSlog(level slog.Level) int { - result := -level - result += l.levelBias // in case the original Logger had a V level - if result < 0 { - result = 0 // because LogSink doesn't expect negative V levels - } - return int(result) -} diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go deleted file mode 100644 index 28a83d024..000000000 --- a/vendor/github.com/go-logr/logr/slogr.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" - "log/slog" -) - -// FromSlogHandler returns a Logger which writes to the slog.Handler. -// -// The logr verbosity level is mapped to slog levels such that V(0) becomes -// slog.LevelInfo and V(4) becomes slog.LevelDebug. -func FromSlogHandler(handler slog.Handler) Logger { - if handler, ok := handler.(*slogHandler); ok { - if handler.sink == nil { - return Discard() - } - return New(handler.sink).V(int(handler.levelBias)) - } - return New(&slogSink{handler: handler}) -} - -// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. -// -// The returned logger writes all records with level >= slog.LevelError as -// error log entries with LogSink.Error, regardless of the verbosity level of -// the Logger: -// -// logger := -// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) -// -// The level of all other records gets reduced by the verbosity -// level of the Logger and the result is negated. If it happens -// to be negative, then it gets replaced by zero because a LogSink -// is not expected to handled negative levels: -// -// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) -// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) -func ToSlogHandler(logger Logger) slog.Handler { - if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { - return sink.handler - } - - handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} - if slogSink, ok := handler.sink.(SlogSink); ok { - handler.slogSink = slogSink - } - return handler -} - -// SlogSink is an optional interface that a LogSink can implement to support -// logging through the slog.Logger or slog.Handler APIs better. It then should -// also support special slog values like slog.Group. 
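The sloghandler.go and slogr.go files deleted here implement logr's slog interop. A minimal sketch of how ToSlogHandler and FromSlogHandler are typically wired up, again assuming go-logr/stdr as the concrete sink; the messages and the verbosity value 4 are illustrative, and the level comments follow the mapping documented in the deleted sources:

```go
package main

import (
	"log"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

func main() {
	// logr -> slog: expose an existing logr.Logger to slog-based callers.
	stdr.SetVerbosity(4)
	logrLogger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
	slogLogger.Debug("debug message") // handled as LogSink.Info with level 4
	slogLogger.Info("info message")   // handled as LogSink.Info with level 0
	slogLogger.Error("boom")          // always routed to LogSink.Error

	// slog -> logr: wrap a slog.Handler so logr-based libraries can use it.
	back := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
	back.V(4).Info("roughly equivalent to slog.LevelDebug")
}
```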
When used as a -// slog.Handler, the advantages are: -// -// - stack unwinding gets avoided in favor of logging the pre-recorded PC, -// as intended by slog -// - proper grouping of key/value pairs via WithGroup -// - verbosity levels > slog.LevelInfo can be recorded -// - less overhead -// -// Both APIs (Logger and slog.Logger/Handler) then are supported equally -// well. Developers can pick whatever API suits them better and/or mix -// packages which use either API in the same binary with a common logging -// implementation. -// -// This interface is necessary because the type implementing the LogSink -// interface cannot also implement the slog.Handler interface due to the -// different prototype of the common Enabled method. -// -// An implementation could support both interfaces in two different types, but then -// additional interfaces would be needed to convert between those types in FromSlogHandler -// and ToSlogHandler. -type SlogSink interface { - LogSink - - Handle(ctx context.Context, record slog.Record) error - WithAttrs(attrs []slog.Attr) SlogSink - WithGroup(name string) SlogSink -} diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go deleted file mode 100644 index 4060fcbc2..000000000 --- a/vendor/github.com/go-logr/logr/slogsink.go +++ /dev/null @@ -1,120 +0,0 @@ -//go:build go1.21 -// +build go1.21 - -/* -Copyright 2023 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -import ( - "context" - "log/slog" - "runtime" - "time" -) - -var ( - _ LogSink = &slogSink{} - _ CallDepthLogSink = &slogSink{} - _ Underlier = &slogSink{} -) - -// Underlier is implemented by the LogSink returned by NewFromLogHandler. -type Underlier interface { - // GetUnderlying returns the Handler used by the LogSink. - GetUnderlying() slog.Handler -} - -const ( - // nameKey is used to log the `WithName` values as an additional attribute. - nameKey = "logger" - - // errKey is used to log the error parameter of Error as an additional attribute. - errKey = "err" -) - -type slogSink struct { - callDepth int - name string - handler slog.Handler -} - -func (l *slogSink) Init(info RuntimeInfo) { - l.callDepth = info.CallDepth -} - -func (l *slogSink) GetUnderlying() slog.Handler { - return l.handler -} - -func (l *slogSink) WithCallDepth(depth int) LogSink { - newLogger := *l - newLogger.callDepth += depth - return &newLogger -} - -func (l *slogSink) Enabled(level int) bool { - return l.handler.Enabled(context.Background(), slog.Level(-level)) -} - -func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { - l.log(nil, msg, slog.Level(-level), kvList...) -} - -func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { - l.log(err, msg, slog.LevelError, kvList...) -} - -func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { - var pcs [1]uintptr - // skip runtime.Callers, this function, Info/Error, and all helper functions above that. 
- runtime.Callers(3+l.callDepth, pcs[:]) - - record := slog.NewRecord(time.Now(), level, msg, pcs[0]) - if l.name != "" { - record.AddAttrs(slog.String(nameKey, l.name)) - } - if err != nil { - record.AddAttrs(slog.Any(errKey, err)) - } - record.Add(kvList...) - _ = l.handler.Handle(context.Background(), record) -} - -func (l slogSink) WithName(name string) LogSink { - if l.name != "" { - l.name += "/" - } - l.name += name - return &l -} - -func (l slogSink) WithValues(kvList ...interface{}) LogSink { - l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) - return &l -} - -func kvListToAttrs(kvList ...interface{}) []slog.Attr { - // We don't need the record itself, only its Add method. - record := slog.NewRecord(time.Time{}, 0, "", 0) - record.Add(kvList...) - attrs := make([]slog.Attr, 0, record.NumAttrs()) - record.Attrs(func(attr slog.Attr) bool { - attrs = append(attrs, attr) - return true - }) - return attrs -} diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/go-logr/stdr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md deleted file mode 100644 index 515866789..000000000 --- a/vendor/github.com/go-logr/stdr/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Minimal Go logging using logr and Go's standard library - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) - -This package implements the [logr interface](https://github.com/go-logr/logr) -in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go deleted file mode 100644 index 93a8aab51..000000000 --- a/vendor/github.com/go-logr/stdr/stdr.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package stdr implements github.com/go-logr/logr.Logger in terms of -// Go's standard log package. -package stdr - -import ( - "log" - "os" - - "github.com/go-logr/logr" - "github.com/go-logr/logr/funcr" -) - -// The global verbosity level. See SetVerbosity(). -var globalVerbosity int - -// SetVerbosity sets the global level against which all info logs will be -// compared. If this is greater than or equal to the "V" of the logger, the -// message will be logged. A higher value here means more logs will be written. -// The previous verbosity value is returned. This is not concurrent-safe - -// callers must be sure to call it from only one goroutine. -func SetVerbosity(v int) int { - old := globalVerbosity - globalVerbosity = v - return old -} - -// New returns a logr.Logger which is implemented by Go's standard log package, -// or something like it. If std is nil, this will use a default logger -// instead. -// -// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -func New(std StdLogger) logr.Logger { - return NewWithOptions(std, Options{}) -} - -// NewWithOptions returns a logr.Logger which is implemented by Go's standard -// log package, or something like it. See New for details. -func NewWithOptions(std StdLogger, opts Options) logr.Logger { - if std == nil { - // Go's log.Default() is only available in 1.16 and higher. - std = log.New(os.Stderr, "", log.LstdFlags) - } - - if opts.Depth < 0 { - opts.Depth = 0 - } - - fopts := funcr.Options{ - LogCaller: funcr.MessageClass(opts.LogCaller), - } - - sl := &logger{ - Formatter: funcr.NewFormatter(fopts), - std: std, - } - - // For skipping our own logger.Info/Error. - sl.Formatter.AddCallDepth(1 + opts.Depth) - - return logr.New(sl) -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // Depth biases the assumed number of call frames to the "true" caller. - // This is useful when the calling code calls a function which then calls - // stdr (e.g. a logging shim to another API). Values less than zero will - // be treated as zero. - Depth int - - // LogCaller tells stdr to add a "caller" key to some or all log lines. - // Go's log package has options to log this natively, too. - LogCaller MessageClass - - // TODO: add an option to log the date/time -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. - Error -) - -// StdLogger is the subset of the Go stdlib log.Logger API that is needed for -// this adapter. -type StdLogger interface { - // Output is the same as log.Output and log.Logger.Output. 
- Output(calldepth int, logline string) error -} - -type logger struct { - funcr.Formatter - std StdLogger -} - -var _ logr.LogSink = &logger{} -var _ logr.CallDepthLogSink = &logger{} - -func (l logger) Enabled(level int) bool { - return globalVerbosity >= level -} - -func (l logger) Info(level int, msg string, kvList ...interface{}) { - prefix, args := l.FormatInfo(level, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) Error(err error, msg string, kvList ...interface{}) { - prefix, args := l.FormatError(err, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l logger) WithValues(kvList ...interface{}) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l logger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -// Underlier exposes access to the underlying logging implementation. Since -// callers only have a logr.Logger, they have to know which implementation is -// in use, so this interface is less of an abstraction and more of way to test -// type conversion. -type Underlier interface { - GetUnderlying() StdLogger -} - -// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger -// is itself an interface, the result may or may not be a Go log.Logger. -func (l logger) GetUnderlying() StdLogger { - return l.std -} diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml deleted file mode 100644 index 28f740cd5..000000000 --- a/vendor/github.com/go-ole/go-ole/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -sudo: false - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - tip diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md deleted file mode 100644 index 4ba6a8c64..000000000 --- a/vendor/github.com/go-ole/go-ole/ChangeLog.md +++ /dev/null @@ -1,49 +0,0 @@ -# Version 1.x.x - -* **Add more test cases and reference new test COM server project.** (Placeholder for future additions) - -# Version 1.2.0-alphaX - -**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.** - - * Added CI configuration for Travis-CI and AppVeyor. - * Added test InterfaceID and ClassID for the COM Test Server project. - * Added more inline documentation (#83). - * Added IEnumVARIANT implementation (#88). - * Added IEnumVARIANT test cases (#99, #100, #101). - * Added support for retrieving `time.Time` from VARIANT (#92). - * Added test case for IUnknown (#64). - * Added test case for IDispatch (#64). - * Added test cases for scalar variants (#64, #76). - -# Version 1.1.1 - - * Fixes for Linux build. - * Fixes for Windows build. - -# Version 1.1.0 - -The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes. - - * Move GUID out of variables.go into its own file to make new documentation available. - * Move OleError out of ole.go into its own file to make new documentation available. - * Add documentation to utility functions. - * Add documentation to variant receiver functions. - * Add documentation to ole structures. 
 - * Make variant available to other systems outside of Windows. - * Make OLE structures available to other systems outside of Windows. - -## New Features - - * Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows. - * More functions are now documented and available on godoc.org. - -# Version 1.0.1 - - 1. Fix package references from repository location change. - -# Version 1.0.0 - -This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface. - -There is no changelog for this version. Check commits for history. diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE deleted file mode 100644 index 623ec06f9..000000000 --- a/vendor/github.com/go-ole/go-ole/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright © 2013-2017 Yasuhiro Matsumoto, - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md deleted file mode 100644 index 7b577558d..000000000 --- a/vendor/github.com/go-ole/go-ole/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Go OLE - -[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) -[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) -[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole) - -Go bindings for Windows COM using shared libraries instead of cgo. - -By Yasuhiro Matsumoto. - -## Install - -To experiment with go-ole, you can just compile and run the example program: - -``` -go get github.com/go-ole/go-ole -cd /path/to/go-ole/ -go test - -cd /path/to/go-ole/example/excel -go run excel.go -``` - -## Continuous Integration - -Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run. - -**Travis-CI** - -Travis-CI was added to check builds on Linux to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future.
It is also not currently possible to test the library on Linux, since COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server. - -**AppVeyor** - -AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server. - -The tests currently do run and do pass and this should be maintained with commits. - -## Versioning - -Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. Which means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes. Fixes will always be in patch. - -This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. Leave a ticket, if there is breakage, so that it could be fixed. - -## LICENSE - -Under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml deleted file mode 100644 index 0d557ac2f..000000000 --- a/vendor/github.com/go-ole/go-ole/appveyor.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Notes: -# - Minimal appveyor.yml file is an empty file. All sections are optional. -# - Indent each level of configuration with 2 spaces. Do not use tabs! -# - All section names are case-sensitive. -# - Section names should be unique on each level. - -version: "1.3.0.{build}-alpha-{branch}" - -os: Windows Server 2012 R2 - -branches: - only: - - master - - v1.2 - - v1.1 - - v1.0 - -skip_tags: true - -clone_folder: c:\gopath\src\github.com\go-ole\go-ole - -environment: - GOPATH: c:\gopath - matrix: - - GOARCH: amd64 - GOVERSION: 1.5 - GOROOT: c:\go - DOWNLOADPLATFORM: "x64" - -install: - - choco install mingw - - SET PATH=c:\tools\mingw64\bin;%PATH% - # - Download COM Server - - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" - - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL - - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat - # - set - - go version - - go env - - go get -u golang.org/x/tools/cmd/cover - - go get -u golang.org/x/tools/cmd/godoc - - go get -u golang.org/x/tools/cmd/stringer - -build_script: - - cd c:\gopath\src\github.com\go-ole\go-ole - - go get -v -t ./... - - go build - - go test -v -cover ./... 
- -# disable automatic tests -test: off - -# disable deployment -deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go deleted file mode 100644 index a9bef150a..000000000 --- a/vendor/github.com/go-ole/go-ole/com.go +++ /dev/null @@ -1,344 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unicode/utf16" - "unsafe" -) - -var ( - procCoInitialize = modole32.NewProc("CoInitialize") - procCoInitializeEx = modole32.NewProc("CoInitializeEx") - procCoUninitialize = modole32.NewProc("CoUninitialize") - procCoCreateInstance = modole32.NewProc("CoCreateInstance") - procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") - procCLSIDFromProgID = modole32.NewProc("CLSIDFromProgID") - procCLSIDFromString = modole32.NewProc("CLSIDFromString") - procStringFromCLSID = modole32.NewProc("StringFromCLSID") - procStringFromIID = modole32.NewProc("StringFromIID") - procIIDFromString = modole32.NewProc("IIDFromString") - procCoGetObject = modole32.NewProc("CoGetObject") - procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID") - procCopyMemory = modkernel32.NewProc("RtlMoveMemory") - procVariantInit = modoleaut32.NewProc("VariantInit") - procVariantClear = modoleaut32.NewProc("VariantClear") - procVariantTimeToSystemTime = modoleaut32.NewProc("VariantTimeToSystemTime") - procSysAllocString = modoleaut32.NewProc("SysAllocString") - procSysAllocStringLen = modoleaut32.NewProc("SysAllocStringLen") - procSysFreeString = modoleaut32.NewProc("SysFreeString") - procSysStringLen = modoleaut32.NewProc("SysStringLen") - procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo") - procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch") - procGetActiveObject = modoleaut32.NewProc("GetActiveObject") - - procGetMessageW = moduser32.NewProc("GetMessageW") - procDispatchMessageW = moduser32.NewProc("DispatchMessageW") -) - -// coInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func coInitialize() (err error) { - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx - // Suggests that no value should be passed to CoInitialized. - // Could just be Call() since the parameter is optional. <-- Needs testing to be sure. - hr, _, _ := procCoInitialize.Call(uintptr(0)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// coInitializeEx initializes COM library with concurrency model. -func coInitializeEx(coinit uint32) (err error) { - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx - // Suggests that the first parameter is not only optional but should always be NULL. - hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// CoInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. 
If you are experiencing threading issues, then use -// CoInitializeEx(). -func CoInitialize(p uintptr) (err error) { - // p is ignored and won't be used. - // Avoid any variable not used errors. - p = uintptr(0) - return coInitialize() -} - -// CoInitializeEx initializes COM library with concurrency model. -func CoInitializeEx(p uintptr, coinit uint32) (err error) { - // Avoid any variable not used errors. - p = uintptr(0) - return coInitializeEx(coinit) -} - -// CoUninitialize uninitializes COM Library. -func CoUninitialize() { - procCoUninitialize.Call() -} - -// CoTaskMemFree frees memory pointer. -func CoTaskMemFree(memptr uintptr) { - procCoTaskMemFree.Call(memptr) -} - -// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. -// -// The Programmatic Identifier must be registered, because it will be looked up -// in the Windows Registry. The registry entry has the following keys: CLSID, -// Insertable, Protocol and Shell -// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). -// -// programID identifies the class id with less precision and is not guaranteed -// to be unique. These are usually found in the registry under -// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of -// "Program.Component.Version" with version being optional. -// -// CLSIDFromProgID in Windows API. -func CLSIDFromProgID(progId string) (clsid *GUID, err error) { - var guid GUID - lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) - hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// CLSIDFromString retrieves Class ID from string representation. -// -// This is technically the string version of the GUID and will convert the -// string to object. -// -// CLSIDFromString in Windows API. -func CLSIDFromString(str string) (clsid *GUID, err error) { - var guid GUID - lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str))) - hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// StringFromCLSID returns GUID formated string from GUID object. -func StringFromCLSID(clsid *GUID) (str string, err error) { - var p *uint16 - hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p))) - if hr != 0 { - err = NewError(hr) - } - str = LpOleStrToString(p) - return -} - -// IIDFromString returns GUID from program ID. -func IIDFromString(progId string) (clsid *GUID, err error) { - var guid GUID - lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) - hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// StringFromIID returns GUID formatted string from GUID object. -func StringFromIID(iid *GUID) (str string, err error) { - var p *uint16 - hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p))) - if hr != 0 { - err = NewError(hr) - } - str = LpOleStrToString(p) - return -} - -// CreateInstance of single uninitialized object with GUID. 
-func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { - if iid == nil { - iid = IID_IUnknown - } - hr, _, _ := procCoCreateInstance.Call( - uintptr(unsafe.Pointer(clsid)), - 0, - CLSCTX_SERVER, - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&unk))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// GetActiveObject retrieves pointer to active object. -func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { - if iid == nil { - iid = IID_IUnknown - } - hr, _, _ := procGetActiveObject.Call( - uintptr(unsafe.Pointer(clsid)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&unk))) - if hr != 0 { - err = NewError(hr) - } - return -} - -type BindOpts struct { - CbStruct uint32 - GrfFlags uint32 - GrfMode uint32 - TickCountDeadline uint32 -} - -// GetObject retrieves pointer to active object. -func GetObject(programID string, bindOpts *BindOpts, iid *GUID) (unk *IUnknown, err error) { - if bindOpts != nil { - bindOpts.CbStruct = uint32(unsafe.Sizeof(BindOpts{})) - } - if iid == nil { - iid = IID_IUnknown - } - hr, _, _ := procCoGetObject.Call( - uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(programID))), - uintptr(unsafe.Pointer(bindOpts)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&unk))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// VariantInit initializes variant. -func VariantInit(v *VARIANT) (err error) { - hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// VariantClear clears value in Variant settings to VT_EMPTY. -func VariantClear(v *VARIANT) (err error) { - hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// SysAllocString allocates memory for string and copies string into memory. -func SysAllocString(v string) (ss *int16) { - pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v)))) - ss = (*int16)(unsafe.Pointer(pss)) - return -} - -// SysAllocStringLen copies up to length of given string returning pointer. -func SysAllocStringLen(v string) (ss *int16) { - utf16 := utf16.Encode([]rune(v + "\x00")) - ptr := &utf16[0] - - pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1)) - ss = (*int16)(unsafe.Pointer(pss)) - return -} - -// SysFreeString frees string system memory. This must be called with SysAllocString. -func SysFreeString(v *int16) (err error) { - hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// SysStringLen is the length of the system allocated string. -func SysStringLen(v *int16) uint32 { - l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v))) - return uint32(l) -} - -// CreateStdDispatch provides default IDispatch implementation for IUnknown. -// -// This handles default IDispatch implementation for objects. It haves a few -// limitations with only supporting one language. It will also only return -// default exception codes. -func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) { - hr, _, _ := procCreateStdDispatch.Call( - uintptr(unsafe.Pointer(unk)), - v, - uintptr(unsafe.Pointer(ptinfo)), - uintptr(unsafe.Pointer(&disp))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. -// -// This will not handle the full implementation of the interface. 
-func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) { - hr, _, _ := procCreateDispTypeInfo.Call( - uintptr(unsafe.Pointer(idata)), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&pptinfo))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// copyMemory moves location of a block of memory. -func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) { - procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length)) -} - -// GetUserDefaultLCID retrieves current user default locale. -func GetUserDefaultLCID() (lcid uint32) { - ret, _, _ := procGetUserDefaultLCID.Call() - lcid = uint32(ret) - return -} - -// GetMessage in message queue from runtime. -// -// This function appears to block. PeekMessage does not block. -func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) { - r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax)) - ret = int32(r0) - return -} - -// DispatchMessage to window procedure. -func DispatchMessage(msg *Msg) (ret int32) { - r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg))) - ret = int32(r0) - return -} diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go deleted file mode 100644 index cef539d9d..000000000 --- a/vendor/github.com/go-ole/go-ole/com_func.go +++ /dev/null @@ -1,174 +0,0 @@ -// +build !windows - -package ole - -import ( - "time" - "unsafe" -) - -// coInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func coInitialize() error { - return NewError(E_NOTIMPL) -} - -// coInitializeEx initializes COM library with concurrency model. -func coInitializeEx(coinit uint32) error { - return NewError(E_NOTIMPL) -} - -// CoInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func CoInitialize(p uintptr) error { - return NewError(E_NOTIMPL) -} - -// CoInitializeEx initializes COM library with concurrency model. -func CoInitializeEx(p uintptr, coinit uint32) error { - return NewError(E_NOTIMPL) -} - -// CoUninitialize uninitializes COM Library. -func CoUninitialize() {} - -// CoTaskMemFree frees memory pointer. -func CoTaskMemFree(memptr uintptr) {} - -// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. -// -// The Programmatic Identifier must be registered, because it will be looked up -// in the Windows Registry. The registry entry has the following keys: CLSID, -// Insertable, Protocol and Shell -// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). -// -// programID identifies the class id with less precision and is not guaranteed -// to be unique. 
These are usually found in the registry under -// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of -// "Program.Component.Version" with version being optional. -// -// CLSIDFromProgID in Windows API. -func CLSIDFromProgID(progId string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// CLSIDFromString retrieves Class ID from string representation. -// -// This is technically the string version of the GUID and will convert the -// string to object. -// -// CLSIDFromString in Windows API. -func CLSIDFromString(str string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// StringFromCLSID returns GUID formated string from GUID object. -func StringFromCLSID(clsid *GUID) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// IIDFromString returns GUID from program ID. -func IIDFromString(progId string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// StringFromIID returns GUID formatted string from GUID object. -func StringFromIID(iid *GUID) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// CreateInstance of single uninitialized object with GUID. -func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// GetActiveObject retrieves pointer to active object. -func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// VariantInit initializes variant. -func VariantInit(v *VARIANT) error { - return NewError(E_NOTIMPL) -} - -// VariantClear clears value in Variant settings to VT_EMPTY. -func VariantClear(v *VARIANT) error { - return NewError(E_NOTIMPL) -} - -// SysAllocString allocates memory for string and copies string into memory. -func SysAllocString(v string) *int16 { - u := int16(0) - return &u -} - -// SysAllocStringLen copies up to length of given string returning pointer. -func SysAllocStringLen(v string) *int16 { - u := int16(0) - return &u -} - -// SysFreeString frees string system memory. This must be called with SysAllocString. -func SysFreeString(v *int16) error { - return NewError(E_NOTIMPL) -} - -// SysStringLen is the length of the system allocated string. -func SysStringLen(v *int16) uint32 { - return uint32(0) -} - -// CreateStdDispatch provides default IDispatch implementation for IUnknown. -// -// This handles default IDispatch implementation for objects. It haves a few -// limitations with only supporting one language. It will also only return -// default exception codes. -func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) { - return nil, NewError(E_NOTIMPL) -} - -// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. -// -// This will not handle the full implementation of the interface. -func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// copyMemory moves location of a block of memory. -func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {} - -// GetUserDefaultLCID retrieves current user default locale. -func GetUserDefaultLCID() uint32 { - return uint32(0) -} - -// GetMessage in message queue from runtime. -// -// This function appears to block. PeekMessage does not block. -func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) { - return int32(0), NewError(E_NOTIMPL) -} - -// DispatchMessage to window procedure. 
-func DispatchMessage(msg *Msg) int32 { - return int32(0) -} - -func GetVariantDate(value uint64) (time.Time, error) { - return time.Now(), NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go deleted file mode 100644 index b2ac2ec67..000000000 --- a/vendor/github.com/go-ole/go-ole/connect.go +++ /dev/null @@ -1,192 +0,0 @@ -package ole - -// Connection contains IUnknown for fluent interface interaction. -// -// Deprecated. Use oleutil package instead. -type Connection struct { - Object *IUnknown // Access COM -} - -// Initialize COM. -func (*Connection) Initialize() (err error) { - return coInitialize() -} - -// Uninitialize COM. -func (*Connection) Uninitialize() { - CoUninitialize() -} - -// Create IUnknown object based first on ProgId and then from String. -func (c *Connection) Create(progId string) (err error) { - var clsid *GUID - clsid, err = CLSIDFromProgID(progId) - if err != nil { - clsid, err = CLSIDFromString(progId) - if err != nil { - return - } - } - - unknown, err := CreateInstance(clsid, IID_IUnknown) - if err != nil { - return - } - c.Object = unknown - - return -} - -// Release IUnknown object. -func (c *Connection) Release() { - c.Object.Release() -} - -// Load COM object from list of programIDs or strings. -func (c *Connection) Load(names ...string) (errors []error) { - var tempErrors []error = make([]error, len(names)) - var numErrors int = 0 - for _, name := range names { - err := c.Create(name) - if err != nil { - tempErrors = append(tempErrors, err) - numErrors += 1 - continue - } - break - } - - copy(errors, tempErrors[0:numErrors]) - return -} - -// Dispatch returns Dispatch object. -func (c *Connection) Dispatch() (object *Dispatch, err error) { - dispatch, err := c.Object.QueryInterface(IID_IDispatch) - if err != nil { - return - } - object = &Dispatch{dispatch} - return -} - -// Dispatch stores IDispatch object. -type Dispatch struct { - Object *IDispatch // Dispatch object. -} - -// Call method on IDispatch with parameters. -func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(method) - if err != nil { - return - } - - result, err = d.Invoke(id, DISPATCH_METHOD, params) - return -} - -// MustCall method on IDispatch with parameters. -func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(method) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_METHOD, params) - if err != nil { - panic(err) - } - - return -} - -// Get property on IDispatch with parameters. -func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(name) - if err != nil { - return - } - result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) - return -} - -// MustGet property on IDispatch with parameters. -func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(name) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) - if err != nil { - panic(err) - } - return -} - -// Set property on IDispatch with parameters. -func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(name) - if err != nil { - return - } - result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) - return -} - -// MustSet property on IDispatch with parameters. 
-func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(name) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) - if err != nil { - panic(err) - } - return -} - -// GetId retrieves ID of name on IDispatch. -func (d *Dispatch) GetId(name string) (id int32, err error) { - var dispid []int32 - dispid, err = d.Object.GetIDsOfName([]string{name}) - if err != nil { - return - } - id = dispid[0] - return -} - -// GetIds retrieves all IDs of names on IDispatch. -func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) { - dispid, err = d.Object.GetIDsOfName(names) - return -} - -// Invoke IDispatch on DisplayID of dispatch type with parameters. -// -// There have been problems where if send cascading params..., it would error -// out because the parameters would be empty. -func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) { - if len(params) < 1 { - result, err = d.Object.Invoke(id, dispatch) - } else { - result, err = d.Object.Invoke(id, dispatch, params...) - } - return -} - -// Release IDispatch object. -func (d *Dispatch) Release() { - d.Object.Release() -} - -// Connect initializes COM and attempts to load IUnknown based on given names. -func Connect(names ...string) (connection *Connection) { - connection.Initialize() - connection.Load(names...) - return -} diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go deleted file mode 100644 index fd0c6d74b..000000000 --- a/vendor/github.com/go-ole/go-ole/constants.go +++ /dev/null @@ -1,153 +0,0 @@ -package ole - -const ( - CLSCTX_INPROC_SERVER = 1 - CLSCTX_INPROC_HANDLER = 2 - CLSCTX_LOCAL_SERVER = 4 - CLSCTX_INPROC_SERVER16 = 8 - CLSCTX_REMOTE_SERVER = 16 - CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER - CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER - CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER -) - -const ( - COINIT_APARTMENTTHREADED = 0x2 - COINIT_MULTITHREADED = 0x0 - COINIT_DISABLE_OLE1DDE = 0x4 - COINIT_SPEED_OVER_MEMORY = 0x8 -) - -const ( - DISPATCH_METHOD = 1 - DISPATCH_PROPERTYGET = 2 - DISPATCH_PROPERTYPUT = 4 - DISPATCH_PROPERTYPUTREF = 8 -) - -const ( - S_OK = 0x00000000 - E_UNEXPECTED = 0x8000FFFF - E_NOTIMPL = 0x80004001 - E_OUTOFMEMORY = 0x8007000E - E_INVALIDARG = 0x80070057 - E_NOINTERFACE = 0x80004002 - E_POINTER = 0x80004003 - E_HANDLE = 0x80070006 - E_ABORT = 0x80004004 - E_FAIL = 0x80004005 - E_ACCESSDENIED = 0x80070005 - E_PENDING = 0x8000000A - - CO_E_CLASSSTRING = 0x800401F3 -) - -const ( - CC_FASTCALL = iota - CC_CDECL - CC_MSCPASCAL - CC_PASCAL = CC_MSCPASCAL - CC_MACPASCAL - CC_STDCALL - CC_FPFASTCALL - CC_SYSCALL - CC_MPWCDECL - CC_MPWPASCAL - CC_MAX = CC_MPWPASCAL -) - -type VT uint16 - -const ( - VT_EMPTY VT = 0x0 - VT_NULL VT = 0x1 - VT_I2 VT = 0x2 - VT_I4 VT = 0x3 - VT_R4 VT = 0x4 - VT_R8 VT = 0x5 - VT_CY VT = 0x6 - VT_DATE VT = 0x7 - VT_BSTR VT = 0x8 - VT_DISPATCH VT = 0x9 - VT_ERROR VT = 0xa - VT_BOOL VT = 0xb - VT_VARIANT VT = 0xc - VT_UNKNOWN VT = 0xd - VT_DECIMAL VT = 0xe - VT_I1 VT = 0x10 - VT_UI1 VT = 0x11 - VT_UI2 VT = 0x12 - VT_UI4 VT = 0x13 - VT_I8 VT = 0x14 - VT_UI8 VT = 0x15 - VT_INT VT = 0x16 - VT_UINT VT = 0x17 - VT_VOID VT = 0x18 - VT_HRESULT VT = 0x19 - VT_PTR VT = 0x1a - VT_SAFEARRAY VT = 0x1b - VT_CARRAY VT = 0x1c - VT_USERDEFINED VT = 0x1d - VT_LPSTR VT = 0x1e - VT_LPWSTR VT = 0x1f - VT_RECORD VT = 
0x24 - VT_INT_PTR VT = 0x25 - VT_UINT_PTR VT = 0x26 - VT_FILETIME VT = 0x40 - VT_BLOB VT = 0x41 - VT_STREAM VT = 0x42 - VT_STORAGE VT = 0x43 - VT_STREAMED_OBJECT VT = 0x44 - VT_STORED_OBJECT VT = 0x45 - VT_BLOB_OBJECT VT = 0x46 - VT_CF VT = 0x47 - VT_CLSID VT = 0x48 - VT_BSTR_BLOB VT = 0xfff - VT_VECTOR VT = 0x1000 - VT_ARRAY VT = 0x2000 - VT_BYREF VT = 0x4000 - VT_RESERVED VT = 0x8000 - VT_ILLEGAL VT = 0xffff - VT_ILLEGALMASKED VT = 0xfff - VT_TYPEMASK VT = 0xfff -) - -const ( - DISPID_UNKNOWN = -1 - DISPID_VALUE = 0 - DISPID_PROPERTYPUT = -3 - DISPID_NEWENUM = -4 - DISPID_EVALUATE = -5 - DISPID_CONSTRUCTOR = -6 - DISPID_DESTRUCTOR = -7 - DISPID_COLLECT = -8 -) - -const ( - TKIND_ENUM = 1 - TKIND_RECORD = 2 - TKIND_MODULE = 3 - TKIND_INTERFACE = 4 - TKIND_DISPATCH = 5 - TKIND_COCLASS = 6 - TKIND_ALIAS = 7 - TKIND_UNION = 8 - TKIND_MAX = 9 -) - -// Safe Array Feature Flags - -const ( - FADF_AUTO = 0x0001 - FADF_STATIC = 0x0002 - FADF_EMBEDDED = 0x0004 - FADF_FIXEDSIZE = 0x0010 - FADF_RECORD = 0x0020 - FADF_HAVEIID = 0x0040 - FADF_HAVEVARTYPE = 0x0080 - FADF_BSTR = 0x0100 - FADF_UNKNOWN = 0x0200 - FADF_DISPATCH = 0x0400 - FADF_VARIANT = 0x0800 - FADF_RESERVED = 0xF008 -) diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go deleted file mode 100644 index 096b456d3..000000000 --- a/vendor/github.com/go-ole/go-ole/error.go +++ /dev/null @@ -1,51 +0,0 @@ -package ole - -// OleError stores COM errors. -type OleError struct { - hr uintptr - description string - subError error -} - -// NewError creates new error with HResult. -func NewError(hr uintptr) *OleError { - return &OleError{hr: hr} -} - -// NewErrorWithDescription creates new COM error with HResult and description. -func NewErrorWithDescription(hr uintptr, description string) *OleError { - return &OleError{hr: hr, description: description} -} - -// NewErrorWithSubError creates new COM error with parent error. -func NewErrorWithSubError(hr uintptr, description string, err error) *OleError { - return &OleError{hr: hr, description: description, subError: err} -} - -// Code is the HResult. -func (v *OleError) Code() uintptr { - return uintptr(v.hr) -} - -// String description, either manually set or format message with error code. -func (v *OleError) String() string { - if v.description != "" { - return errstr(int(v.hr)) + " (" + v.description + ")" - } - return errstr(int(v.hr)) -} - -// Error implements error interface. -func (v *OleError) Error() string { - return v.String() -} - -// Description retrieves error summary, if there is one. -func (v *OleError) Description() string { - return v.description -} - -// SubError returns parent error, if there is one. -func (v *OleError) SubError() error { - return v.subError -} diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go deleted file mode 100644 index 8a2ffaa27..000000000 --- a/vendor/github.com/go-ole/go-ole/error_func.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package ole - -// errstr converts error code to string. -func errstr(errno int) string { - return "" -} diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go deleted file mode 100644 index d0e8e6859..000000000 --- a/vendor/github.com/go-ole/go-ole/error_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build windows - -package ole - -import ( - "fmt" - "syscall" - "unicode/utf16" -) - -// errstr converts error code to string. 
-func errstr(errno int) string { - // ask windows for the remaining errors - var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS - b := make([]uint16, 300) - n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil) - if err != nil { - return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err) - } - // trim terminating \r and \n - for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { - } - return string(utf16.Decode(b[:n])) -} diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go deleted file mode 100644 index 8d20f68fb..000000000 --- a/vendor/github.com/go-ole/go-ole/guid.go +++ /dev/null @@ -1,284 +0,0 @@ -package ole - -var ( - // IID_NULL is null Interface ID, used when no other Interface ID is known. - IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}") - - // IID_IUnknown is for IUnknown interfaces. - IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}") - - // IID_IDispatch is for IDispatch interfaces. - IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}") - - // IID_IEnumVariant is for IEnumVariant interfaces - IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}") - - // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces. - IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}") - - // IID_IConnectionPoint is for IConnectionPoint interfaces. - IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}") - - // IID_IInspectable is for IInspectable interfaces. - IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}") - - // IID_IProvideClassInfo is for IProvideClassInfo interfaces. - IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}") -) - -// These are for testing and not part of any library. -var ( - // IID_ICOMTestString is for ICOMTestString interfaces. - // - // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED} - IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}") - - // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces. - // - // {BEB06610-EB84-4155-AF58-E2BFF53680B4} - IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}") - - // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces. - // - // {DAA3F9FA-761E-4976-A860-8364CE55F6FC} - IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}") - - // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces. - // - // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0} - IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}") - - // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces. - // - // {8D437CBC-B3ED-485C-BC32-C336432A1623} - IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}") - - // IID_ICOMTestFloat is for ICOMTestFloat interfaces. - // - // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C} - IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}") - - // IID_ICOMTestDouble is for ICOMTestDouble interfaces. - // - // {BF908A81-8687-4E93-999F-D86FAB284BA0} - IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}") - - // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces. - // - // {D530E7A6-4EE8-40D1-8931-3D63B8605010} - IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}") - - // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. 
- // - // {6485B1EF-D780-4834-A4FE-1EBB51746CA3} - IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}") - - // IID_ICOMTestTypes is for ICOMTestTypes interfaces. - // - // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0} - IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}") - - // CLSID_COMEchoTestObject is for COMEchoTestObject class. - // - // {3C24506A-AE9E-4D50-9157-EF317281F1B0} - CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}") - - // CLSID_COMTestScalarClass is for COMTestScalarClass class. - // - // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86} - CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}") -) - -const hextable = "0123456789ABCDEF" -const emptyGUID = "{00000000-0000-0000-0000-000000000000}" - -// GUID is Windows API specific GUID type. -// -// This exists to match Windows GUID type for direct passing for COM. -// Format is in xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx. -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} - -// NewGUID converts the given string into a globally unique identifier that is -// compliant with the Windows API. -// -// The supplied string may be in any of these formats: -// -// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX -// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} -// -// The conversion of the supplied string is not case-sensitive. -func NewGUID(guid string) *GUID { - d := []byte(guid) - var d1, d2, d3, d4a, d4b []byte - - switch len(d) { - case 38: - if d[0] != '{' || d[37] != '}' { - return nil - } - d = d[1:37] - fallthrough - case 36: - if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' { - return nil - } - d1 = d[0:8] - d2 = d[9:13] - d3 = d[14:18] - d4a = d[19:23] - d4b = d[24:36] - case 32: - d1 = d[0:8] - d2 = d[8:12] - d3 = d[12:16] - d4a = d[16:20] - d4b = d[20:32] - default: - return nil - } - - var g GUID - var ok1, ok2, ok3, ok4 bool - g.Data1, ok1 = decodeHexUint32(d1) - g.Data2, ok2 = decodeHexUint16(d2) - g.Data3, ok3 = decodeHexUint16(d3) - g.Data4, ok4 = decodeHexByte64(d4a, d4b) - if ok1 && ok2 && ok3 && ok4 { - return &g - } - return nil -} - -func decodeHexUint32(src []byte) (value uint32, ok bool) { - var b1, b2, b3, b4 byte - var ok1, ok2, ok3, ok4 bool - b1, ok1 = decodeHexByte(src[0], src[1]) - b2, ok2 = decodeHexByte(src[2], src[3]) - b3, ok3 = decodeHexByte(src[4], src[5]) - b4, ok4 = decodeHexByte(src[6], src[7]) - value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4) - ok = ok1 && ok2 && ok3 && ok4 - return -} - -func decodeHexUint16(src []byte) (value uint16, ok bool) { - var b1, b2 byte - var ok1, ok2 bool - b1, ok1 = decodeHexByte(src[0], src[1]) - b2, ok2 = decodeHexByte(src[2], src[3]) - value = (uint16(b1) << 8) | uint16(b2) - ok = ok1 && ok2 - return -} - -func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) { - var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool - value[0], ok1 = decodeHexByte(s1[0], s1[1]) - value[1], ok2 = decodeHexByte(s1[2], s1[3]) - value[2], ok3 = decodeHexByte(s2[0], s2[1]) - value[3], ok4 = decodeHexByte(s2[2], s2[3]) - value[4], ok5 = decodeHexByte(s2[4], s2[5]) - value[5], ok6 = decodeHexByte(s2[6], s2[7]) - value[6], ok7 = decodeHexByte(s2[8], s2[9]) - value[7], ok8 = decodeHexByte(s2[10], s2[11]) - ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8 - return -} - -func decodeHexByte(c1, c2 byte) (value byte, ok bool) { - var n1, n2 byte - var ok1, ok2 bool - n1, ok1 = decodeHexChar(c1) - n2, ok2 = 
decodeHexChar(c2) - value = (n1 << 4) | n2 - ok = ok1 && ok2 - return -} - -func decodeHexChar(c byte) (byte, bool) { - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - - return 0, false -} - -// String converts the GUID to string form. It will adhere to this pattern: -// -// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} -// -// If the GUID is nil, the string representation of an empty GUID is returned: -// -// {00000000-0000-0000-0000-000000000000} -func (guid *GUID) String() string { - if guid == nil { - return emptyGUID - } - - var c [38]byte - c[0] = '{' - putUint32Hex(c[1:9], guid.Data1) - c[9] = '-' - putUint16Hex(c[10:14], guid.Data2) - c[14] = '-' - putUint16Hex(c[15:19], guid.Data3) - c[19] = '-' - putByteHex(c[20:24], guid.Data4[0:2]) - c[24] = '-' - putByteHex(c[25:37], guid.Data4[2:8]) - c[37] = '}' - return string(c[:]) -} - -func putUint32Hex(b []byte, v uint32) { - b[0] = hextable[byte(v>>24)>>4] - b[1] = hextable[byte(v>>24)&0x0f] - b[2] = hextable[byte(v>>16)>>4] - b[3] = hextable[byte(v>>16)&0x0f] - b[4] = hextable[byte(v>>8)>>4] - b[5] = hextable[byte(v>>8)&0x0f] - b[6] = hextable[byte(v)>>4] - b[7] = hextable[byte(v)&0x0f] -} - -func putUint16Hex(b []byte, v uint16) { - b[0] = hextable[byte(v>>8)>>4] - b[1] = hextable[byte(v>>8)&0x0f] - b[2] = hextable[byte(v)>>4] - b[3] = hextable[byte(v)&0x0f] -} - -func putByteHex(dst, src []byte) { - for i := 0; i < len(src); i++ { - dst[i*2] = hextable[src[i]>>4] - dst[i*2+1] = hextable[src[i]&0x0f] - } -} - -// IsEqualGUID compares two GUID. -// -// Not constant time comparison. -func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool { - return guid1.Data1 == guid2.Data1 && - guid1.Data2 == guid2.Data2 && - guid1.Data3 == guid2.Data3 && - guid1.Data4[0] == guid2.Data4[0] && - guid1.Data4[1] == guid2.Data4[1] && - guid1.Data4[2] == guid2.Data4[2] && - guid1.Data4[3] == guid2.Data4[3] && - guid1.Data4[4] == guid2.Data4[4] && - guid1.Data4[5] == guid2.Data4[5] && - guid1.Data4[6] == guid2.Data4[6] && - guid1.Data4[7] == guid2.Data4[7] -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go deleted file mode 100644 index 9e6c49f41..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go +++ /dev/null @@ -1,20 +0,0 @@ -package ole - -import "unsafe" - -type IConnectionPoint struct { - IUnknown -} - -type IConnectionPointVtbl struct { - IUnknownVtbl - GetConnectionInterface uintptr - GetConnectionPointContainer uintptr - Advise uintptr - Unadvise uintptr - EnumConnections uintptr -} - -func (v *IConnectionPoint) VTable() *IConnectionPointVtbl { - return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go deleted file mode 100644 index 5414dc3cd..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package ole - -import "unsafe" - -func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { - return int32(0) -} - -func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} - -func (v *IConnectionPoint) Unadvise(cookie uint32) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) { - return NewError(E_NOTIMPL) -} diff 
--git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go deleted file mode 100644 index 32bc18324..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { - // XXX: This doesn't look like it does what it's supposed to - return release((*IUnknown)(unsafe.Pointer(v))) -} - -func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) { - hr, _, _ := syscall.Syscall( - v.VTable().Advise, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(unknown)), - uintptr(unsafe.Pointer(&cookie))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) { - hr, _, _ := syscall.Syscall( - v.VTable().Unadvise, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(cookie), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go deleted file mode 100644 index 165860d19..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go +++ /dev/null @@ -1,17 +0,0 @@ -package ole - -import "unsafe" - -type IConnectionPointContainer struct { - IUnknown -} - -type IConnectionPointContainerVtbl struct { - IUnknownVtbl - EnumConnectionPoints uintptr - FindConnectionPoint uintptr -} - -func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl { - return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go deleted file mode 100644 index 5dfa42aae..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package ole - -func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go deleted file mode 100644 index ad30d79ef..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) { - hr, _, _ := syscall.Syscall( - v.VTable().FindConnectionPoint, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(point))) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go deleted file mode 100644 index d4af12409..000000000 --- a/vendor/github.com/go-ole/go-ole/idispatch.go +++ /dev/null @@ -1,94 +0,0 @@ -package ole - -import "unsafe" - -type IDispatch struct { - 
IUnknown -} - -type IDispatchVtbl struct { - IUnknownVtbl - GetTypeInfoCount uintptr - GetTypeInfo uintptr - GetIDsOfNames uintptr - Invoke uintptr -} - -func (v *IDispatch) VTable() *IDispatchVtbl { - return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) { - dispid, err = getIDsOfName(v, names) - return -} - -func (v *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { - result, err = invoke(v, dispid, dispatch, params...) - return -} - -func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) { - c, err = getTypeInfoCount(v) - return -} - -func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) { - tinfo, err = getTypeInfo(v) - return -} - -// GetSingleIDOfName is a helper that returns single display ID for IDispatch name. -// -// This replaces the common pattern of attempting to get a single name from the list of available -// IDs. It gives the first ID, if it is available. -func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) { - var displayIDs []int32 - displayIDs, err = v.GetIDsOfName([]string{name}) - if err != nil { - return - } - displayID = displayIDs[0] - return -} - -// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke. -// -// Accepts name and will attempt to retrieve Display ID to pass to Invoke. -// -// Passing params as an array is a workaround that could be fixed in later versions of Go that -// prevent passing empty params. During testing it was discovered that this is an acceptable way of -// getting around not being able to pass params normally. -func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) { - displayID, err := v.GetSingleIDOfName(name) - if err != nil { - return - } - - if len(params) < 1 { - result, err = v.Invoke(displayID, dispatch) - } else { - result, err = v.Invoke(displayID, dispatch, params...) - } - - return -} - -// CallMethod invokes named function with arguments on object. -func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params) -} - -// GetProperty retrieves the property with the name with the ability to pass arguments. -// -// Most of the time you will not need to pass arguments as most objects do not allow for this -// feature. Or at least, should not allow for this feature. Some servers don't follow best practices -// and this is provided for those edge cases. -func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params) -} - -// PutProperty attempts to mutate a property in the object. 
-func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params) -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go deleted file mode 100644 index b8fbbe319..000000000 --- a/vendor/github.com/go-ole/go-ole/idispatch_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) { - return []int32{}, NewError(E_NOTIMPL) -} - -func getTypeInfoCount(disp *IDispatch) (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} - -func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) { - return nil, NewError(E_NOTIMPL) -} - -func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go deleted file mode 100644 index b399f0479..000000000 --- a/vendor/github.com/go-ole/go-ole/idispatch_windows.go +++ /dev/null @@ -1,202 +0,0 @@ -// +build windows - -package ole - -import ( - "math/big" - "syscall" - "time" - "unsafe" -) - -func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) { - wnames := make([]*uint16, len(names)) - for i := 0; i < len(names); i++ { - wnames[i] = syscall.StringToUTF16Ptr(names[i]) - } - dispid = make([]int32, len(names)) - namelen := uint32(len(names)) - hr, _, _ := syscall.Syscall6( - disp.VTable().GetIDsOfNames, - 6, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(IID_NULL)), - uintptr(unsafe.Pointer(&wnames[0])), - uintptr(namelen), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&dispid[0]))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func getTypeInfoCount(disp *IDispatch) (c uint32, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetTypeInfoCount, - 2, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(&c)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetTypeInfo, - 3, - uintptr(unsafe.Pointer(disp)), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&tinfo))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { - var dispparams DISPPARAMS - - if dispatch&DISPATCH_PROPERTYPUT != 0 { - dispnames := [1]int32{DISPID_PROPERTYPUT} - dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) - dispparams.cNamedArgs = 1 - } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { - dispnames := [1]int32{DISPID_PROPERTYPUT} - dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) - dispparams.cNamedArgs = 1 - } - var vargs []VARIANT - if len(params) > 0 { - vargs = make([]VARIANT, len(params)) - for i, v := range params { - //n := len(params)-i-1 - n := len(params) - i - 1 - VariantInit(&vargs[n]) - switch vv := v.(type) { - case bool: - if vv { - vargs[n] = NewVariant(VT_BOOL, 0xffff) - } else { - vargs[n] = NewVariant(VT_BOOL, 0) - } - case *bool: - vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool))))) - case uint8: - vargs[n] = NewVariant(VT_I1, int64(v.(uint8))) - case *uint8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) - case int8: - 
vargs[n] = NewVariant(VT_I1, int64(v.(int8))) - case *int8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) - case int16: - vargs[n] = NewVariant(VT_I2, int64(v.(int16))) - case *int16: - vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16))))) - case uint16: - vargs[n] = NewVariant(VT_UI2, int64(v.(uint16))) - case *uint16: - vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16))))) - case int32: - vargs[n] = NewVariant(VT_I4, int64(v.(int32))) - case *int32: - vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32))))) - case uint32: - vargs[n] = NewVariant(VT_UI4, int64(v.(uint32))) - case *uint32: - vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32))))) - case int64: - vargs[n] = NewVariant(VT_I8, int64(v.(int64))) - case *int64: - vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64))))) - case uint64: - vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64)))) - case *uint64: - vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64))))) - case int: - vargs[n] = NewVariant(VT_I4, int64(v.(int))) - case *int: - vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int))))) - case uint: - vargs[n] = NewVariant(VT_UI4, int64(v.(uint))) - case *uint: - vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint))))) - case float32: - vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv))) - case *float32: - vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32))))) - case float64: - vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) - case *float64: - vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) - case *big.Int: - vargs[n] = NewVariant(VT_DECIMAL, v.(*big.Int).Int64()) - case string: - vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) - case *string: - vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string))))) - case time.Time: - s := vv.Format("2006-01-02 15:04:05") - vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s))))) - case *time.Time: - s := vv.Format("2006-01-02 15:04:05") - vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s)))) - case *IDispatch: - vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))) - case **IDispatch: - vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))) - case nil: - vargs[n] = NewVariant(VT_NULL, 0) - case *VARIANT: - vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))) - case []byte: - safeByteArray := safeArrayFromByteSlice(v.([]byte)) - vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) - defer VariantClear(&vargs[n]) - case []string: - safeByteArray := safeArrayFromStringSlice(v.([]string)) - vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) - defer VariantClear(&vargs[n]) - default: - panic("unknown type") - } - } - dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0])) - dispparams.cArgs = uint32(len(params)) - } - - result = new(VARIANT) - var excepInfo EXCEPINFO - VariantInit(result) - hr, _, _ := syscall.Syscall9( - disp.VTable().Invoke, - 9, - uintptr(unsafe.Pointer(disp)), - uintptr(dispid), - uintptr(unsafe.Pointer(IID_NULL)), - uintptr(GetUserDefaultLCID()), 
- uintptr(dispatch), - uintptr(unsafe.Pointer(&dispparams)), - uintptr(unsafe.Pointer(result)), - uintptr(unsafe.Pointer(&excepInfo)), - 0) - if hr != 0 { - excepInfo.renderStrings() - excepInfo.Clear() - err = NewErrorWithSubError(hr, excepInfo.description, excepInfo) - } - for i, varg := range vargs { - n := len(params) - i - 1 - if varg.VT == VT_BSTR && varg.Val != 0 { - SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) - } - if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 { - *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val)))) - } - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go deleted file mode 100644 index 243389754..000000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant.go +++ /dev/null @@ -1,19 +0,0 @@ -package ole - -import "unsafe" - -type IEnumVARIANT struct { - IUnknown -} - -type IEnumVARIANTVtbl struct { - IUnknownVtbl - Next uintptr - Skip uintptr - Reset uintptr - Clone uintptr -} - -func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl { - return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go deleted file mode 100644 index c14848199..000000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) { - return nil, NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Reset() error { - return NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Skip(celt uint) error { - return NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) { - return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go deleted file mode 100644 index 4781f3b8b..000000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Clone, - 2, - uintptr(unsafe.Pointer(enum)), - uintptr(unsafe.Pointer(&cloned)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Reset() (err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Reset, - 1, - uintptr(unsafe.Pointer(enum)), - 0, - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Skip(celt uint) (err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Skip, - 2, - uintptr(unsafe.Pointer(enum)), - uintptr(celt), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) { - hr, _, _ := syscall.Syscall6( - enum.VTable().Next, - 4, - uintptr(unsafe.Pointer(enum)), - uintptr(celt), - uintptr(unsafe.Pointer(&array)), - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go deleted file mode 100644 index f4a19e253..000000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable.go +++ /dev/null @@ -1,18 +0,0 @@ -package ole - -import "unsafe" - -type IInspectable struct { - IUnknown -} - -type 
IInspectableVtbl struct { - IUnknownVtbl - GetIIds uintptr - GetRuntimeClassName uintptr - GetTrustLevel uintptr -} - -func (v *IInspectable) VTable() *IInspectableVtbl { - return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go deleted file mode 100644 index 348829bf0..000000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable_func.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package ole - -func (v *IInspectable) GetIids() ([]*GUID, error) { - return []*GUID{}, NewError(E_NOTIMPL) -} - -func (v *IInspectable) GetRuntimeClassName() (string, error) { - return "", NewError(E_NOTIMPL) -} - -func (v *IInspectable) GetTrustLevel() (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go deleted file mode 100644 index 4519a4aa4..000000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go +++ /dev/null @@ -1,72 +0,0 @@ -// +build windows - -package ole - -import ( - "bytes" - "encoding/binary" - "reflect" - "syscall" - "unsafe" -) - -func (v *IInspectable) GetIids() (iids []*GUID, err error) { - var count uint32 - var array uintptr - hr, _, _ := syscall.Syscall( - v.VTable().GetIIds, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&count)), - uintptr(unsafe.Pointer(&array))) - if hr != 0 { - err = NewError(hr) - return - } - defer CoTaskMemFree(array) - - iids = make([]*GUID, count) - byteCount := count * uint32(unsafe.Sizeof(GUID{})) - slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)} - byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr)) - reader := bytes.NewReader(byteSlice) - for i := range iids { - guid := GUID{} - err = binary.Read(reader, binary.LittleEndian, &guid) - if err != nil { - return - } - iids[i] = &guid - } - return -} - -func (v *IInspectable) GetRuntimeClassName() (s string, err error) { - var hstring HString - hr, _, _ := syscall.Syscall( - v.VTable().GetRuntimeClassName, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&hstring)), - 0) - if hr != 0 { - err = NewError(hr) - return - } - s = hstring.String() - DeleteHString(hstring) - return -} - -func (v *IInspectable) GetTrustLevel() (level uint32, err error) { - hr, _, _ := syscall.Syscall( - v.VTable().GetTrustLevel, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&level)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go deleted file mode 100644 index 25f3a6f24..000000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go +++ /dev/null @@ -1,21 +0,0 @@ -package ole - -import "unsafe" - -type IProvideClassInfo struct { - IUnknown -} - -type IProvideClassInfoVtbl struct { - IUnknownVtbl - GetClassInfo uintptr -} - -func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl { - return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) { - cinfo, err = getClassInfo(v) - return -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go deleted file mode 100644 index 7e3cb63ea..000000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build 
!windows - -package ole - -func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go deleted file mode 100644 index 2ad016394..000000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetClassInfo, - 2, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(&tinfo)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go deleted file mode 100644 index dd3c5e21b..000000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -package ole - -import "unsafe" - -type ITypeInfo struct { - IUnknown -} - -type ITypeInfoVtbl struct { - IUnknownVtbl - GetTypeAttr uintptr - GetTypeComp uintptr - GetFuncDesc uintptr - GetVarDesc uintptr - GetNames uintptr - GetRefTypeOfImplType uintptr - GetImplTypeFlags uintptr - GetIDsOfNames uintptr - Invoke uintptr - GetDocumentation uintptr - GetDllEntry uintptr - GetRefTypeInfo uintptr - AddressOfMember uintptr - CreateInstance uintptr - GetMops uintptr - GetContainingTypeLib uintptr - ReleaseTypeAttr uintptr - ReleaseFuncDesc uintptr - ReleaseVarDesc uintptr -} - -func (v *ITypeInfo) VTable() *ITypeInfoVtbl { - return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go deleted file mode 100644 index 8364a659b..000000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package ole - -func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go deleted file mode 100644 index 54782b3da..000000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) { - hr, _, _ := syscall.Syscall( - uintptr(v.VTable().GetTypeAttr), - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&tattr)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go deleted file mode 100644 index 108f28ea6..000000000 --- a/vendor/github.com/go-ole/go-ole/iunknown.go +++ /dev/null @@ -1,57 +0,0 @@ -package ole - -import "unsafe" - -type IUnknown struct { - RawVTable *interface{} -} - -type IUnknownVtbl struct { - QueryInterface uintptr - AddRef uintptr - Release uintptr -} - -type UnknownLike interface { - QueryInterface(iid *GUID) (disp *IDispatch, err error) - AddRef() int32 - Release() int32 -} - -func (v *IUnknown) VTable() *IUnknownVtbl { - return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { - return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj) -} - -func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, 
err error) { - err = v.PutQueryInterface(interfaceID, &dispatch) - return -} - -func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) { - err = v.PutQueryInterface(interfaceID, &enum) - return -} - -func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { - return queryInterface(v, iid) -} - -func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) { - unk, err := queryInterface(v, iid) - if err != nil { - panic(err) - } - return unk -} - -func (v *IUnknown) AddRef() int32 { - return addRef(v) -} - -func (v *IUnknown) Release() int32 { - return release(v) -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go deleted file mode 100644 index d0a62cfd7..000000000 --- a/vendor/github.com/go-ole/go-ole/iunknown_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { - return NewError(E_NOTIMPL) -} - -func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { - return nil, NewError(E_NOTIMPL) -} - -func addRef(unk *IUnknown) int32 { - return 0 -} - -func release(unk *IUnknown) int32 { - return 0 -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go deleted file mode 100644 index ede5bb8c1..000000000 --- a/vendor/github.com/go-ole/go-ole/iunknown_windows.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build windows - -package ole - -import ( - "reflect" - "syscall" - "unsafe" -) - -func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { - selfValue := reflect.ValueOf(self).Elem() - objValue := reflect.ValueOf(obj).Elem() - - hr, _, _ := syscall.Syscall( - method, - 3, - selfValue.UnsafeAddr(), - uintptr(unsafe.Pointer(interfaceID)), - objValue.Addr().Pointer()) - if hr != 0 { - err = NewError(hr) - } - return -} - -func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { - hr, _, _ := syscall.Syscall( - unk.VTable().QueryInterface, - 3, - uintptr(unsafe.Pointer(unk)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&disp))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func addRef(unk *IUnknown) int32 { - ret, _, _ := syscall.Syscall( - unk.VTable().AddRef, - 1, - uintptr(unsafe.Pointer(unk)), - 0, - 0) - return int32(ret) -} - -func release(unk *IUnknown) int32 { - ret, _, _ := syscall.Syscall( - unk.VTable().Release, - 1, - uintptr(unsafe.Pointer(unk)), - 0, - 0) - return int32(ret) -} diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go deleted file mode 100644 index dbd132bbd..000000000 --- a/vendor/github.com/go-ole/go-ole/ole.go +++ /dev/null @@ -1,190 +0,0 @@ -package ole - -import ( - "fmt" - "strings" - "unsafe" -) - -// DISPPARAMS are the arguments that passed to methods or property. -type DISPPARAMS struct { - rgvarg uintptr - rgdispidNamedArgs uintptr - cArgs uint32 - cNamedArgs uint32 -} - -// EXCEPINFO defines exception info. -type EXCEPINFO struct { - wCode uint16 - wReserved uint16 - bstrSource *uint16 - bstrDescription *uint16 - bstrHelpFile *uint16 - dwHelpContext uint32 - pvReserved uintptr - pfnDeferredFillIn uintptr - scode uint32 - - // Go-specific part. Don't move upper cos it'll break structure layout for native code. 
- rendered bool - source string - description string - helpFile string -} - -// renderStrings translates BSTR strings to Go ones so `.Error` and `.String` -// could be safely called after `.Clear`. We need this when we can't rely on -// a caller to call `.Clear`. -func (e *EXCEPINFO) renderStrings() { - e.rendered = true - if e.bstrSource == nil { - e.source = "" - } else { - e.source = BstrToString(e.bstrSource) - } - if e.bstrDescription == nil { - e.description = "" - } else { - e.description = BstrToString(e.bstrDescription) - } - if e.bstrHelpFile == nil { - e.helpFile = "" - } else { - e.helpFile = BstrToString(e.bstrHelpFile) - } -} - -// Clear frees BSTR strings inside an EXCEPINFO and set it to NULL. -func (e *EXCEPINFO) Clear() { - freeBSTR := func(s *uint16) { - // SysFreeString don't return errors and is safe for call's on NULL. - // https://docs.microsoft.com/en-us/windows/win32/api/oleauto/nf-oleauto-sysfreestring - _ = SysFreeString((*int16)(unsafe.Pointer(s))) - } - - if e.bstrSource != nil { - freeBSTR(e.bstrSource) - e.bstrSource = nil - } - if e.bstrDescription != nil { - freeBSTR(e.bstrDescription) - e.bstrDescription = nil - } - if e.bstrHelpFile != nil { - freeBSTR(e.bstrHelpFile) - e.bstrHelpFile = nil - } -} - -// WCode return wCode in EXCEPINFO. -func (e EXCEPINFO) WCode() uint16 { - return e.wCode -} - -// SCODE return scode in EXCEPINFO. -func (e EXCEPINFO) SCODE() uint32 { - return e.scode -} - -// String convert EXCEPINFO to string. -func (e EXCEPINFO) String() string { - if !e.rendered { - e.renderStrings() - } - return fmt.Sprintf( - "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x", - e.wCode, e.source, e.description, e.helpFile, e.dwHelpContext, e.scode, - ) -} - -// Error implements error interface and returns error string. -func (e EXCEPINFO) Error() string { - if !e.rendered { - e.renderStrings() - } - - if e.description != "" { - return strings.TrimSpace(e.description) - } - - code := e.scode - if e.wCode != 0 { - code = uint32(e.wCode) - } - return fmt.Sprintf("%v: %#x", e.source, code) -} - -// PARAMDATA defines parameter data type. -type PARAMDATA struct { - Name *int16 - Vt uint16 -} - -// METHODDATA defines method info. -type METHODDATA struct { - Name *uint16 - Data *PARAMDATA - Dispid int32 - Meth uint32 - CC int32 - CArgs uint32 - Flags uint16 - VtReturn uint32 -} - -// INTERFACEDATA defines interface info. -type INTERFACEDATA struct { - MethodData *METHODDATA - CMembers uint32 -} - -// Point is 2D vector type. -type Point struct { - X int32 - Y int32 -} - -// Msg is message between processes. -type Msg struct { - Hwnd uint32 - Message uint32 - Wparam int32 - Lparam int32 - Time uint32 - Pt Point -} - -// TYPEDESC defines data type. -type TYPEDESC struct { - Hreftype uint32 - VT uint16 -} - -// IDLDESC defines IDL info. -type IDLDESC struct { - DwReserved uint32 - WIDLFlags uint16 -} - -// TYPEATTR defines type info. 
-type TYPEATTR struct { - Guid GUID - Lcid uint32 - dwReserved uint32 - MemidConstructor int32 - MemidDestructor int32 - LpstrSchema *uint16 - CbSizeInstance uint32 - Typekind int32 - CFuncs uint16 - CVars uint16 - CImplTypes uint16 - CbSizeVft uint16 - CbAlignment uint16 - WTypeFlags uint16 - WMajorVerNum uint16 - WMinorVerNum uint16 - TdescAlias TYPEDESC - IdldescType IDLDESC -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go deleted file mode 100644 index 60df73cda..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package oleutil - -import ( - "reflect" - "unsafe" - - ole "github.com/go-ole/go-ole" -) - -type stdDispatch struct { - lpVtbl *stdDispatchVtbl - ref int32 - iid *ole.GUID - iface interface{} - funcMap map[string]int32 -} - -type stdDispatchVtbl struct { - pQueryInterface uintptr - pAddRef uintptr - pRelease uintptr - pGetTypeInfoCount uintptr - pGetTypeInfo uintptr - pGetIDsOfNames uintptr - pInvoke uintptr -} - -func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - *punk = nil - if ole.IsEqualGUID(iid, ole.IID_IUnknown) || - ole.IsEqualGUID(iid, ole.IID_IDispatch) { - dispAddRef(this) - *punk = this - return ole.S_OK - } - if ole.IsEqualGUID(iid, pthis.iid) { - dispAddRef(this) - *punk = this - return ole.S_OK - } - return ole.E_NOINTERFACE -} - -func dispAddRef(this *ole.IUnknown) int32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - pthis.ref++ - return pthis.ref -} - -func dispRelease(this *ole.IUnknown) int32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - pthis.ref-- - return pthis.ref -} - -func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - names := make([]string, len(wnames)) - for i := 0; i < len(names); i++ { - names[i] = ole.LpOleStrToString(wnames[i]) - } - for n := 0; n < namelen; n++ { - if id, ok := pthis.funcMap[names[n]]; ok { - pdisp[n] = id - } - } - return ole.S_OK -} - -func dispGetTypeInfoCount(pcount *int) uintptr { - if pcount != nil { - *pcount = 0 - } - return ole.S_OK -} - -func dispGetTypeInfo(ptypeif *uintptr) uintptr { - return ole.E_NOTIMPL -} - -func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - found := "" - for name, id := range pthis.funcMap { - if id == dispid { - found = name - } - } - if found != "" { - rv := reflect.ValueOf(pthis.iface).Elem() - rm := rv.MethodByName(found) - rr := rm.Call([]reflect.Value{}) - println(len(rr)) - return ole.S_OK - } - return ole.E_NOTIMPL -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go deleted file mode 100644 index 8818fb827..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package oleutil - -import ole "github.com/go-ole/go-ole" - -// ConnectObject creates a connection point between two services for communication. 
-func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) { - return 0, ole.NewError(ole.E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go deleted file mode 100644 index ab9c0d8dc..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build windows - -package oleutil - -import ( - "reflect" - "syscall" - "unsafe" - - ole "github.com/go-ole/go-ole" -) - -// ConnectObject creates a connection point between two services for communication. -func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) { - unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer) - if err != nil { - return - } - - container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown)) - var point *ole.IConnectionPoint - err = container.FindConnectionPoint(iid, &point) - if err != nil { - return - } - if edisp, ok := idisp.(*ole.IUnknown); ok { - cookie, err = point.Advise(edisp) - container.Release() - if err != nil { - return - } - } - rv := reflect.ValueOf(disp).Elem() - if rv.Type().Kind() == reflect.Struct { - dest := &stdDispatch{} - dest.lpVtbl = &stdDispatchVtbl{} - dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface) - dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef) - dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease) - dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount) - dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo) - dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames) - dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke) - dest.iface = disp - dest.iid = iid - cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest))) - container.Release() - if err != nil { - point.Release() - return - } - return - } - - container.Release() - - return 0, ole.NewError(ole.E_INVALIDARG) -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go deleted file mode 100644 index 58347628f..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go +++ /dev/null @@ -1,6 +0,0 @@ -// This file is here so go get succeeds as without it errors with: -// no buildable Go source files in ... -// -// +build !windows - -package oleutil diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go deleted file mode 100644 index f7803c1e3..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go +++ /dev/null @@ -1,127 +0,0 @@ -package oleutil - -import ole "github.com/go-ole/go-ole" - -// ClassIDFrom retrieves class ID whether given is program ID or application string. -func ClassIDFrom(programID string) (classID *ole.GUID, err error) { - return ole.ClassIDFrom(programID) -} - -// CreateObject creates object from programID based on interface type. -// -// Only supports IUnknown. -// -// Program ID can be either program ID or application string. -func CreateObject(programID string) (unknown *ole.IUnknown, err error) { - classID, err := ole.ClassIDFrom(programID) - if err != nil { - return - } - - unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown) - if err != nil { - return - } - - return -} - -// GetActiveObject retrieves active object for program ID and interface ID based -// on interface type. -// -// Only supports IUnknown. 
-// -// Program ID can be either program ID or application string. -func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) { - classID, err := ole.ClassIDFrom(programID) - if err != nil { - return - } - - unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown) - if err != nil { - return - } - - return -} - -// CallMethod calls method on IDispatch with parameters. -func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params) -} - -// MustCallMethod calls method on IDispatch with parameters or panics. -func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := CallMethod(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -// GetProperty retrieves property from IDispatch. -func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params) -} - -// MustGetProperty retrieves property from IDispatch or panics. -func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := GetProperty(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -// PutProperty mutates property. -func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params) -} - -// MustPutProperty mutates property or panics. -func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := PutProperty(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -// PutPropertyRef mutates property reference. -func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) -} - -// MustPutPropertyRef mutates property reference or panics. -func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := PutPropertyRef(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { - newEnum, err := disp.GetProperty("_NewEnum") - if err != nil { - return err - } - defer newEnum.Clear() - - enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) - if err != nil { - return err - } - defer enum.Release() - - for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) { - if err != nil { - return err - } - if ferr := f(&item); ferr != nil { - return ferr - } - } - return nil -} diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go deleted file mode 100644 index a5201b56c..000000000 --- a/vendor/github.com/go-ole/go-ole/safearray.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package is meant to retrieve and process safe array data returned from COM. - -package ole - -// SafeArrayBound defines the SafeArray boundaries. -type SafeArrayBound struct { - Elements uint32 - LowerBound int32 -} - -// SafeArray is how COM handles arrays. 
-type SafeArray struct { - Dimensions uint16 - FeaturesFlag uint16 - ElementsSize uint32 - LocksAmount uint32 - Data uint32 - Bounds [16]byte -} - -// SAFEARRAY is obsolete, exists for backwards compatibility. -// Use SafeArray -type SAFEARRAY SafeArray - -// SAFEARRAYBOUND is obsolete, exists for backwards compatibility. -// Use SafeArrayBound -type SAFEARRAYBOUND SafeArrayBound diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go deleted file mode 100644 index 0dee670ce..000000000 --- a/vendor/github.com/go-ole/go-ole/safearray_func.go +++ /dev/null @@ -1,211 +0,0 @@ -// +build !windows - -package ole - -import ( - "unsafe" -) - -// safeArrayAccessData returns raw array pointer. -// -// AKA: SafeArrayAccessData in Windows API. -func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { - return uintptr(0), NewError(E_NOTIMPL) -} - -// safeArrayUnaccessData releases raw array. -// -// AKA: SafeArrayUnaccessData in Windows API. -func safeArrayUnaccessData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayAllocData allocates SafeArray. -// -// AKA: SafeArrayAllocData in Windows API. -func safeArrayAllocData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayAllocDescriptor allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptor in Windows API. -func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayAllocDescriptorEx allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptorEx in Windows API. -func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCopy returns copy of SafeArray. -// -// AKA: SafeArrayCopy in Windows API. -func safeArrayCopy(original *SafeArray) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCopyData duplicates SafeArray into another SafeArray object. -// -// AKA: SafeArrayCopyData in Windows API. -func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayCreate creates SafeArray. -// -// AKA: SafeArrayCreate in Windows API. -func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateEx creates SafeArray. -// -// AKA: SafeArrayCreateEx in Windows API. -func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateVector creates SafeArray. -// -// AKA: SafeArrayCreateVector in Windows API. -func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateVectorEx creates SafeArray. -// -// AKA: SafeArrayCreateVectorEx in Windows API. -func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayDestroy destroys SafeArray object. -// -// AKA: SafeArrayDestroy in Windows API. -func safeArrayDestroy(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayDestroyData destroys SafeArray object. -// -// AKA: SafeArrayDestroyData in Windows API. -func safeArrayDestroyData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayDestroyDescriptor destroys SafeArray object. 
-// -// AKA: SafeArrayDestroyDescriptor in Windows API. -func safeArrayDestroyDescriptor(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetDim is the amount of dimensions in the SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetDim in Windows API. -func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { - u := uint32(0) - return &u, NewError(E_NOTIMPL) -} - -// safeArrayGetElementSize is the element size in bytes. -// -// AKA: SafeArrayGetElemsize in Windows API. -func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { - u := uint32(0) - return &u, NewError(E_NOTIMPL) -} - -// safeArrayGetElement retrieves element at given index. -func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetElement retrieves element at given index and converts to string. -func safeArrayGetElementString(safearray *SafeArray, index int32) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. -// -// AKA: SafeArrayGetIID in Windows API. -func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayGetLBound returns lower bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetLBound in Windows API. -func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int32, error) { - return int32(0), NewError(E_NOTIMPL) -} - -// safeArrayGetUBound returns upper bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetUBound in Windows API. -func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int32, error) { - return int32(0), NewError(E_NOTIMPL) -} - -// safeArrayGetVartype returns data type of SafeArray. -// -// AKA: SafeArrayGetVartype in Windows API. -func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { - return uint16(0), NewError(E_NOTIMPL) -} - -// safeArrayLock locks SafeArray for reading to modify SafeArray. -// -// This must be called during some calls to ensure that another process does not -// read or write to the SafeArray during editing. -// -// AKA: SafeArrayLock in Windows API. -func safeArrayLock(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayUnlock unlocks SafeArray for reading. -// -// AKA: SafeArrayUnlock in Windows API. -func safeArrayUnlock(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayPutElement stores the data element at the specified location in the -// array. -// -// AKA: SafeArrayPutElement in Windows API. -func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. -// -// AKA: SafeArrayGetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArraySetRecordInfo mutates IRecordInfo info for custom types. -// -// AKA: SafeArraySetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. 
-func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go deleted file mode 100644 index 0c1b3a10f..000000000 --- a/vendor/github.com/go-ole/go-ole/safearray_windows.go +++ /dev/null @@ -1,337 +0,0 @@ -// +build windows - -package ole - -import ( - "unsafe" -) - -var ( - procSafeArrayAccessData = modoleaut32.NewProc("SafeArrayAccessData") - procSafeArrayAllocData = modoleaut32.NewProc("SafeArrayAllocData") - procSafeArrayAllocDescriptor = modoleaut32.NewProc("SafeArrayAllocDescriptor") - procSafeArrayAllocDescriptorEx = modoleaut32.NewProc("SafeArrayAllocDescriptorEx") - procSafeArrayCopy = modoleaut32.NewProc("SafeArrayCopy") - procSafeArrayCopyData = modoleaut32.NewProc("SafeArrayCopyData") - procSafeArrayCreate = modoleaut32.NewProc("SafeArrayCreate") - procSafeArrayCreateEx = modoleaut32.NewProc("SafeArrayCreateEx") - procSafeArrayCreateVector = modoleaut32.NewProc("SafeArrayCreateVector") - procSafeArrayCreateVectorEx = modoleaut32.NewProc("SafeArrayCreateVectorEx") - procSafeArrayDestroy = modoleaut32.NewProc("SafeArrayDestroy") - procSafeArrayDestroyData = modoleaut32.NewProc("SafeArrayDestroyData") - procSafeArrayDestroyDescriptor = modoleaut32.NewProc("SafeArrayDestroyDescriptor") - procSafeArrayGetDim = modoleaut32.NewProc("SafeArrayGetDim") - procSafeArrayGetElement = modoleaut32.NewProc("SafeArrayGetElement") - procSafeArrayGetElemsize = modoleaut32.NewProc("SafeArrayGetElemsize") - procSafeArrayGetIID = modoleaut32.NewProc("SafeArrayGetIID") - procSafeArrayGetLBound = modoleaut32.NewProc("SafeArrayGetLBound") - procSafeArrayGetUBound = modoleaut32.NewProc("SafeArrayGetUBound") - procSafeArrayGetVartype = modoleaut32.NewProc("SafeArrayGetVartype") - procSafeArrayLock = modoleaut32.NewProc("SafeArrayLock") - procSafeArrayPtrOfIndex = modoleaut32.NewProc("SafeArrayPtrOfIndex") - procSafeArrayUnaccessData = modoleaut32.NewProc("SafeArrayUnaccessData") - procSafeArrayUnlock = modoleaut32.NewProc("SafeArrayUnlock") - procSafeArrayPutElement = modoleaut32.NewProc("SafeArrayPutElement") - //procSafeArrayRedim = modoleaut32.NewProc("SafeArrayRedim") // TODO - //procSafeArraySetIID = modoleaut32.NewProc("SafeArraySetIID") // TODO - procSafeArrayGetRecordInfo = modoleaut32.NewProc("SafeArrayGetRecordInfo") - procSafeArraySetRecordInfo = modoleaut32.NewProc("SafeArraySetRecordInfo") -) - -// safeArrayAccessData returns raw array pointer. -// -// AKA: SafeArrayAccessData in Windows API. -// Todo: Test -func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { - err = convertHresultToError( - procSafeArrayAccessData.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&element)))) - return -} - -// safeArrayUnaccessData releases raw array. -// -// AKA: SafeArrayUnaccessData in Windows API. -func safeArrayUnaccessData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayAllocData allocates SafeArray. -// -// AKA: SafeArrayAllocData in Windows API. -func safeArrayAllocData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayAllocDescriptor allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptor in Windows API. 
-func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayAllocDescriptorEx allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptorEx in Windows API. -func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayAllocDescriptorEx.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayCopy returns copy of SafeArray. -// -// AKA: SafeArrayCopy in Windows API. -func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayCopy.Call( - uintptr(unsafe.Pointer(original)), - uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayCopyData duplicates SafeArray into another SafeArray object. -// -// AKA: SafeArrayCopyData in Windows API. -func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { - err = convertHresultToError( - procSafeArrayCopyData.Call( - uintptr(unsafe.Pointer(original)), - uintptr(unsafe.Pointer(duplicate)))) - return -} - -// safeArrayCreate creates SafeArray. -// -// AKA: SafeArrayCreate in Windows API. -func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreate.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(bounds))) - safearray = (*SafeArray)(unsafe.Pointer(&sa)) - return -} - -// safeArrayCreateEx creates SafeArray. -// -// AKA: SafeArrayCreateEx in Windows API. -func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateEx.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(bounds)), - extra) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayCreateVector creates SafeArray. -// -// AKA: SafeArrayCreateVector in Windows API. -func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateVector.Call( - uintptr(variantType), - uintptr(lowerBound), - uintptr(length)) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayCreateVectorEx creates SafeArray. -// -// AKA: SafeArrayCreateVectorEx in Windows API. -func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateVectorEx.Call( - uintptr(variantType), - uintptr(lowerBound), - uintptr(length), - extra) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayDestroy destroys SafeArray object. -// -// AKA: SafeArrayDestroy in Windows API. -func safeArrayDestroy(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayDestroyData destroys SafeArray object. -// -// AKA: SafeArrayDestroyData in Windows API. -func safeArrayDestroyData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayDestroyDescriptor destroys SafeArray object. -// -// AKA: SafeArrayDestroyDescriptor in Windows API. 
-func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayGetDim is the amount of dimensions in the SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetDim in Windows API. -func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { - l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) - dimensions = (*uint32)(unsafe.Pointer(l)) - return -} - -// safeArrayGetElementSize is the element size in bytes. -// -// AKA: SafeArrayGetElemsize in Windows API. -func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { - l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) - length = (*uint32)(unsafe.Pointer(l)) - return -} - -// safeArrayGetElement retrieves element at given index. -func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { - return convertHresultToError( - procSafeArrayGetElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(pv))) -} - -// safeArrayGetElementString retrieves element at given index and converts to string. -func safeArrayGetElementString(safearray *SafeArray, index int32) (str string, err error) { - var element *int16 - err = convertHresultToError( - procSafeArrayGetElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(unsafe.Pointer(&element)))) - str = BstrToString(*(**uint16)(unsafe.Pointer(&element))) - SysFreeString(element) - return -} - -// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. -// -// AKA: SafeArrayGetIID in Windows API. -func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { - err = convertHresultToError( - procSafeArrayGetIID.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&guid)))) - return -} - -// safeArrayGetLBound returns lower bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetLBound in Windows API. -func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int32, err error) { - err = convertHresultToError( - procSafeArrayGetLBound.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(dimension), - uintptr(unsafe.Pointer(&lowerBound)))) - return -} - -// safeArrayGetUBound returns upper bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetUBound in Windows API. -func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int32, err error) { - err = convertHresultToError( - procSafeArrayGetUBound.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(dimension), - uintptr(unsafe.Pointer(&upperBound)))) - return -} - -// safeArrayGetVartype returns data type of SafeArray. -// -// AKA: SafeArrayGetVartype in Windows API. -func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { - err = convertHresultToError( - procSafeArrayGetVartype.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&varType)))) - return -} - -// safeArrayLock locks SafeArray for reading to modify SafeArray. -// -// This must be called during some calls to ensure that another process does not -// read or write to the SafeArray during editing. 
-// -// AKA: SafeArrayLock in Windows API. -func safeArrayLock(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayUnlock unlocks SafeArray for reading. -// -// AKA: SafeArrayUnlock in Windows API. -func safeArrayUnlock(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayPutElement stores the data element at the specified location in the -// array. -// -// AKA: SafeArrayPutElement in Windows API. -func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { - err = convertHresultToError( - procSafeArrayPutElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(unsafe.Pointer(element)))) - return -} - -// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. -// -// AKA: SafeArrayGetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { - err = convertHresultToError( - procSafeArrayGetRecordInfo.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&recordInfo)))) - return -} - -// safeArraySetRecordInfo mutates IRecordInfo info for custom types. -// -// AKA: SafeArraySetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { - err = convertHresultToError( - procSafeArraySetRecordInfo.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&recordInfo)))) - return -} diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go deleted file mode 100644 index da737293d..000000000 --- a/vendor/github.com/go-ole/go-ole/safearrayconversion.go +++ /dev/null @@ -1,140 +0,0 @@ -// Helper for converting SafeArray to array of objects. 
- -package ole - -import ( - "unsafe" -) - -type SafeArrayConversion struct { - Array *SafeArray -} - -func (sac *SafeArrayConversion) ToStringArray() (strings []string) { - totalElements, _ := sac.TotalElements(0) - strings = make([]string, totalElements) - - for i := int32(0); i < totalElements; i++ { - strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) - } - - return -} - -func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { - totalElements, _ := sac.TotalElements(0) - bytes = make([]byte, totalElements) - - for i := int32(0); i < totalElements; i++ { - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) - } - - return -} - -func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { - totalElements, _ := sac.TotalElements(0) - values = make([]interface{}, totalElements) - vt, _ := safeArrayGetVartype(sac.Array) - - for i := int32(0); i < totalElements; i++ { - switch VT(vt) { - case VT_BOOL: - var v bool - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_I1: - var v int8 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_I2: - var v int16 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_I4: - var v int32 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_I8: - var v int64 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_UI1: - var v uint8 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_UI2: - var v uint16 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_UI4: - var v uint32 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_UI8: - var v uint64 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_R4: - var v float32 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_R8: - var v float64 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_BSTR: - v , _ := safeArrayGetElementString(sac.Array, i) - values[i] = v - case VT_VARIANT: - var v VARIANT - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v.Value() - v.Clear() - default: - // TODO - } - } - - return -} - -func (sac *SafeArrayConversion) GetType() (varType uint16, err error) { - return safeArrayGetVartype(sac.Array) -} - -func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) { - return safeArrayGetDim(sac.Array) -} - -func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { - return safeArrayGetElementSize(sac.Array) -} - -func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int32, err error) { - if index < 1 { - index = 1 - } - - // Get array bounds - var LowerBounds int32 - var UpperBounds int32 - - LowerBounds, err = safeArrayGetLBound(sac.Array, index) - if err != nil { - return - } - - UpperBounds, err = safeArrayGetUBound(sac.Array, index) - if err != nil { - return - } - - totalElements = UpperBounds - LowerBounds + 1 - return -} - -// Release Safe Array memory -func (sac *SafeArrayConversion) Release() { - safeArrayDestroy(sac.Array) -} diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go deleted file mode 100644 index a9fa885f1..000000000 --- a/vendor/github.com/go-ole/go-ole/safearrayslices.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build windows - -package ole - -import ( - "unsafe" -) - -func 
safeArrayFromByteSlice(slice []byte) *SafeArray { - array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) - - if array == nil { - panic("Could not convert []byte to SAFEARRAY") - } - - for i, v := range slice { - safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v))) - } - return array -} - -func safeArrayFromStringSlice(slice []string) *SafeArray { - array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) - - if array == nil { - panic("Could not convert []string to SAFEARRAY") - } - // SysAllocStringLen(s) - for i, v := range slice { - safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) - } - return array -} diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go deleted file mode 100644 index 99ee82dc3..000000000 --- a/vendor/github.com/go-ole/go-ole/utility.go +++ /dev/null @@ -1,101 +0,0 @@ -package ole - -import ( - "unicode/utf16" - "unsafe" -) - -// ClassIDFrom retrieves class ID whether given is program ID or application string. -// -// Helper that provides check against both Class ID from Program ID and Class ID from string. It is -// faster, if you know which you are using, to use the individual functions, but this will check -// against available functions for you. -func ClassIDFrom(programID string) (classID *GUID, err error) { - classID, err = CLSIDFromProgID(programID) - if err != nil { - classID, err = CLSIDFromString(programID) - if err != nil { - return - } - } - return -} - -// BytePtrToString converts byte pointer to a Go string. -func BytePtrToString(p *byte) string { - a := (*[10000]uint8)(unsafe.Pointer(p)) - i := 0 - for a[i] != 0 { - i++ - } - return string(a[:i]) -} - -// UTF16PtrToString is alias for LpOleStrToString. -// -// Kept for compatibility reasons. -func UTF16PtrToString(p *uint16) string { - return LpOleStrToString(p) -} - -// LpOleStrToString converts COM Unicode to Go string. -func LpOleStrToString(p *uint16) string { - if p == nil { - return "" - } - - length := lpOleStrLen(p) - a := make([]uint16, length) - - ptr := unsafe.Pointer(p) - - for i := 0; i < int(length); i++ { - a[i] = *(*uint16)(ptr) - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - - return string(utf16.Decode(a)) -} - -// BstrToString converts COM binary string to Go string. -func BstrToString(p *uint16) string { - if p == nil { - return "" - } - length := SysStringLen((*int16)(unsafe.Pointer(p))) - a := make([]uint16, length) - - ptr := unsafe.Pointer(p) - - for i := 0; i < int(length); i++ { - a[i] = *(*uint16)(ptr) - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - return string(utf16.Decode(a)) -} - -// lpOleStrLen returns the length of Unicode string. -func lpOleStrLen(p *uint16) (length int64) { - if p == nil { - return 0 - } - - ptr := unsafe.Pointer(p) - - for i := 0; ; i++ { - if 0 == *(*uint16)(ptr) { - length = int64(i) - break - } - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - return -} - -// convertHresultToError converts syscall to error, if call is unsuccessful. 
-func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) { - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go deleted file mode 100644 index a6add1b00..000000000 --- a/vendor/github.com/go-ole/go-ole/variables.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build windows - -package ole - -import ( - "golang.org/x/sys/windows" -) - -var ( - modcombase = windows.NewLazySystemDLL("combase.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modole32 = windows.NewLazySystemDLL("ole32.dll") - modoleaut32 = windows.NewLazySystemDLL("oleaut32.dll") - moduser32 = windows.NewLazySystemDLL("user32.dll") -) diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go deleted file mode 100644 index 967a23fea..000000000 --- a/vendor/github.com/go-ole/go-ole/variant.go +++ /dev/null @@ -1,105 +0,0 @@ -package ole - -import "unsafe" - -// NewVariant returns new variant based on type and value. -func NewVariant(vt VT, val int64) VARIANT { - return VARIANT{VT: vt, Val: val} -} - -// ToIUnknown converts Variant to Unknown object. -func (v *VARIANT) ToIUnknown() *IUnknown { - if v.VT != VT_UNKNOWN { - return nil - } - return (*IUnknown)(unsafe.Pointer(uintptr(v.Val))) -} - -// ToIDispatch converts variant to dispatch object. -func (v *VARIANT) ToIDispatch() *IDispatch { - if v.VT != VT_DISPATCH { - return nil - } - return (*IDispatch)(unsafe.Pointer(uintptr(v.Val))) -} - -// ToArray converts variant to SafeArray helper. -func (v *VARIANT) ToArray() *SafeArrayConversion { - if v.VT != VT_SAFEARRAY { - if v.VT&VT_ARRAY == 0 { - return nil - } - } - var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val))) - return &SafeArrayConversion{safeArray} -} - -// ToString converts variant to Go string. -func (v *VARIANT) ToString() string { - if v.VT != VT_BSTR { - return "" - } - return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val))) -} - -// Clear the memory of variant object. -func (v *VARIANT) Clear() error { - return VariantClear(v) -} - -// Value returns variant value based on its type. -// -// Currently supported types: 2- and 4-byte integers, strings, bools. -// Note that 64-bit integers, datetimes, and other types are stored as strings -// and will be returned as strings. -// -// Needs to be further converted, because this returns an interface{}. -func (v *VARIANT) Value() interface{} { - switch v.VT { - case VT_I1: - return int8(v.Val) - case VT_UI1: - return uint8(v.Val) - case VT_I2: - return int16(v.Val) - case VT_UI2: - return uint16(v.Val) - case VT_I4: - return int32(v.Val) - case VT_UI4: - return uint32(v.Val) - case VT_I8: - return int64(v.Val) - case VT_UI8: - return uint64(v.Val) - case VT_INT: - return int(v.Val) - case VT_UINT: - return uint(v.Val) - case VT_INT_PTR: - return uintptr(v.Val) // TODO - case VT_UINT_PTR: - return uintptr(v.Val) - case VT_R4: - return *(*float32)(unsafe.Pointer(&v.Val)) - case VT_R8: - return *(*float64)(unsafe.Pointer(&v.Val)) - case VT_BSTR: - return v.ToString() - case VT_DATE: - // VT_DATE type will either return float64 or time.Time. 
- d := uint64(v.Val) - date, err := GetVariantDate(d) - if err != nil { - return float64(v.Val) - } - return date - case VT_UNKNOWN: - return v.ToIUnknown() - case VT_DISPATCH: - return v.ToIDispatch() - case VT_BOOL: - return v.Val != 0 - } - return nil -} diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go deleted file mode 100644 index e73736bf3..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_386.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build 386 - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go deleted file mode 100644 index dccdde132..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build amd64 - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_arm.go b/vendor/github.com/go-ole/go-ole/variant_arm.go deleted file mode 100644 index d47245444..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_arm.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build arm - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_arm64.go b/vendor/github.com/go-ole/go-ole/variant_arm64.go deleted file mode 100644 index 78473cec4..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_arm64.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build arm64 -// +build arm64 - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_386.go b/vendor/github.com/go-ole/go-ole/variant_date_386.go deleted file mode 100644 index 1b970f63f..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_date_386.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build windows,386 - -package ole - -import ( - "errors" - "syscall" - "time" - "unsafe" -) - -// GetVariantDate converts COM Variant Time value to Go time.Time. -func GetVariantDate(value uint64) (time.Time, error) { - var st syscall.Systemtime - v1 := uint32(value) - v2 := uint32(value >> 32) - r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) - if r != 0 { - return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil - } - return time.Now(), errors.New("Could not convert to time, passing current time.") -} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go deleted file mode 100644 index 6952f1f0d..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build windows,amd64 - -package ole - -import ( - "errors" - "syscall" - "time" - "unsafe" -) - -// GetVariantDate converts COM Variant Time value to Go time.Time. 
-func GetVariantDate(value uint64) (time.Time, error) { - var st syscall.Systemtime - r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st))) - if r != 0 { - return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil - } - return time.Now(), errors.New("Could not convert to time, passing current time.") -} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm.go b/vendor/github.com/go-ole/go-ole/variant_date_arm.go deleted file mode 100644 index 09ec7b5cf..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_date_arm.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build windows,arm - -package ole - -import ( - "errors" - "syscall" - "time" - "unsafe" -) - -// GetVariantDate converts COM Variant Time value to Go time.Time. -func GetVariantDate(value uint64) (time.Time, error) { - var st syscall.Systemtime - v1 := uint32(value) - v2 := uint32(value >> 32) - r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) - if r != 0 { - return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil - } - return time.Now(), errors.New("Could not convert to time, passing current time.") -} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm64.go b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go deleted file mode 100644 index 02b04a0d4..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_date_arm64.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build windows && arm64 -// +build windows,arm64 - -package ole - -import ( - "errors" - "syscall" - "time" - "unsafe" -) - -// GetVariantDate converts COM Variant Time value to Go time.Time. 
-func GetVariantDate(value uint64) (time.Time, error) { - var st syscall.Systemtime - v1 := uint32(value) - v2 := uint32(value >> 32) - r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) - if r != 0 { - return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil - } - return time.Now(), errors.New("Could not convert to time, passing current time.") -} diff --git a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go deleted file mode 100644 index 326427a7d..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build ppc64le - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go deleted file mode 100644 index 9874ca66b..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build s390x - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go deleted file mode 100644 index 729b4a04d..000000000 --- a/vendor/github.com/go-ole/go-ole/vt_string.go +++ /dev/null @@ -1,58 +0,0 @@ -// generated by stringer -output vt_string.go -type VT; DO NOT EDIT - -package ole - -import "fmt" - -const ( - _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL" - _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR" - _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR" - _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID" - _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR" - _VT_name_5 = "VT_ARRAY" - _VT_name_6 = "VT_BYREF" - _VT_name_7 = "VT_RESERVED" - _VT_name_8 = "VT_ILLEGAL" -) - -var ( - _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110} - _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122} - _VT_index_2 = [...]uint8{0, 9, 19, 30} - _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98} - _VT_index_4 = [...]uint8{0, 12, 21} - _VT_index_5 = [...]uint8{0, 8} - _VT_index_6 = [...]uint8{0, 8} - _VT_index_7 = [...]uint8{0, 11} - _VT_index_8 = [...]uint8{0, 10} -) - -func (i VT) String() string { - switch { - case 0 <= i && i <= 14: - return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]] - case 16 <= i && i <= 31: - i -= 16 - return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]] - case 36 <= i && i <= 38: - i -= 36 - return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]] - case 64 <= i && i <= 72: - i -= 64 - return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]] - case 4095 <= i && i <= 4096: - i -= 4095 - return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]] - case i == 8192: - return _VT_name_5 - case i == 16384: - return _VT_name_6 - case i == 32768: - return _VT_name_7 - case i == 65535: - return _VT_name_8 - default: - return fmt.Sprintf("VT(%d)", i) - } -} diff --git a/vendor/github.com/go-ole/go-ole/winrt.go 
b/vendor/github.com/go-ole/go-ole/winrt.go deleted file mode 100644 index 4e9eca732..000000000 --- a/vendor/github.com/go-ole/go-ole/winrt.go +++ /dev/null @@ -1,99 +0,0 @@ -// +build windows - -package ole - -import ( - "reflect" - "syscall" - "unicode/utf8" - "unsafe" -) - -var ( - procRoInitialize = modcombase.NewProc("RoInitialize") - procRoActivateInstance = modcombase.NewProc("RoActivateInstance") - procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory") - procWindowsCreateString = modcombase.NewProc("WindowsCreateString") - procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString") - procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer") -) - -func RoInitialize(thread_type uint32) (err error) { - hr, _, _ := procRoInitialize.Call(uintptr(thread_type)) - if hr != 0 { - err = NewError(hr) - } - return -} - -func RoActivateInstance(clsid string) (ins *IInspectable, err error) { - hClsid, err := NewHString(clsid) - if err != nil { - return nil, err - } - defer DeleteHString(hClsid) - - hr, _, _ := procRoActivateInstance.Call( - uintptr(unsafe.Pointer(hClsid)), - uintptr(unsafe.Pointer(&ins))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { - hClsid, err := NewHString(clsid) - if err != nil { - return nil, err - } - defer DeleteHString(hClsid) - - hr, _, _ := procRoGetActivationFactory.Call( - uintptr(unsafe.Pointer(hClsid)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&ins))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// HString is handle string for pointers. -type HString uintptr - -// NewHString returns a new HString for Go string. -func NewHString(s string) (hstring HString, err error) { - u16 := syscall.StringToUTF16Ptr(s) - len := uint32(utf8.RuneCountInString(s)) - hr, _, _ := procWindowsCreateString.Call( - uintptr(unsafe.Pointer(u16)), - uintptr(len), - uintptr(unsafe.Pointer(&hstring))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// DeleteHString deletes HString. -func DeleteHString(hstring HString) (err error) { - hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// String returns Go string value of HString. -func (h HString) String() string { - var u16buf uintptr - var u16len uint32 - u16buf, _, _ = procWindowsGetStringRawBuffer.Call( - uintptr(h), - uintptr(unsafe.Pointer(&u16len))) - - u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)} - u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr)) - return syscall.UTF16ToString(u16) -} diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go deleted file mode 100644 index 52e6d74c9..000000000 --- a/vendor/github.com/go-ole/go-ole/winrt_doc.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !windows - -package ole - -// RoInitialize -func RoInitialize(thread_type uint32) (err error) { - return NewError(E_NOTIMPL) -} - -// RoActivateInstance -func RoActivateInstance(clsid string) (ins *IInspectable, err error) { - return nil, NewError(E_NOTIMPL) -} - -// RoGetActivationFactory -func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { - return nil, NewError(E_NOTIMPL) -} - -// HString is handle string for pointers. -type HString uintptr - -// NewHString returns a new HString for Go string. 
-func NewHString(s string) (hstring HString, err error) { - return HString(uintptr(0)), NewError(E_NOTIMPL) -} - -// DeleteHString deletes HString. -func DeleteHString(hstring HString) (err error) { - return NewError(E_NOTIMPL) -} - -// String returns Go string value of HString. -func (h HString) String() string { - return "" -} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7a2..000000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c208..000000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90da..000000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
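Editorial aside, not part of the patch: the go-ole files removed above still carry the pre-Go 1.17 build-constraint syntax, for example "// +build windows" and "// +build !windows". A minimal sketch of the modern form, which gofmt keeps in sync with the legacy line when both are present (file and package names here are only illustrative):

//go:build windows
// +build windows

// This file is compiled only for GOOS=windows builds.
package ole
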
- diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f327..000000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046d9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 24552483c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
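An illustrative aside, outside the diff itself: the clone.go removed above defines proto.Clone, which deep-copies a message, and proto.Merge, which copies set scalar fields over the destination and appends repeated fields. A minimal usage sketch; pb.Example and its import path are hypothetical stand-ins for a generated message type, so this shows the documented semantics rather than being runnable as-is:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	src := &pb.Example{Name: "src", Tags: []string{"a"}}
	dst := &pb.Example{Name: "dst", Tags: []string{"b"}}

	// Clone returns a deep copy of src; later edits to cp do not touch src.
	cp := proto.Clone(src).(*pb.Example)

	// Merge overwrites set scalar fields and appends repeated fields.
	proto.Merge(dst, src)

	fmt.Println(cp.Name)  // "src"
	fmt.Println(dst.Name) // "src" (scalar overwritten)
	fmt.Println(dst.Tags) // [b a]  (repeated appended)
}
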
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
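Aside (not part of the diff): the varint routines removed above implement the base-128 wire encoding used for int32, int64, uint32, uint64, bool and enum fields: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. The standard library's encoding/binary package uses the same unsigned encoding, which makes a self-contained illustration straightforward:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 300 is 0b1_0010_1100: the low seven bits (0101100) go out first with
	// the continuation bit set (0xac), followed by the remaining bits (0x02).
	buf := binary.AppendUvarint(nil, 300)
	fmt.Printf("% x\n", buf) // ac 02

	x, n := binary.Uvarint(buf)
	fmt.Println(x, n) // 300 2
}
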
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. 
This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09..000000000 --- a/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
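A hedged aside, outside the patch: the two package-level entry points removed above differ only in whether the target is cleared first. Unmarshal calls Reset on the message before decoding, while UnmarshalMerge decodes into whatever the message already holds, so repeated fields are appended. A sketch of the difference; pb.Example is again a hypothetical generated type, so this illustrates the documented behaviour rather than being runnable as-is:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	wire, _ := proto.Marshal(&pb.Example{Tags: []string{"b"}})

	// Unmarshal resets the destination before decoding.
	dst := &pb.Example{Tags: []string{"a"}}
	_ = proto.Unmarshal(wire, dst)
	fmt.Println(dst.Tags) // [b]

	// UnmarshalMerge keeps existing data and appends to it.
	dst = &pb.Example{Tags: []string{"a"}}
	_ = proto.UnmarshalMerge(wire, dst)
	fmt.Println(dst.Tags) // [a b]
}
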
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d90..000000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
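Illustrative aside, not part of the diff: when a message is unmarshaled, tags the local schema does not recognise are preserved (in XXX_unrecognized and in unaccessed extensions) so that a re-marshal round-trips them; DiscardUnknown, defined in the discard.go removed above, drops them explicitly. A minimal sketch, with pb.Example standing in for a hypothetical generated type and stripUnknown being our own illustrative helper:

package msgutil

import (
	"github.com/gogo/protobuf/proto"
	pb "example.com/hypothetical/pb" // hypothetical generated package
)

// stripUnknown decodes wire and discards any fields the local schema does not
// know about, so a later Marshal will not carry them forward.
func stripUnknown(wire []byte) (*pb.Example, error) {
	msg := &pb.Example{}
	if err := proto.Unmarshal(wire, msg); err != nil {
		return nil, err
	}
	proto.DiscardUnknown(msg)
	return msg, nil
}
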
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e1730..000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
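Aside (outside the patch): the duration.go removed above converts between the protobuf Duration representation (whole seconds plus a nanosecond remainder with the same sign, valid to roughly +/-10,000 years) and Go's time.Duration (int64 nanoseconds, roughly +/-292 years), rejecting values outside either range. The split performed by durationProto is plain integer arithmetic; a self-contained illustration of the same split, with toSecondsNanos as our own helper name:

package main

import (
	"fmt"
	"time"
)

// toSecondsNanos mirrors the split done by the removed durationProto:
// whole seconds, plus a remainder in nanoseconds carrying the same sign.
func toSecondsNanos(d time.Duration) (secs int64, nanos int32) {
	n := d.Nanoseconds()
	secs = n / 1e9
	nanos = int32(n - secs*1e9)
	return secs, nanos
}

func main() {
	fmt.Println(toSecondsNanos(-1500 * time.Millisecond)) // -1 -500000000
	fmt.Println(toSecondsNanos(90 * time.Second))         // 90 0
}
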
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 9581ccd30..000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,205 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - sizVar := SizeVarint(uint64(siz)) - p.grow(siz + sizVar) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb173e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
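Aside, not part of the diff: EncodeZigzag64 and EncodeZigzag32 above map signed integers onto unsigned varints so that values near zero stay short on the wire: 0 becomes 0, -1 becomes 1, 1 becomes 2, -2 becomes 3, and so on. The transform and its inverse are a shift and an xor; a self-contained round-trip check (zigzag64 and unzigzag64 are our own illustrative helpers):

package main

import "fmt"

// zigzag64 applies the same mapping as the removed EncodeZigzag64.
func zigzag64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// unzigzag64 inverts it, matching the result of the removed DecodeZigzag64.
func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64} {
		u := zigzag64(v)
		fmt.Printf("%d -> %d -> %d\n", v, u, unzigzag64(u))
	}
}
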
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c1..000000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 341c6f57f..000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,605 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. 
- return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
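HasExtension above scans the raw extension bytes by decoding each tag into a field number (tag >> 3) and wire type (tag & 7). A self-contained sketch of that scan using encoding/binary's Uvarint, with a hand-built two-field buffer; the byte values are illustrative only:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hand-built buffer: field 1 = 150 and field 2 = 1, both wire type 0 (varint).
	buf := []byte{0x08, 0x96, 0x01, 0x10, 0x01}
	for o := 0; o < len(buf); {
		tag, n := binary.Uvarint(buf[o:])
		if n <= 0 {
			fmt.Println("bad varint")
			return
		}
		o += n
		field, wire := int32(tag>>3), int(tag&0x7)
		if wire != 0 {
			fmt.Println("sketch only handles varint fields")
			return
		}
		v, n := binary.Uvarint(buf[o:])
		if n <= 0 {
			fmt.Println("bad varint")
			return
		}
		o += n
		fmt.Printf("field=%d wire=%d value=%d\n", field, wire, v)
	}
}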
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - ClearExtension(pb, extension) - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
- -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 6f1ae120e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
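RegisterExtension and RegisteredExtensions above keep a per-type map of descriptors keyed by field number and panic on duplicate registration. A minimal sketch of that registry pattern with placeholder types; desc, registry, and register are illustrative names, not the library's API:

package main

import "fmt"

// desc is a stand-in for an extension descriptor; only the field number matters here.
type desc struct {
	Field int32
	Name  string
}

// registry maps a message type name to its extensions keyed by field number.
var registry = map[string]map[int32]*desc{}

func register(msgType string, d *desc) {
	m := registry[msgType]
	if m == nil {
		m = map[int32]*desc{}
		registry[msgType] = m
	}
	if _, dup := m[d.Field]; dup {
		panic(fmt.Sprintf("duplicate extension registered: %s %d", msgType, d.Field))
	}
	m[d.Field] = d
}

func main() {
	register("example.Test", &desc{Field: 100, Name: "example.my_ext"})
	fmt.Println(registry["example.Test"][100].Name) // example.my_ext
}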
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMapBackwards(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { - o := 0 - end := len(data) - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[end-len(e.enc):], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - end -= n - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index 80db1c155..000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. 
It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. 
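SetDeterministic above promises that map entries are emitted in sorted key order rather than Go's randomized map iteration order. A small stdlib-only sketch of that ordering step, independent of the proto wire format:

package main

import (
	"fmt"
	"sort"
)

func main() {
	m := map[string]int32{"b": 2, "a": 1, "c": 3}
	// Collect and sort the keys, then emit entries in that order instead of
	// Go's randomized map iteration order.
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Printf("%s=%d\n", k, m[k])
	}
}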
-func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults 
sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. 
-type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. 
- sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa39190..000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad9083..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. 
-func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c29d..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. 
-type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed02..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 28da1475f..000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,610 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - if isOneofMessage { - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd93..000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f7..000000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index f8babdefa..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3009 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if isOneofMessage { - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] - pp, err = m.XXX_Marshal(pp, p.deterministic) - p.buf = append(p.buf, pp...) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c1e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index 60dcf70d1..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,676 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case isSlice && !isPointer: // E.g. []pb.T - mergeInfo := getMergeInfo(tf) - zero := reflect.Zero(tf) - mfi.merge = func(dst, src pointer) { - // TODO: Make this faster? 
- dstsp := dst.asPointerTo(f.Type) - dsts := dstsp.Elem() - srcs := src.asPointerTo(f.Type).Elem() - for i := 0; i < srcs.Len(); i++ { - dsts = reflect.Append(dsts, zero) - srcElement := srcs.Index(i).Addr() - dstElement := dsts.Index(dsts.Len() - 1).Addr() - mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) - } - if dsts.IsNil() { - dsts = reflect.MakeSlice(f.Type, 0, 0) - } - dstsp.Elem().Set(dsts) - } - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index 937229386..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2249 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. 
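The main decode loop above splits every wire-format key into a field number (x >> 3) and a wire type (x & 7) before dispatching to a per-field unmarshaler, and skips or stashes unknown tags. A self-contained illustration of that key split, using only encoding/binary; the three-byte buffer is the usual field 1 / varint / 150 example.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Field 1, wire type 0 (varint), value 150.
	b := []byte{0x08, 0x96, 0x01}

	key, n := binary.Uvarint(b) // varint-encoded field key
	fieldNum := key >> 3        // upper bits: field number
	wireType := key & 7         // low three bits: wire type

	val, _ := binary.Uvarint(b[n:])      // wire type 0 payload is another varint
	fmt.Println(fieldNum, wireType, val) // 1 0 150
}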
-func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. 
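computeUnmarshalInfo above drives everything off the protobuf:"..." struct tags emitted by the code generator: the first entry is the encoding, the second the field number, the third the cardinality, and later entries carry options such as name=. Below is a small sketch of that parsing against a hypothetical generated field, using the same strings.Split and strconv.Atoi approach.

package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// sample mimics a generated message field; the tag layout follows what the
// deleted code expects, but the type itself is made up for illustration.
type sample struct {
	ID int64 `protobuf:"varint,1,opt,name=id,proto3"`
}

func main() {
	f, _ := reflect.TypeOf(sample{}).FieldByName("ID")
	parts := strings.Split(f.Tag.Get("protobuf"), ",")

	encoding := parts[0]             // "varint", "bytes", "fixed64", ...
	num, _ := strconv.Atoi(parts[1]) // field number used to index the dense/sparse tables
	card := parts[2]                 // "opt", "req", or "rep"

	name := ""
	for _, p := range parts[3:] {
		if strings.HasPrefix(p, "name=") {
			name = strings.TrimPrefix(p, "name=")
		}
	}
	fmt.Println(encoding, num, card, name) // varint 1 opt id
}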
- // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if len(oneofFields) > 0 { - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - 
case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := 
int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, 
io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. 
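The sint32/sint64 unmarshalers above undo zigzag coding: x>>1 XORed with the sign bit smeared across the word, so small negative numbers come back from small unsigned varints. A standalone round trip of that transform; the encode half is the standard (n << 1) ^ (n >> 63) counterpart, included only for the check.

package main

import "fmt"

// zigzagEncode64 is the standard counterpart of the decode expression below.
func zigzagEncode64(n int64) uint64 { return uint64(n<<1) ^ uint64(n>>63) }

// zigzagDecode64 matches the expression used by the deleted sint64 unmarshalers.
func zigzagDecode64(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 }

func main() {
	for _, n := range []int64{0, -1, 1, -2, 2, -1234567890} {
		x := zigzagEncode64(n)
		fmt.Printf("%d -> %d -> %d\n", n, x, zigzagDecode64(x)) // every value round-trips
	}
}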
- v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - 
// that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
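makeUnmarshalMap above decodes each map entry as a tiny two-field submessage: field 1 carries the key and field 2 carries the value, with anything else skipped. The sketch below hand-decodes one such entry for a hypothetical map<uint64, string>, following the same tag dispatch with only the standard library; bounds checks and unknown-field skipping are omitted for brevity.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// One map entry for key=7, value="hi":
	//   0x08 0x07          field 1, varint            -> key
	//   0x12 0x02 'h' 'i'  field 2, length-delimited  -> value
	entry := []byte{0x08, 0x07, 0x12, 0x02, 'h', 'i'}

	var key uint64
	var val string
	for len(entry) > 0 {
		k, n := binary.Uvarint(entry) // field key within the entry
		entry = entry[n:]
		switch k >> 3 {
		case 1: // key field
			key, n = binary.Uvarint(entry)
			entry = entry[n:]
		case 2: // value field
			l, m := binary.Uvarint(entry)
			entry = entry[m:]
			val = string(entry[:l])
			entry = entry[l:]
		}
	}
	fmt.Println(key, val) // 7 hi
}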
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7ad9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. 
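decodeVarint/encodeVarint above implement base-128 varints by hand: seven payload bits per byte, with the high bit set on every byte except the last. encoding/binary in the standard library uses the same unsigned format, so a round trip through PutUvarint and Uvarint produces the same bytes the deleted decoder consumes.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	for _, x := range []uint64{0, 1, 127, 128, 300, 1 << 40} {
		n := binary.PutUvarint(buf, x)  // encode: 7 bits per byte, MSB = continuation
		y, m := binary.Uvarint(buf[:n]) // decode: same value, same byte count
		fmt.Printf("%d -> %x -> %d (%d bytes)\n", x, buf[:n], y, m)
	}
}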
Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { 
- return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) - 
slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 87416afe9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,930 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index f85c0cc81..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - if props.StdTime { - fv := v - p.back() - props.StdTime = false - tproto := &timestamp{} - err := p.readAny(reflect.ValueOf(tproto).Elem(), props) - props.StdTime = true - if err != nil { - return err - } - tim, err := timestampFromProto(tproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ts := fv.Interface().([]*time.Time) - ts = append(ts, &tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } else { - ts := fv.Interface().([]time.Time) - ts = append(ts, tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&tim)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&tim))) - } - return nil - } - if props.StdDuration { - fv := v - p.back() - props.StdDuration = false - dproto := &duration{} - err := p.readAny(reflect.ValueOf(dproto).Elem(), props) - props.StdDuration = true - if err != nil { - return err - } - dur, err := durationFromProto(dproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ds := fv.Interface().([]*time.Duration) - ds = append(ds, &dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } else { - ds := fv.Interface().([]time.Duration) - ds = append(ds, dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&dur)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&dur))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f6542..000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func timestampFromProto(ts *timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func timestampProto(t time.Time) (*timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go deleted file mode 100644 index 38439fa99..000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b64..000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf85..000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") -} diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE deleted file mode 100644 index 37ec93a14..000000000 --- a/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. 
- -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index eac1c7664..000000000 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. -type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specifies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. 
-func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} - -// Clear purges all stored items from the cache. -func (c *Cache) Clear() { - if c.OnEvicted != nil { - for _, e := range c.cache { - kv := e.Value.(*entry) - c.OnEvicted(kv.key, kv.value) - } - } - c.ll = nil - c.cache = nil -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a..000000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go deleted file mode 100644 index e810e6fea..000000000 --- a/vendor/github.com/golang/protobuf/proto/buffer.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
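The lru package whose vendored copy is removed a few hunks above stays available as an ordinary module dependency after this change. For reference, a minimal sketch of the API its lru.go documents (New, Add, Get, Len and the OnEvicted callback) could look like the following; the string keys and integer values are made up for illustration.

package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	// A cache that keeps at most two entries; the callback observes evictions.
	c := lru.New(2)
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // exceeds MaxEntries, so the least recently used entry ("a") is evicted

	if v, ok := c.Get("b"); ok {
		fmt.Println("b =", v) // b = 2
	}
	fmt.Println("len =", c.Len()) // len = 2
}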
- -package proto - -import ( - "errors" - "fmt" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - WireVarint = 0 - WireFixed32 = 5 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 -) - -// EncodeVarint returns the varint encoded bytes of v. -func EncodeVarint(v uint64) []byte { - return protowire.AppendVarint(nil, v) -} - -// SizeVarint returns the length of the varint encoded bytes of v. -// This is equal to len(EncodeVarint(v)). -func SizeVarint(v uint64) int { - return protowire.SizeVarint(v) -} - -// DecodeVarint parses a varint encoded integer from b, -// returning the integer value and the length of the varint. -// It returns (0, 0) if there is a parse error. -func DecodeVarint(b []byte) (uint64, int) { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, 0 - } - return v, n -} - -// Buffer is a buffer for encoding and decoding the protobuf wire format. -// It may be reused between invocations to reduce memory usage. -type Buffer struct { - buf []byte - idx int - deterministic bool -} - -// NewBuffer allocates a new Buffer initialized with buf, -// where the contents of buf are considered the unread portion of the buffer. -func NewBuffer(buf []byte) *Buffer { - return &Buffer{buf: buf} -} - -// SetDeterministic specifies whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (b *Buffer) SetDeterministic(deterministic bool) { - b.deterministic = deterministic -} - -// SetBuf sets buf as the internal buffer, -// where the contents of buf are considered the unread portion of the buffer. -func (b *Buffer) SetBuf(buf []byte) { - b.buf = buf - b.idx = 0 -} - -// Reset clears the internal buffer of all written and unread data. -func (b *Buffer) Reset() { - b.buf = b.buf[:0] - b.idx = 0 -} - -// Bytes returns the internal buffer. -func (b *Buffer) Bytes() []byte { - return b.buf -} - -// Unread returns the unread portion of the buffer. -func (b *Buffer) Unread() []byte { - return b.buf[b.idx:] -} - -// Marshal appends the wire-format encoding of m to the buffer. -func (b *Buffer) Marshal(m Message) error { - var err error - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// Unmarshal parses the wire-format message in the buffer and -// places the decoded results in m. -// It does not reset m before unmarshaling. 
-func (b *Buffer) Unmarshal(m Message) error { - err := UnmarshalMerge(b.Unread(), m) - b.idx = len(b.buf) - return err -} - -type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } - -func (m *unknownFields) String() string { panic("not implemented") } -func (m *unknownFields) Reset() { panic("not implemented") } -func (m *unknownFields) ProtoMessage() { panic("not implemented") } - -// DebugPrint dumps the encoded bytes of b with a header and footer including s -// to stdout. This is only intended for debugging. -func (*Buffer) DebugPrint(s string, b []byte) { - m := MessageReflect(new(unknownFields)) - m.SetUnknown(b) - b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) - fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) -} - -// EncodeVarint appends an unsigned varint encoding to the buffer. -func (b *Buffer) EncodeVarint(v uint64) error { - b.buf = protowire.AppendVarint(b.buf, v) - return nil -} - -// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag32(v uint64) error { - return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) -} - -// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag64(v uint64) error { - return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) -} - -// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed32(v uint64) error { - b.buf = protowire.AppendFixed32(b.buf, uint32(v)) - return nil -} - -// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed64(v uint64) error { - b.buf = protowire.AppendFixed64(b.buf, uint64(v)) - return nil -} - -// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. -func (b *Buffer) EncodeRawBytes(v []byte) error { - b.buf = protowire.AppendBytes(b.buf, v) - return nil -} - -// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. -// It does not validate whether v contains valid UTF-8. -func (b *Buffer) EncodeStringBytes(v string) error { - b.buf = protowire.AppendString(b.buf, v) - return nil -} - -// EncodeMessage appends a length-prefixed encoded message to the buffer. -func (b *Buffer) EncodeMessage(m Message) error { - var err error - b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// DecodeVarint consumes an encoded unsigned varint from the buffer. -func (b *Buffer) DecodeVarint() (uint64, error) { - v, n := protowire.ConsumeVarint(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag32() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil -} - -// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag64() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil -} - -// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. 
-func (b *Buffer) DecodeFixed32() (uint64, error) { - v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed64() (uint64, error) { - v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. -// If alloc is specified, it returns a copy the raw bytes -// rather than a sub-slice of the buffer. -func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { - v, n := protowire.ConsumeBytes(b.buf[b.idx:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - b.idx += n - if alloc { - v = append([]byte(nil), v...) - } - return v, nil -} - -// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. -// It does not validate whether the raw bytes contain valid UTF-8. -func (b *Buffer) DecodeStringBytes() (string, error) { - v, n := protowire.ConsumeString(b.buf[b.idx:]) - if n < 0 { - return "", protowire.ParseError(n) - } - b.idx += n - return v, nil -} - -// DecodeMessage consumes a length-prefixed message from the buffer. -// It does not reset m before unmarshaling. -func (b *Buffer) DecodeMessage(m Message) error { - v, err := b.DecodeRawBytes(false) - if err != nil { - return err - } - return UnmarshalMerge(v, m) -} - -// DecodeGroup consumes a message group from the buffer. -// It assumes that the start group marker has already been consumed and -// consumes all bytes until (and including the end group marker). -// It does not reset m before unmarshaling. -func (b *Buffer) DecodeGroup(m Message) error { - v, n, err := consumeGroup(b.buf[b.idx:]) - if err != nil { - return err - } - b.idx += n - return UnmarshalMerge(v, m) -} - -// consumeGroup parses b until it finds an end group marker, returning -// the raw bytes of the message (excluding the end group marker) and the -// the total length of the message (including the end group marker). -func consumeGroup(b []byte) ([]byte, int, error) { - b0 := b - depth := 1 // assume this follows a start group marker - for { - _, wtyp, tagLen := protowire.ConsumeTag(b) - if tagLen < 0 { - return nil, 0, protowire.ParseError(tagLen) - } - b = b[tagLen:] - - var valLen int - switch wtyp { - case protowire.VarintType: - _, valLen = protowire.ConsumeVarint(b) - case protowire.Fixed32Type: - _, valLen = protowire.ConsumeFixed32(b) - case protowire.Fixed64Type: - _, valLen = protowire.ConsumeFixed64(b) - case protowire.BytesType: - _, valLen = protowire.ConsumeBytes(b) - case protowire.StartGroupType: - depth++ - case protowire.EndGroupType: - depth-- - default: - return nil, 0, errors.New("proto: cannot parse reserved wire type") - } - if valLen < 0 { - return nil, 0, protowire.ParseError(valLen) - } - b = b[valLen:] - - if depth == 0 { - return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go deleted file mode 100644 index d399bf069..000000000 --- a/vendor/github.com/golang/protobuf/proto/defaults.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
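The Buffer type deleted above is a thin shim over google.golang.org/protobuf/encoding/protowire. A small, self-contained sketch of the round trip it documents (varint, fixed32 and length-prefixed bytes, decoded in the same order they were appended) might look like this; the values are arbitrary.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	// Append three values using the legacy wire helpers.
	_ = b.EncodeVarint(300)             // varint
	_ = b.EncodeFixed32(7)              // 32-bit little-endian
	_ = b.EncodeRawBytes([]byte("pbm")) // length-prefixed bytes

	// Decoding starts at the unread index and consumes fields in order.
	v, _ := b.DecodeVarint()
	f, _ := b.DecodeFixed32()
	raw, _ := b.DecodeRawBytes(false)
	fmt.Println(v, f, string(raw)) // 300 7 pbm
}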
- -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// SetDefaults sets unpopulated scalar fields to their default values. -// Fields within a oneof are not set even if they have a default value. -// SetDefaults is recursively called upon any populated message fields. -func SetDefaults(m Message) { - if m != nil { - setDefaults(MessageReflect(m)) - } -} - -func setDefaults(m protoreflect.Message) { - fds := m.Descriptor().Fields() - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if !m.Has(fd) { - if fd.HasDefault() && fd.ContainingOneof() == nil { - v := fd.Default() - if fd.Kind() == protoreflect.BytesKind { - v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes - } - m.Set(fd, v) - } - continue - } - } - - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - setDefaults(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - setDefaults(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - setDefaults(v.Message()) - return true - }) - } - } - return true - }) -} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index e8db57e09..000000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - - protoV2 "google.golang.org/protobuf/proto" -) - -var ( - // Deprecated: No longer returned. - ErrNil = errors.New("proto: Marshal called with nil") - - // Deprecated: No longer returned. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") - - // Deprecated: No longer returned. - ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -) - -// Deprecated: Do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: Do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: Do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func RegisterMessageSetType(Message, int32, string) {} - -// Deprecated: Do not use. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// Deprecated: Do not use. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. 
- var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// Deprecated: Do not use; this type existed for intenal-use only. -type InternalMessageInfo struct{} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) DiscardUnknown(m Message) { - DiscardUnknown(m) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { - return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Size(m Message) int { - return protoV2.Size(MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { - return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) -} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index 2187e877f..000000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -func DiscardUnknown(m Message) { - if m != nil { - discardUnknown(MessageReflect(m)) - } -} - -func discardUnknown(m protoreflect.Message) { - m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - discardUnknown(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - discardUnknown(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - discardUnknown(v.Message()) - return true - }) - } - } - return true - }) - - // Discard unknown fields. 
- if len(m.GetUnknown()) > 0 { - m.SetUnknown(nil) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 42fc120c9..000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - "reflect" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -type ( - // ExtensionDesc represents an extension descriptor and - // is used to interact with an extension field in a message. - // - // Variables of this type are generated in code by protoc-gen-go. - ExtensionDesc = protoimpl.ExtensionInfo - - // ExtensionRange represents a range of message extensions. - // Used in code generated by protoc-gen-go. - ExtensionRange = protoiface.ExtensionRangeV1 - - // Deprecated: Do not use; this is an internal type. - Extension = protoimpl.ExtensionFieldV1 - - // Deprecated: Do not use; this is an internal type. - XXX_InternalExtensions = protoimpl.ExtensionFields -) - -// ErrMissingExtension reports whether the extension was not present. -var ErrMissingExtension = errors.New("proto: missing extension") - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -// HasExtension reports whether the extension field is present in m -// either as an explicitly populated field or as an unknown field. -func HasExtension(m Message, xt *ExtensionDesc) (has bool) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return false - } - - // Check whether any populated known field matches the field number. - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - has = mr.Has(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - has = int32(fd.Number()) == xt.Field - return !has - }) - } - - // Check whether any unknown field matches the field number. - for b := mr.GetUnknown(); !has && len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - has = int32(num) == xt.Field - b = b[n:] - } - return has -} - -// ClearExtension removes the extension field from m -// either as an explicitly populated field or as an unknown field. -func ClearExtension(m Message, xt *ExtensionDesc) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - mr.Clear(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if int32(fd.Number()) == xt.Field { - mr.Clear(fd) - return false - } - return true - }) - } - clearUnknown(mr, fieldNum(xt.Field)) -} - -// ClearAllExtensions clears all extensions from m. -// This includes populated fields and unknown fields in the extension range. 
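DiscardUnknown, removed just above, only has a visible effect once a message carries unknown fields. One way to sketch that is to hand-encode a field number the target message does not define and unmarshal it; wrapperspb.StringValue is used here purely as a convenient generated message, not because this project depends on it.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Hand-encode field 99 (varint, value 1); StringValue only defines field 1.
	raw := protowire.AppendTag(nil, 99, protowire.VarintType)
	raw = protowire.AppendVarint(raw, 1)

	msg := &wrapperspb.StringValue{}
	_ = proto.Unmarshal(raw, msg) // the unrecognized field is preserved, not dropped

	fmt.Println(len(msg.ProtoReflect().GetUnknown()) > 0) // true
	proto.DiscardUnknown(msg)
	fmt.Println(len(msg.ProtoReflect().GetUnknown())) // 0
}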
-func ClearAllExtensions(m Message) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if fd.IsExtension() { - mr.Clear(fd) - } - return true - }) - clearUnknown(mr, mr.Descriptor().ExtensionRanges()) -} - -// GetExtension retrieves a proto2 extended field from m. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes for the extension field. -func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Retrieve the unknown fields for this extension field. - var bo protoreflect.RawFields - for bi := mr.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if int32(num) == xt.Field { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - - // For type incomplete descriptors, only retrieve the unknown fields. - if xt.ExtensionType == nil { - return []byte(bo), nil - } - - // If the extension field only exists as unknown fields, unmarshal it. - // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - if !mr.Has(xtd) && len(bo) > 0 { - m2 := mr.New() - if err := (proto.UnmarshalOptions{ - Resolver: extensionResolver{xt}, - }.Unmarshal(bo, m2.Interface())); err != nil { - return nil, err - } - if m2.Has(xtd) { - mr.Set(xtd, m2.Get(xtd)) - clearUnknown(mr, fieldNum(xt.Field)) - } - } - - // Check whether the message has the extension field set or a default. - var pv protoreflect.Value - switch { - case mr.Has(xtd): - pv = mr.Get(xtd) - case xtd.HasDefault(): - pv = xtd.Default() - default: - return nil, ErrMissingExtension - } - - v := xt.InterfaceOf(pv) - rv := reflect.ValueOf(v) - if isScalarKind(rv.Kind()) { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - return v, nil -} - -// extensionResolver is a custom extension resolver that stores a single -// extension type that takes precedence over the global registry. -type extensionResolver struct{ xt protoreflect.ExtensionType } - -func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -// GetExtensions returns a list of the extensions values present in m, -// corresponding with the provided list of extension descriptors, xts. 
-// If an extension is missing in m, the corresponding value is nil. -func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return nil, errNotExtendable - } - - vs := make([]interface{}, len(xts)) - for i, xt := range xts { - v, err := GetExtension(m, xt) - if err != nil { - if err == ErrMissingExtension { - continue - } - return vs, err - } - vs[i] = v - } - return vs, nil -} - -// SetExtension sets an extension field in m to the provided value. -func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return errNotExtendable - } - - rv := reflect.ValueOf(v) - if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) - } - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) - } - if isScalarKind(rv.Elem().Kind()) { - v = rv.Elem().Interface() - } - } - - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - mr.Set(xtd, xt.ValueOf(v)) - clearUnknown(mr, fieldNum(xt.Field)) - return nil -} - -// SetRawExtension inserts b into the unknown fields of m. -// -// Deprecated: Use Message.ProtoReflect.SetUnknown instead. -func SetRawExtension(m Message, fnum int32, b []byte) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - // Verify that the raw field is valid. - for b0 := b; len(b0) > 0; { - num, _, n := protowire.ConsumeField(b0) - if int32(num) != fnum { - panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) - } - b0 = b0[n:] - } - - ClearExtension(m, &ExtensionDesc{Field: fnum}) - mr.SetUnknown(append(mr.GetUnknown(), b...)) -} - -// ExtensionDescs returns a list of extension descriptors found in m, -// containing descriptors for both populated extension fields in m and -// also unknown fields of m that are in the extension range. -// For the later case, an type incomplete descriptor is provided where only -// the ExtensionDesc.Field field is populated. -// The order of the extension descriptors is undefined. -func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Collect a set of known extension descriptors. - extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) - mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - xt := fd.(protoreflect.ExtensionTypeDescriptor) - if xd, ok := xt.Type().(*ExtensionDesc); ok { - extDescs[fd.Number()] = xd - } - } - return true - }) - - // Collect a set of unknown extension descriptors. - extRanges := mr.Descriptor().ExtensionRanges() - for b := mr.GetUnknown(); len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - if extRanges.Has(num) && extDescs[num] == nil { - extDescs[num] = nil - } - b = b[n:] - } - - // Transpose the set of descriptors into a list. - var xts []*ExtensionDesc - for num, xt := range extDescs { - if xt == nil { - xt = &ExtensionDesc{Field: int32(num)} - } - xts = append(xts, xt) - } - return xts, nil -} - -// isValidExtension reports whether xtd is a valid extension descriptor for md. 
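Even without a generated *ExtensionDesc, the raw-bytes path documented above can be exercised: SetRawExtension stashes an encoded field among the unknown fields, and GetExtension with a type-incomplete descriptor (nil ExtensionType) returns those bytes verbatim. The sketch below leans on descriptorpb.MessageOptions only because it is an extendable proto2 message that ships with the protobuf module; field number 50000 is arbitrary.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// MessageOptions declares `extensions 1000 to max`, so it is extendable.
	opts := &descriptorpb.MessageOptions{}

	// Hand-encode extension field 50000 as a varint with value 1.
	raw := protowire.AppendTag(nil, 50000, protowire.VarintType)
	raw = protowire.AppendVarint(raw, 1)
	proto.SetRawExtension(opts, 50000, raw)

	// A descriptor with only the field number set returns the raw encoding.
	desc := &proto.ExtensionDesc{ExtendedType: (*descriptorpb.MessageOptions)(nil), Field: 50000}
	v, err := proto.GetExtension(opts, desc)
	fmt.Println(err, len(v.([]byte)) == len(raw)) // <nil> true
}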
-func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { - return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) -} - -// isScalarKind reports whether k is a protobuf scalar kind (except bytes). -// This function exists for historical reasons since the representation of -// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. -func isScalarKind(k reflect.Kind) bool { - switch k { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - return true - default: - return false - } -} - -// clearUnknown removes unknown fields from m where remover.Has reports true. -func clearUnknown(m protoreflect.Message, remover interface { - Has(protoreflect.FieldNumber) bool -}) { - var bo protoreflect.RawFields - for bi := m.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if !remover.Has(num) { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - if bi := m.GetUnknown(); len(bi) != len(bo) { - m.SetUnknown(bo) - } -} - -type fieldNum protoreflect.FieldNumber - -func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { - return protoreflect.FieldNumber(n1) == n2 -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index dcdc2202f..000000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// StructProperties represents protocol buffer type information for a -// generated protobuf message in the open-struct API. -// -// Deprecated: Do not use. -type StructProperties struct { - // Prop are the properties for each field. - // - // Fields belonging to a oneof are stored in OneofTypes instead, with a - // single Properties representing the parent oneof held here. - // - // The order of Prop matches the order of fields in the Go struct. - // Struct fields that are not related to protobufs have a "XXX_" prefix - // in the Properties.Name and must be ignored by the user. - Prop []*Properties - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the protobuf field name. - OneofTypes map[string]*OneofProperties -} - -// Properties represents the type information for a protobuf message field. -// -// Deprecated: Do not use. -type Properties struct { - // Name is a placeholder name with little meaningful semantic value. - // If the name has an "XXX_" prefix, the entire Properties must be ignored. - Name string - // OrigName is the protobuf field name or oneof name. - OrigName string - // JSONName is the JSON name for the protobuf field. - JSONName string - // Enum is a placeholder name for enums. - // For historical reasons, this is neither the Go name for the enum, - // nor the protobuf name for the enum. - Enum string // Deprecated: Do not use. - // Weak contains the full name of the weakly referenced message. - Weak string - // Wire is a string representation of the wire type. - Wire string - // WireType is the protobuf wire type for the field. - WireType int - // Tag is the protobuf field number. 
- Tag int - // Required reports whether this is a required field. - Required bool - // Optional reports whether this is a optional field. - Optional bool - // Repeated reports whether this is a repeated field. - Repeated bool - // Packed reports whether this is a packed repeated field of scalars. - Packed bool - // Proto3 reports whether this field operates under the proto3 syntax. - Proto3 bool - // Oneof reports whether this field belongs within a oneof. - Oneof bool - - // Default is the default value in string form. - Default string - // HasDefault reports whether the field has a default value. - HasDefault bool - - // MapKeyProp is the properties for the key field for a map field. - MapKeyProp *Properties - // MapValProp is the properties for the value field for a map field. - MapValProp *Properties -} - -// OneofProperties represents the type information for a protobuf oneof. -// -// Deprecated: Do not use. -type OneofProperties struct { - // Type is a pointer to the generated wrapper type for the field value. - // This is nil for messages that are not in the open-struct API. - Type reflect.Type - // Field is the index into StructProperties.Prop for the containing oneof. - Field int - // Prop is the properties for the field. - Prop *Properties -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," + strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != "" { - s += ",json=" + p.JSONName - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if len(p.Weak) > 0 { - s += ",weak=" + p.Weak - } - if p.Proto3 { - s += ",proto3" - } - if p.Oneof { - s += ",oneof" - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(tag string) { - // For example: "bytes,49,opt,name=foo,def=hello!" - for len(tag) > 0 { - i := strings.IndexByte(tag, ',') - if i < 0 { - i = len(tag) - } - switch s := tag[:i]; { - case strings.HasPrefix(s, "name="): - p.OrigName = s[len("name="):] - case strings.HasPrefix(s, "json="): - p.JSONName = s[len("json="):] - case strings.HasPrefix(s, "enum="): - p.Enum = s[len("enum="):] - case strings.HasPrefix(s, "weak="): - p.Weak = s[len("weak="):] - case strings.Trim(s, "0123456789") == "": - n, _ := strconv.ParseUint(s, 10, 32) - p.Tag = int(n) - case s == "opt": - p.Optional = true - case s == "req": - p.Required = true - case s == "rep": - p.Repeated = true - case s == "varint" || s == "zigzag32" || s == "zigzag64": - p.Wire = s - p.WireType = WireVarint - case s == "fixed32": - p.Wire = s - p.WireType = WireFixed32 - case s == "fixed64": - p.Wire = s - p.WireType = WireFixed64 - case s == "bytes": - p.Wire = s - p.WireType = WireBytes - case s == "group": - p.Wire = s - p.WireType = WireStartGroup - case s == "packed": - p.Packed = true - case s == "proto3": - p.Proto3 = true - case s == "oneof": - p.Oneof = true - case strings.HasPrefix(s, "def="): - // The default tag is special in that everything afterwards is the - // default regardless of the presence of commas. - p.HasDefault = true - p.Default, i = tag[len("def="):], len(tag) - } - tag = strings.TrimPrefix(tag[i:], ",") - } -} - -// Init populates the properties from a protocol buffer struct tag. -// -// Deprecated: Do not use. 
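Parse above handles the same tag grammar that appears in the generated struct tags earlier in this diff (for example `bytes,1,opt,name=value,proto3`). A tiny sketch of parsing such a tag and formatting it back:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	p := new(proto.Properties)
	p.Parse("bytes,1,opt,name=value,proto3")

	fmt.Println(p.OrigName, p.Tag, p.Wire, p.Proto3) // value 1 bytes true
	fmt.Println(p.String())                          // bytes,1,opt,name=value,proto3
}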
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - - if typ != nil && typ.Kind() == reflect.Map { - p.MapKeyProp = new(Properties) - p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) - p.MapValProp = new(Properties) - p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) - } -} - -var propertiesCache sync.Map // map[reflect.Type]*StructProperties - -// GetProperties returns the list of properties for the type represented by t, -// which must be a generated protocol buffer message in the open-struct API, -// where protobuf message fields are represented by exported Go struct fields. -// -// Deprecated: Use protobuf reflection instead. -func GetProperties(t reflect.Type) *StructProperties { - if p, ok := propertiesCache.Load(t); ok { - return p.(*StructProperties) - } - p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) - return p.(*StructProperties) -} - -func newProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - - var hasOneof bool - prop := new(StructProperties) - - // Construct a list of properties for each field in the struct. - for i := 0; i < t.NumField(); i++ { - p := new(Properties) - f := t.Field(i) - tagField := f.Tag.Get("protobuf") - p.Init(f.Type, f.Name, tagField, &f) - - tagOneof := f.Tag.Get("protobuf_oneof") - if tagOneof != "" { - hasOneof = true - p.OrigName = tagOneof - } - - // Rename unrelated struct fields with the "XXX_" prefix since so much - // user code simply checks for this to exclude special fields. - if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { - p.Name = "XXX_" + p.Name - p.OrigName = "XXX_" + p.OrigName - } else if p.Weak != "" { - p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field - } - - prop.Prop = append(prop.Prop, p) - } - - // Construct a mapping of oneof field names to properties. - if hasOneof { - var oneofWrappers []interface{} - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) - } - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) - } - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { - if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { - oneofWrappers = m.ProtoMessageInfo().OneofWrappers - } - } - - prop.OneofTypes = make(map[string]*OneofProperties) - for _, wrapper := range oneofWrappers { - p := &OneofProperties{ - Type: reflect.ValueOf(wrapper).Type(), // *T - Prop: new(Properties), - } - f := p.Type.Elem().Field(0) - p.Prop.Name = f.Name - p.Prop.Parse(f.Tag.Get("protobuf")) - - // Determine the struct field that contains this oneof. - // Each wrapper is assignable to exactly one parent field. 
- var foundOneof bool - for i := 0; i < t.NumField() && !foundOneof; i++ { - if p.Type.AssignableTo(t.Field(i).Type) { - p.Field = i - foundOneof = true - } - } - if !foundOneof { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - prop.OneofTypes[p.Prop.OrigName] = p - } - } - - return prop -} - -func (sp *StructProperties) Len() int { return len(sp.Prop) } -func (sp *StructProperties) Less(i, j int) bool { return false } -func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go deleted file mode 100644 index 5aee89c32..000000000 --- a/vendor/github.com/golang/protobuf/proto/proto.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proto provides functionality for handling protocol buffer messages. -// In particular, it provides marshaling and unmarshaling between a protobuf -// message and the binary wire format. -// -// See https://developers.google.com/protocol-buffers/docs/gotutorial for -// more information. -// -// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - ProtoPackageIsVersion1 = true - ProtoPackageIsVersion2 = true - ProtoPackageIsVersion3 = true - ProtoPackageIsVersion4 = true -) - -// GeneratedEnum is any enum type generated by protoc-gen-go -// which is a named int32 kind. -// This type exists for documentation purposes. -type GeneratedEnum interface{} - -// GeneratedMessage is any message type generated by protoc-gen-go -// which is a pointer to a named struct kind. -// This type exists for documentation purposes. -type GeneratedMessage interface{} - -// Message is a protocol buffer message. -// -// This is the v1 version of the message interface and is marginally better -// than an empty interface as it lacks any method to programatically interact -// with the contents of the message. -// -// A v2 message is declared in "google.golang.org/protobuf/proto".Message and -// exposes protobuf reflection as a first-class feature of the interface. -// -// To convert a v1 message to a v2 message, use the MessageV2 function. -// To convert a v2 message to a v1 message, use the MessageV1 function. -type Message = protoiface.MessageV1 - -// MessageV1 converts either a v1 or v2 message to a v1 message. -// It returns nil if m is nil. -func MessageV1(m GeneratedMessage) protoiface.MessageV1 { - return protoimpl.X.ProtoMessageV1Of(m) -} - -// MessageV2 converts either a v1 or v2 message to a v2 message. -// It returns nil if m is nil. -func MessageV2(m GeneratedMessage) protoV2.Message { - return protoimpl.X.ProtoMessageV2Of(m) -} - -// MessageReflect returns a reflective view for a message. -// It returns nil if m is nil. -func MessageReflect(m Message) protoreflect.Message { - return protoimpl.X.MessageOf(m) -} - -// Marshaler is implemented by messages that can marshal themselves. -// This interface is used by the following functions: Size, Marshal, -// Buffer.Marshal, and Buffer.EncodeMessage. -// -// Deprecated: Do not implement. -type Marshaler interface { - // Marshal formats the encoded bytes of the message. 
- // It should be deterministic and emit valid protobuf wire data. - // The caller takes ownership of the returned buffer. - Marshal() ([]byte, error) -} - -// Unmarshaler is implemented by messages that can unmarshal themselves. -// This interface is used by the following functions: Unmarshal, UnmarshalMerge, -// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. -// -// Deprecated: Do not implement. -type Unmarshaler interface { - // Unmarshal parses the encoded bytes of the protobuf wire input. - // The provided buffer is only valid for during method call. - // It should not reset the receiver message. - Unmarshal([]byte) error -} - -// Merger is implemented by messages that can merge themselves. -// This interface is used by the following functions: Clone and Merge. -// -// Deprecated: Do not implement. -type Merger interface { - // Merge merges the contents of src into the receiver message. - // It clones all data structures in src such that it aliases no mutable - // memory referenced by src. - Merge(src Message) -} - -// RequiredNotSetError is an error type returned when -// marshaling or unmarshaling a message with missing required fields. -type RequiredNotSetError struct { - err error -} - -func (e *RequiredNotSetError) Error() string { - if e.err != nil { - return e.err.Error() - } - return "proto: required field not set" -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -func checkRequiredNotSet(m protoV2.Message) error { - if err := protoV2.CheckInitialized(m); err != nil { - return &RequiredNotSetError{err: err} - } - return nil -} - -// Clone returns a deep copy of src. -func Clone(src Message) Message { - return MessageV1(protoV2.Clone(MessageV2(src))) -} - -// Merge merges src into dst, which must be messages of the same type. -// -// Populated scalar fields in src are copied to dst, while populated -// singular messages in src are merged into dst by recursively calling Merge. -// The elements of every list field in src is appended to the corresponded -// list fields in dst. The entries of every map field in src is copied into -// the corresponding map field in dst, possibly replacing existing entries. -// The unknown fields of src are appended to the unknown fields of dst. -func Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. -// -// Two messages are equal if they are the same protobuf message type, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. -// -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. 
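Clone, Merge and Equal above all delegate to the v2 implementation. A short sketch using wrapperspb.StringValue as a stand-in generated message (any type satisfying the v1 Message interface would do; wrapperspb is just an illustrative choice):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := &wrapperspb.StringValue{Value: "pbm"}

	// Clone returns a deep copy of the same concrete type.
	b := proto.Clone(a).(*wrapperspb.StringValue)
	fmt.Println(proto.Equal(a, b)) // true

	// Merge copies populated scalar fields from src into dst.
	b.Value = "backup"
	proto.Merge(a, b)
	fmt.Println(a.GetValue(), proto.Equal(a, b)) // backup true
}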
-func Equal(x, y Message) bool { - return protoV2.Equal(MessageV2(x), MessageV2(y)) -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go deleted file mode 100644 index 066b4323b..000000000 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - "reflect" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// filePath is the path to the proto source file. -type filePath = string // e.g., "google/protobuf/descriptor.proto" - -// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. -type fileDescGZIP = []byte - -var fileCache sync.Map // map[filePath]fileDescGZIP - -// RegisterFile is called from generated code to register the compressed -// FileDescriptorProto with the file path for a proto source file. -// -// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. -func RegisterFile(s filePath, d fileDescGZIP) { - // Decompress the descriptor. - zr, err := gzip.NewReader(bytes.NewReader(d)) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - - // Construct a protoreflect.FileDescriptor from the raw descriptor. - // Note that DescBuilder.Build automatically registers the constructed - // file descriptor with the v2 registry. - protoimpl.DescBuilder{RawDescriptor: b}.Build() - - // Locally cache the raw descriptor form for the file. - fileCache.Store(s, d) -} - -// FileDescriptor returns the compressed FileDescriptorProto given the file path -// for a proto source file. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. -func FileDescriptor(s filePath) fileDescGZIP { - if v, ok := fileCache.Load(s); ok { - return v.(fileDescGZIP) - } - - // Find the descriptor in the v2 registry. - var b []byte - if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) - } - - // Locally cache the raw descriptor form for the file. - if len(b) > 0 { - v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) - return v.(fileDescGZIP) - } - return nil -} - -// enumName is the name of an enum. For historical reasons, the enum name is -// neither the full Go name nor the full protobuf name of the enum. -// The name is the dot-separated combination of just the proto package that the -// enum is declared within followed by the Go type name of the generated enum. -type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" - -// enumsByName maps enum values by name to their numeric counterpart. -type enumsByName = map[string]int32 - -// enumsByNumber maps enum values by number to their name counterpart. 
-type enumsByNumber = map[int32]string - -var enumCache sync.Map // map[enumName]enumsByName -var numFilesCache sync.Map // map[protoreflect.FullName]int - -// RegisterEnum is called from the generated code to register the mapping of -// enum value names to enum numbers for the enum identified by s. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. -func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { - if _, ok := enumCache.Load(s); ok { - panic("proto: duplicate enum registered: " + s) - } - enumCache.Store(s, m) - - // This does not forward registration to the v2 registry since this API - // lacks sufficient information to construct a complete v2 enum descriptor. -} - -// EnumValueMap returns the mapping from enum value names to enum numbers for -// the enum of the given name. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. -func EnumValueMap(s enumName) enumsByName { - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - - // Check whether the cache is stale. If the number of files in the current - // package differs, then it means that some enums may have been recently - // registered upstream that we do not know about. - var protoPkg protoreflect.FullName - if i := strings.LastIndexByte(s, '.'); i >= 0 { - protoPkg = protoreflect.FullName(s[:i]) - } - v, _ := numFilesCache.Load(protoPkg) - numFiles, _ := v.(int) - if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { - return nil // cache is up-to-date; was not found earlier - } - - // Update the enum cache for all enums declared in the given proto package. - numFiles = 0 - protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { - walkEnums(fd, func(ed protoreflect.EnumDescriptor) { - name := protoimpl.X.LegacyEnumName(ed) - if _, ok := enumCache.Load(name); !ok { - m := make(enumsByName) - evs := ed.Values() - for i := evs.Len() - 1; i >= 0; i-- { - ev := evs.Get(i) - m[string(ev.Name())] = int32(ev.Number()) - } - enumCache.LoadOrStore(name, m) - } - }) - numFiles++ - return true - }) - numFilesCache.Store(protoPkg, numFiles) - - // Check cache again for enum map. - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - return nil -} - -// walkEnums recursively walks all enums declared in d. -func walkEnums(d interface { - Enums() protoreflect.EnumDescriptors - Messages() protoreflect.MessageDescriptors -}, f func(protoreflect.EnumDescriptor)) { - eds := d.Enums() - for i := eds.Len() - 1; i >= 0; i-- { - f(eds.Get(i)) - } - mds := d.Messages() - for i := mds.Len() - 1; i >= 0; i-- { - walkEnums(mds.Get(i), f) - } -} - -// messageName is the full name of protobuf message. -type messageName = string - -var messageTypeCache sync.Map // map[messageName]reflect.Type - -// RegisterType is called from generated code to register the message Go type -// for a message of the given name. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. -func RegisterType(m Message, s messageName) { - mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) - if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { - panic(err) - } - messageTypeCache.Store(s, reflect.TypeOf(m)) -} - -// RegisterMapType is called from generated code to register the Go map type -// for a protobuf message representing a map entry. -// -// Deprecated: Do not use. 
-func RegisterMapType(m interface{}, s messageName) { - t := reflect.TypeOf(m) - if t.Kind() != reflect.Map { - panic(fmt.Sprintf("invalid map kind: %v", t)) - } - if _, ok := messageTypeCache.Load(s); ok { - panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) - } - messageTypeCache.Store(s, t) -} - -// MessageType returns the message type for a named message. -// It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. -func MessageType(s messageName) reflect.Type { - if v, ok := messageTypeCache.Load(s); ok { - return v.(reflect.Type) - } - - // Derive the message type from the v2 registry. - var t reflect.Type - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { - t = messageGoType(mt) - } - - // If we could not get a concrete type, it is possible that it is a - // pseudo-message for a map entry. - if t == nil { - d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) - if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { - kt := goTypeForField(md.Fields().ByNumber(1)) - vt := goTypeForField(md.Fields().ByNumber(2)) - t = reflect.MapOf(kt, vt) - } - } - - // Locally cache the message type for the given name. - if t != nil { - v, _ := messageTypeCache.LoadOrStore(s, t) - return v.(reflect.Type) - } - return nil -} - -func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { - switch k := fd.Kind(); k { - case protoreflect.EnumKind: - if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { - return enumGoType(et) - } - return reflect.TypeOf(protoreflect.EnumNumber(0)) - case protoreflect.MessageKind, protoreflect.GroupKind: - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { - return messageGoType(mt) - } - return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() - default: - return reflect.TypeOf(fd.Default().Interface()) - } -} - -func enumGoType(et protoreflect.EnumType) reflect.Type { - return reflect.TypeOf(et.New(0)) -} - -func messageGoType(mt protoreflect.MessageType) reflect.Type { - return reflect.TypeOf(MessageV1(mt.Zero().Interface())) -} - -// MessageName returns the full protobuf name for the given message type. -// -// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. -func MessageName(m Message) messageName { - if m == nil { - return "" - } - if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { - return m.XXX_MessageName() - } - return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) -} - -// RegisterExtension is called from the generated code to register -// the extension descriptor. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. -func RegisterExtension(d *ExtensionDesc) { - if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { - panic(err) - } -} - -type extensionsByNumber = map[int32]*ExtensionDesc - -var extensionCache sync.Map // map[messageName]extensionsByNumber - -// RegisteredExtensions returns a map of the registered extensions for the -// provided protobuf message, indexed by the extension field number. -// -// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. -func RegisteredExtensions(m Message) extensionsByNumber { - // Check whether the cache is stale. 
If the number of extensions for - // the given message differs, then it means that some extensions were - // recently registered upstream that we do not know about. - s := MessageName(m) - v, _ := extensionCache.Load(s) - xs, _ := v.(extensionsByNumber) - if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { - return xs // cache is up-to-date - } - - // Cache is stale, re-compute the extensions map. - xs = make(extensionsByNumber) - protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { - if xd, ok := xt.(*ExtensionDesc); ok { - xs[int32(xt.TypeDescriptor().Number())] = xd - } else { - // TODO: This implies that the protoreflect.ExtensionType is a - // custom type not generated by protoc-gen-go. We could try and - // convert the type to an ExtensionDesc. - } - return true - }) - extensionCache.Store(s, xs) - return xs -} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go deleted file mode 100644 index 47eb3e445..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_decode.go +++ /dev/null @@ -1,801 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/prototext" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextUnmarshalV2 = false - -// ParseError is returned by UnmarshalText. -type ParseError struct { - Message string - - // Deprecated: Do not use. - Line, Offset int -} - -func (e *ParseError) Error() string { - if wrapTextUnmarshalV2 { - return e.Message - } - if e.Line == 1 { - return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) - } - return fmt.Sprintf("line %d: %v", e.Line, e.Message) -} - -// UnmarshalText parses a proto text formatted string into m. -func UnmarshalText(s string, m Message) error { - if u, ok := m.(encoding.TextUnmarshaler); ok { - return u.UnmarshalText([]byte(s)) - } - - m.Reset() - mi := MessageV2(m) - - if wrapTextUnmarshalV2 { - err := prototext.UnmarshalOptions{ - AllowPartial: true, - }.Unmarshal([]byte(s), mi) - if err != nil { - return &ParseError{Message: err.Error()} - } - return checkRequiredNotSet(mi) - } else { - if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { - return err - } - return checkRequiredNotSet(mi) - } -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { - md := m.Descriptor() - fds := md.Fields() - - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. 
A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - seen := make(map[protoreflect.FieldNumber]bool) - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - if err := p.unmarshalExtensionOrAny(m, seen); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := protoreflect.Name(tok.value) - fd := fds.ByName(name) - switch { - case fd == nil: - gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { - fd = gd - } - case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: - fd = nil - case fd.IsWeak() && fd.Message().IsPlaceholder(): - fd = nil - } - if fd == nil { - typeName := string(md.FullName()) - if m, ok := m.Interface().(Message); ok { - t := reflect.TypeOf(m) - if t.Kind() == reflect.Ptr { - typeName = t.Elem().String() - } - } - return p.errorf("unknown field name %q in %v", name, typeName) - } - if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) - } - if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { - return p.errorf("non-repeated field %q was repeated", fd.Name()) - } - seen[fd.Number()] = true - - // Consume any colon. - if err := p.checkForColon(fd); err != nil { - return err - } - - // Parse into the field. - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - if v, err = p.unmarshalValue(v, fd); err != nil { - return err - } - m.Set(fd, v) - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - } - return nil -} - -func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { - name, err := p.consumeExtensionOrAnyName() - if err != nil { - return err - } - - // If it contains a slash, it's an Any type URL. 
- if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { - tok := p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - - mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) - if err != nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) - } - m2 := mt.New() - if err := p.unmarshalMessage(m2, terminator); err != nil { - return err - } - b, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) - } - - urlFD := m.Descriptor().Fields().ByName("type_url") - valFD := m.Descriptor().Fields().ByName("value") - if seen[urlFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) - } - if seen[valFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) - } - m.Set(urlFD, protoreflect.ValueOfString(name)) - m.Set(valFD, protoreflect.ValueOfBytes(b)) - seen[urlFD.Number()] = true - seen[valFD.Number()] = true - return nil - } - - xname := protoreflect.FullName(name) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(m.Descriptor()) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - return p.errorf("unrecognized extension %q", name) - } - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName()) - } - - if err := p.checkForColon(fd); err != nil { - return err - } - - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - v, err = p.unmarshalValue(v, fd) - if err != nil { - return err - } - m.Set(fd, v) - return p.consumeOptionalSeparator() -} - -func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch { - case fd.IsList(): - lv := v.List() - var err error - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return v, p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return v, nil - } - - // One value of the repeated field. - p.back() - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - return v, nil - case fd.IsMap(): - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. 
- var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - - keyFD := fd.MapKey() - valFD := fd.MapValue() - - mv := v.Map() - kv := keyFD.Default() - vv := mv.NewValue() - for { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == terminator { - break - } - var err error - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return v, err - } - if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - case "value": - if err := p.checkForColon(valFD); err != nil { - return v, err - } - if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - default: - p.back() - return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - mv.Set(kv.MapKey(), vv) - return v, nil - default: - p.back() - return p.unmarshalSingularValue(v, fd) - } -} - -func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch fd.Kind() { - case protoreflect.BoolKind: - switch tok.value { - case "true", "1", "t", "True": - return protoreflect.ValueOfBool(true), nil - case "false", "0", "f", "False": - return protoreflect.ValueOfBool(false), nil - } - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil - } - } - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil - } - } - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfUint32(uint32(x)), nil - } - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfUint64(uint64(x)), nil - } - case protoreflect.FloatKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". 
- v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 32); err == nil { - return protoreflect.ValueOfFloat32(float32(x)), nil - } - case protoreflect.DoubleKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". - v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 64); err == nil { - return protoreflect.ValueOfFloat64(float64(x)), nil - } - case protoreflect.StringKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfString(tok.unquoted), nil - } - case protoreflect.BytesKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil - } - case protoreflect.EnumKind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil - } - vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) - if vd != nil { - return protoreflect.ValueOfEnum(vd.Number()), nil - } - case protoreflect.MessageKind, protoreflect.GroupKind: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - err := p.unmarshalMessage(v.Message(), terminator) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } - return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - if fd.Message() == nil { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -// consumeExtensionOrAnyName consumes an extension name or an Any type URL and -// the following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtensionOrAnyName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in unmarshalMessage to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -var errBadUTF8 = errors.New("proto: bad UTF-8") - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. 
- simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go deleted file mode 100644 index a31134eeb..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "encoding" - "fmt" - "io" - "math" - "sort" - "strings" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextMarshalV2 = false - -// TextMarshaler is a configurable text format marshaler. 
-type TextMarshaler struct { - Compact bool // use compact text format (one line) - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes the proto text format of m to w. -func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { - b, err := tm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// Text returns a proto text formatted string of m. -func (tm *TextMarshaler) Text(m Message) string { - b, _ := tm.marshal(m) - return string(b) -} - -func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return []byte(""), nil - } - - if wrapTextMarshalV2 { - if m, ok := m.(encoding.TextMarshaler); ok { - return m.MarshalText() - } - - opts := prototext.MarshalOptions{ - AllowPartial: true, - EmitUnknown: true, - } - if !tm.Compact { - opts.Indent = " " - } - if !tm.ExpandAny { - opts.Resolver = (*protoregistry.Types)(nil) - } - return opts.Marshal(mr.Interface()) - } else { - w := &textWriter{ - compact: tm.Compact, - expandAny: tm.ExpandAny, - complete: true, - } - - if m, ok := m.(encoding.TextMarshaler); ok { - b, err := m.MarshalText() - if err != nil { - return nil, err - } - w.Write(b) - return w.buf, nil - } - - err := w.writeMessage(mr) - return w.buf, err - } -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// MarshalText writes the proto text format of m to w. -func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } - -// MarshalTextString returns a proto text formatted string of m. -func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } - -// CompactText writes the compact proto text format of m to w. -func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } - -// CompactTextString returns a compact proto text formatted string of m. -func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } - -var ( - newline = []byte("\n") - endBraceNewline = []byte("}\n") - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - compact bool // same as TextMarshaler.Compact - expandAny bool // same as TextMarshaler.ExpandAny - complete bool // whether the current position is a complete line - indent int // indentation level; never negative - buf []byte -} - -func (w *textWriter) Write(p []byte) (n int, _ error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, p...) - w.complete = false - return len(p), nil - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - w.buf = append(w.buf, ' ') - n++ - } - w.buf = append(w.buf, frag...) - n += len(frag) - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - w.buf = append(w.buf, frag...) 
- n += len(frag) - if i+1 < len(frags) { - w.buf = append(w.buf, '\n') - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, c) - w.complete = c == '\n' - return nil -} - -func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - - if fd.Kind() != protoreflect.GroupKind { - w.buf = append(w.buf, fd.Name()...) - w.WriteByte(':') - } else { - // Use message type name for group field name. - w.buf = append(w.buf, fd.Message().Name()...) - } - - if !w.compact { - w.WriteByte(' ') - } -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { - md := m.Descriptor() - fdURL := md.Fields().ByName("type_url") - fdVal := md.Fields().ByName("value") - - url := m.Get(fdURL).String() - mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) - if err != nil { - return false, nil - } - - b := m.Get(fdVal).Bytes() - m2 := mt.New() - if err := proto.Unmarshal(b, m2.Interface()); err != nil { - return false, nil - } - w.Write([]byte("[")) - if requiresQuotes(url) { - w.writeQuotedString(url) - } else { - w.Write([]byte(url)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.indent++ - } - if err := w.writeMessage(m2); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.indent-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (w *textWriter) writeMessage(m protoreflect.Message) error { - md := m.Descriptor() - if w.expandAny && md.FullName() == "google.protobuf.Any" { - if canExpand, err := w.writeProto3Any(m); canExpand { - return err - } - } - - fds := md.Fields() - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - if fd == nil || !m.Has(fd) { - continue - } - - switch { - case fd.IsList(): - lv := m.Get(fd).List() - for j := 0; j < lv.Len(); j++ { - w.writeName(fd) - v := lv.Get(j) - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - } - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := m.Get(fd).Map() - - type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, 
protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - for _, entry := range entries { - w.writeName(fd) - w.WriteByte('<') - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - w.writeName(kfd) - if err := w.writeSingularValue(entry.key, kfd); err != nil { - return err - } - w.WriteByte('\n') - w.writeName(vfd) - if err := w.writeSingularValue(entry.val, vfd); err != nil { - return err - } - w.WriteByte('\n') - w.indent-- - w.WriteByte('>') - w.WriteByte('\n') - } - default: - w.writeName(fd) - if err := w.writeSingularValue(m.Get(fd), fd); err != nil { - return err - } - w.WriteByte('\n') - } - } - - if b := m.GetUnknown(); len(b) > 0 { - w.writeUnknownFields(b) - } - return w.writeExtensions(m) -} - -func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - switch fd.Kind() { - case protoreflect.FloatKind, protoreflect.DoubleKind: - switch vf := v.Float(); { - case math.IsInf(vf, +1): - w.Write(posInf) - case math.IsInf(vf, -1): - w.Write(negInf) - case math.IsNaN(vf): - w.Write(nan) - default: - fmt.Fprint(w, v.Interface()) - } - case protoreflect.StringKind: - // NOTE: This does not validate UTF-8 for historical reasons. - w.writeQuotedString(string(v.String())) - case protoreflect.BytesKind: - w.writeQuotedString(string(v.Bytes())) - case protoreflect.MessageKind, protoreflect.GroupKind: - var bra, ket byte = '<', '>' - if fd.Kind() == protoreflect.GroupKind { - bra, ket = '{', '}' - } - w.WriteByte(bra) - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - m := v.Message() - if m2, ok := m.Interface().(encoding.TextMarshaler); ok { - b, err := m2.MarshalText() - if err != nil { - return err - } - w.Write(b) - } else { - w.writeMessage(m) - } - w.indent-- - w.WriteByte(ket) - case protoreflect.EnumKind: - if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { - fmt.Fprint(w, ev.Name()) - } else { - fmt.Fprint(w, v.Enum()) - } - default: - fmt.Fprint(w, v.Interface()) - } - return nil -} - -// writeQuotedString writes a quoted string in the protocol buffer text format. -func (w *textWriter) writeQuotedString(s string) { - w.WriteByte('"') - for i := 0; i < len(s); i++ { - switch c := s[i]; c { - case '\n': - w.buf = append(w.buf, `\n`...) - case '\r': - w.buf = append(w.buf, `\r`...) - case '\t': - w.buf = append(w.buf, `\t`...) - case '"': - w.buf = append(w.buf, `\"`...) - case '\\': - w.buf = append(w.buf, `\\`...) - default: - if isPrint := c >= 0x20 && c < 0x7f; isPrint { - w.buf = append(w.buf, c) - } else { - w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) 
- } - } - } - w.WriteByte('"') -} - -func (w *textWriter) writeUnknownFields(b []byte) { - if !w.compact { - fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) - } - - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return - } - b = b[n:] - - if wtyp == protowire.EndGroupType { - w.indent-- - w.Write(endBraceNewline) - continue - } - fmt.Fprint(w, num) - if wtyp != protowire.StartGroupType { - w.WriteByte(':') - } - if !w.compact || wtyp == protowire.StartGroupType { - w.WriteByte(' ') - } - switch wtyp { - case protowire.VarintType: - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed32Type: - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed64Type: - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.BytesType: - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprintf(w, "%q", v) - case protowire.StartGroupType: - w.WriteByte('{') - w.indent++ - default: - fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) - } - w.WriteByte('\n') - } -} - -// writeExtensions writes all the extensions in m. -func (w *textWriter) writeExtensions(m protoreflect.Message) error { - md := m.Descriptor() - if md.ExtensionRanges().Len() == 0 { - return nil - } - - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - // For message set, use the name of the message as the extension name. - name := string(ext.desc.FullName()) - if isMessageSet(ext.desc.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - if !ext.desc.IsList() { - if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { - return err - } - } else { - lv := ext.val.List() - for i := 0; i < lv.Len(); i++ { - if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { - return err - } - } - } - } - return nil -} - -func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - fmt.Fprintf(w, "[%s]:", name) - if !w.compact { - w.WriteByte(' ') - } - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - for i := 0; i < w.indent*2; i++ { - w.buf = append(w.buf, ' ') - } - w.complete = false -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go deleted file mode 100644 index d7c28da5a..000000000 --- a/vendor/github.com/golang/protobuf/proto/wire.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/runtime/protoiface" -) - -// Size returns the size in bytes of the wire-format encoding of m. 
-func Size(m Message) int { - if m == nil { - return 0 - } - mi := MessageV2(m) - return protoV2.Size(mi) -} - -// Marshal returns the wire-format encoding of m. -func Marshal(m Message) ([]byte, error) { - b, err := marshalAppend(nil, m, false) - if b == nil { - b = zeroBytes - } - return b, err -} - -var zeroBytes = make([]byte, 0, 0) - -func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { - if m == nil { - return nil, ErrNil - } - mi := MessageV2(m) - nbuf, err := protoV2.MarshalOptions{ - Deterministic: deterministic, - AllowPartial: true, - }.MarshalAppend(buf, mi) - if err != nil { - return buf, err - } - if len(buf) == len(nbuf) { - if !mi.ProtoReflect().IsValid() { - return buf, ErrNil - } - } - return nbuf, checkRequiredNotSet(mi) -} - -// Unmarshal parses a wire-format message in b and places the decoded results in m. -// -// Unmarshal resets m before starting to unmarshal, so any existing data in m is always -// removed. Use UnmarshalMerge to preserve and append to existing data. -func Unmarshal(b []byte, m Message) error { - m.Reset() - return UnmarshalMerge(b, m) -} - -// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. -func UnmarshalMerge(b []byte, m Message) error { - mi := MessageV2(m) - out, err := protoV2.UnmarshalOptions{ - AllowPartial: true, - Merge: true, - }.UnmarshalState(protoiface.UnmarshalInput{ - Buf: b, - Message: mi.ProtoReflect(), - }) - if err != nil { - return err - } - if out.Flags&protoiface.UnmarshalInitialized > 0 { - return nil - } - return checkRequiredNotSet(mi) -} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go deleted file mode 100644 index 398e34859..000000000 --- a/vendor/github.com/golang/protobuf/proto/wrappers.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -// Bool stores v in a new bool value and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int stores v in a new int32 value and returns a pointer to it. -// -// Deprecated: Use Int32 instead. -func Int(v int) *int32 { return Int32(int32(v)) } - -// Int32 stores v in a new int32 value and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 stores v in a new int64 value and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Uint32 stores v in a new uint32 value and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 stores v in a new uint64 value and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// Float32 stores v in a new float32 value and returns a pointer to it. -func Float32(v float32) *float32 { return &v } - -// Float64 stores v in a new float64 value and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// String stores v in a new string value and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d9b..000000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. 
-testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index 52ccb5a93..000000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,18 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Amazon.com, Inc -Damian Gryski -Eric Buth -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Klaus Post -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index ea6524ddd..000000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,41 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Alex Legg -Damian Gryski -Eric Buth -Jan Mercl <0xjnml@gmail.com> -Jonathan Swinney -Kai Backman -Klaus Post -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10f4..000000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879a..000000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 23c6e26c6..000000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. 
It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
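Aside: the Reader removed here implements the Snappy stream (framing) format, a stream-identifier chunk followed by compressed or uncompressed data chunks, each carrying a CRC. Below is a minimal round-trip sketch against the exported stream API (NewBufferedWriter, Close, NewReader, all names visible in the deleted sources); it is illustrative only, not code from this repository.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// Writer side: the buffered writer emits the stream format that the
	// deleted fill() loop parses (stream identifier, then data chunks).
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write(bytes.Repeat([]byte("percona backup mongodb "), 128)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close flushes buffered input and finishes the stream
		panic(err)
	}
	encoded := buf.Len()

	// Reader side: NewReader walks the chunks and verifies each checksum.
	out, err := io.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Printf("stream of %d bytes decoded back to %d bytes\n", encoded, len(out))
}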
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f65e..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. 
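Aside: the deleted assembly dispatches on the low two bits of each tag byte, exactly like the pure-Go decoder further below in this deletion. A sketch of just that dispatch; describeTag is a hypothetical helper written for illustration, while the constant values match the deleted sources.

package main

import "fmt"

// Tag values taken from the low two bits of a block-format element, as in the
// deleted snappy sources.
const (
	tagLiteral = 0x00 // literal run
	tagCopy1   = 0x01 // copy, 2-byte tag (11-bit offset, length 4..11)
	tagCopy2   = 0x02 // copy, 3-byte tag (16-bit offset)
	tagCopy4   = 0x03 // copy, 5-byte tag (32-bit offset)
)

// describeTag shows only the dispatch step of the deleted decode loop, not a
// full decoder.
func describeTag(b byte) string {
	switch b & 0x03 {
	case tagLiteral:
		// The upper six bits hold length-1, or 60..63 to signal that
		// 1..4 extra length bytes follow.
		x := b >> 2
		if x < 60 {
			return fmt.Sprintf("literal of %d bytes", int(x)+1)
		}
		return fmt.Sprintf("literal, %d extra length byte(s) follow", int(x)-59)
	case tagCopy1:
		return "copy, 2-byte tag (11-bit offset, length 4..11)"
	case tagCopy2:
		return "copy, 3-byte tag (16-bit offset)"
	default:
		return "copy, 5-byte tag (32-bit offset)"
	}
}

func main() {
	for _, b := range []byte{0x08, 0xf0, 0x0d, 0x02, 0x03} {
		fmt.Printf("tag 0x%02x: %s\n", b, describeTag(b))
	}
}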
- - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. 
In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s deleted file mode 100644 index 7a3ead17e..000000000 --- a/vendor/github.com/golang/snappy/decode_arm64.s +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - R2 scratch -// - R3 scratch -// - R4 length or x -// - R5 offset -// - R6 &src[s] -// - R7 &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. -// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. -TEXT ·decode(SB), NOSPLIT, $56-56 - // Initialize R6, R7 and R8-R13. - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R7 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R6 - MOVD R11, R13 - ADD R12, R13, R13 - -loop: - // for s < len(src) - CMP R13, R6 - BEQ end - - // R4 = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBU (R6), R4 - MOVW R4, R3 - ANDW $3, R3 - MOVW $1, R1 - CMPW R1, R3 - BGE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - MOVW $60, R1 - LSRW $2, R4, R4 - CMPW R4, R1 - BLS tagLit60Plus - - // case x < 60: - // s++ - ADD $1, R6, R6 - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that R4 == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // R4 can hold 64 bits, so the increment cannot overflow. - ADD $1, R4, R4 - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // R2 = len(dst) - d - // R3 = len(src) - s - MOVD R10, R2 - SUB R7, R2, R2 - MOVD R13, R3 - SUB R6, R3, R3 - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. 
It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMP $16, R4 - BGT callMemmove - CMP $16, R2 - BLT callMemmove - CMP $16, R3 - BLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - LDP 0(R6), (R14, R15) - STP (R14, R15), 0(R7) - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMP R2, R4 - BGT errCorrupt - CMP R3, R4 - BGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVD R7, 8(RSP) - MOVD R6, 16(RSP) - MOVD R4, 24(RSP) - MOVD R7, 32(RSP) - MOVD R6, 40(RSP) - MOVD R4, 48(RSP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVD 32(RSP), R7 - MOVD 40(RSP), R6 - MOVD 48(RSP), R4 - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R13 - ADD R12, R13, R13 - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADD R4, R6, R6 - SUB $58, R6, R6 - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // case x == 60: - MOVW $61, R1 - CMPW R1, R4 - BEQ tagLit61 - BGT tagLit62Plus - - // x = uint32(src[s-1]) - MOVBU -1(R6), R4 - B doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVHU -2(R6), R4 - B doLit - -tagLit62Plus: - CMPW $62, R4 - BHI tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVHU -3(R6), R4 - MOVBU -1(R6), R3 - ORR R3<<16, R4 - B doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVWU -4(R6), R4 - B doLit - - // The code above handles literal tags. - // ---------------------------------------- - // The code below handles copy tags. 
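Aside: the copy-tag cases that follow in the deleted assembly use the same length and offset arithmetic as the pure-Go decoder. A sketch of that arithmetic; parseCopy is a hypothetical helper written for illustration, and bounds checks are omitted.

package main

import (
	"encoding/binary"
	"fmt"
)

// parseCopy extracts length and offset from a copy element starting at src[s],
// mirroring the formulas in the deleted decoders, and returns the index just
// past the tag bytes. src[s] must start a copy element.
func parseCopy(src []byte, s int) (length, offset, next int) {
	switch src[s] & 0x03 {
	case 0x01: // tagCopy1: 2-byte tag, length 4..11, 11-bit offset
		length = 4 + int(src[s])>>2&0x7
		offset = int(uint32(src[s])&0xe0<<3 | uint32(src[s+1]))
		return length, offset, s + 2
	case 0x02: // tagCopy2: 3-byte tag, length 1..64, 16-bit little-endian offset
		length = 1 + int(src[s])>>2
		offset = int(binary.LittleEndian.Uint16(src[s+1:]))
		return length, offset, s + 3
	case 0x03: // tagCopy4: 5-byte tag, length 1..64, 32-bit little-endian offset
		length = 1 + int(src[s])>>2
		offset = int(binary.LittleEndian.Uint32(src[s+1:]))
		return length, offset, s + 5
	default: // tagLiteral: not a copy element
		panic("parseCopy: src[s] does not start a copy")
	}
}

func main() {
	// A hand-built tagCopy1 element: (length-4)=3 in bits 2..4, offset 100.
	length, offset, next := parseCopy([]byte{0x0d, 100}, 0)
	fmt.Printf("copy %d bytes from offset %d; tag was %d bytes\n", length, offset, next)
}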
- -tagCopy4: - // case tagCopy4: - // s += 5 - ADD $5, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-5])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVWU -4(R6), R5 - B doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADD $3, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-3])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVHU -2(R6), R5 - B doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - R3 == src[s] & 0x03 - // - R4 == src[s] - CMP $2, R3 - BEQ tagCopy2 - BGT tagCopy4 - - // case tagCopy1: - // s += 2 - ADD $2, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVD R4, R5 - AND $0xe0, R5 - MOVBU -1(R6), R3 - ORR R5<<3, R3, R5 - - // length = 4 + int(src[s-2])>>2&0x7 - MOVD $7, R1 - AND R4>>2, R1, R4 - ADD $4, R4, R4 - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - R4 == length && R4 > 0 - // - R5 == offset - - // if offset <= 0 { etc } - MOVD $0, R1 - CMP R1, R5 - BLE errCorrupt - - // if d < offset { etc } - MOVD R7, R3 - SUB R8, R3, R3 - CMP R5, R3 - BLT errCorrupt - - // if length > len(dst)-d { etc } - MOVD R10, R3 - SUB R7, R3, R3 - CMP R3, R4 - BGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVD R10, R14 - SUB R7, R14, R14 - MOVD R7, R15 - SUB R5, R15, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMP $16, R4 - BGT slowForwardCopy - CMP $8, R5 - BLT slowForwardCopy - CMP $16, R14 - BLT slowForwardCopy - MOVD 0(R15), R2 - MOVD R2, 0(R7) - MOVD 8(R15), R3 - MOVD R3, 8(R7) - ADD R4, R7, R7 - B loop - -slowForwardCopy: - // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. 
For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUB $10, R14, R14 - CMP R14, R4 - BGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMP $8, R5 - BGE fixUpSlowForwardCopy - MOVD (R15), R3 - MOVD R3, (R7) - SUB R5, R4, R4 - ADD R5, R7, R7 - ADD R5, R5, R5 - B makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by R7 being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save R7 to R2 so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVD R7, R2 - ADD R4, R7, R7 - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - MOVD $0, R1 - CMP R1, R4 - BLE loop - MOVD (R15), R3 - MOVD R3, (R2) - ADD $8, R15, R15 - ADD $8, R2, R2 - SUB $8, R4, R4 - B finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), R3 - MOVB R3, (R7) - ADD $1, R15, R15 - ADD $1, R7, R7 - SUB $1, R4, R4 - CBNZ R4, verySlowForwardCopy - B loop - - // The code above handles copy tags. - // ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMP R10, R7 - BNE errCorrupt - - // return 0 - MOVD $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVD $1, R2 - MOVD R2, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go deleted file mode 100644 index 7082b3491..000000000 --- a/vendor/github.com/golang/snappy/decode_asm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 2f672be55..000000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. 
- // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 7f2365707..000000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. 
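Aside: Encode, documented in the deletion above, is the block-format entry point, and DecodedLen and MaxEncodedLen (the latter appears just below) bound the buffers on either side. A minimal, illustrative round trip through those exported functions, pre-sizing both buffers so neither call has to allocate.

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("0123456789abcdef"), 256)

	// MaxEncodedLen bounds the worst-case block size, so Encode can fill dst in place.
	dst := make([]byte, snappy.MaxEncodedLen(len(src)))
	enc := snappy.Encode(dst, src)

	// The block starts with a uvarint holding the decoded length; DecodedLen
	// reads just that header.
	n, err := snappy.DecodedLen(enc)
	if err != nil {
		panic(err)
	}

	dec, err := snappy.Decode(make([]byte, n), enc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes; round trip ok: %v\n", len(src), len(enc), bytes.Equal(src, dec))
}

The 32 + n + n/6 bound derived in the MaxEncodedLen comment is what makes the pre-sized dst above safe for any input length it accepts.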
-// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. 
- wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. 
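Aside: the write path above keeps a chunk's compressed form only when it saves at least 12.5% over the raw bytes; otherwise it emits an uncompressed chunk. A sketch paraphrasing that condition (len(compressed) >= len(uncompressed)-len(uncompressed)/8); worthCompressing is a hypothetical helper, not part of the package.

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

// worthCompressing reports whether a Snappy stream writer would keep the
// compressed form of this block: compression has to save at least 1/8 of the
// input, mirroring the check in the deleted write path.
func worthCompressing(uncompressed []byte) (bool, int) {
	compressed := snappy.Encode(nil, uncompressed)
	if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
		return false, len(compressed) // would go out as an uncompressed chunk
	}
	return true, len(compressed)
}

func main() {
	zeros := make([]byte, 4096)                                   // highly repetitive, compresses well
	text := []byte("short, high-entropy input rarely saves 12.5%") // likely kept uncompressed

	keep, n := worthCompressing(zeros)
	fmt.Printf("zeros: %d -> %d bytes, keep compressed: %v\n", len(zeros), n, keep)
	keep, n = worthCompressing(text)
	fmt.Printf("text:  %d -> %d bytes, keep compressed: %v\n", len(text), n, keep)
}

Writing incompressible chunks verbatim keeps the stream close to the raw size instead of letting a failed compression attempt grow it.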
-func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe..000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. 
- MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. 
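Aside: the extendMatch assembly above compares eight bytes at a time and, on the first mismatch, XORs the two words and counts trailing zero bits to find the first differing byte (little-endian, so bit index divided by 8 is the byte index). A pure-Go sketch of the same idea, written for illustration rather than copied from the package.

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// extendMatch returns the largest k such that src[i:] and src[j:k] keep
// matching byte for byte, comparing eight bytes at a time the way the deleted
// assembly does, then finishing one byte at a time in the tail.
func extendMatch(src []byte, i, j int) int {
	for ; j+8 <= len(src); i, j = i+8, j+8 {
		a := binary.LittleEndian.Uint64(src[i:])
		b := binary.LittleEndian.Uint64(src[j:])
		if a != b {
			// Trailing zero bits of the XOR locate the first differing byte.
			return j + bits.TrailingZeros64(a^b)/8
		}
	}
	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
	}
	return j
}

func main() {
	src := []byte("abcdefgh--abcdefgX")
	// The match starting at i=0, j=10 extends for seven bytes before 'X' differs.
	fmt.Println(extendMatch(src, 0, 10)) // prints 17
}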
-// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. - MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. 
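Aside: encodeBlock hashes four input bytes with the multiplier 0x1e35a7bd and a shift picked by the calcShift loop, so the match table scales with the block (256 entries doubling up to a 16384-entry cap). A small sketch of that sizing and hashing step; tableParams is a hypothetical name for what the deleted code computes inline.

package main

import (
	"encoding/binary"
	"fmt"
)

const maxTableSize = 1 << 14 // 16384 entries, as in the deleted sources

// tableParams mirrors the shift/tableSize calculation at the top of the
// deleted encodeBlock: small inputs keep a small hash table and a large shift.
func tableParams(srcLen int) (shift uint32, tableSize int) {
	shift, tableSize = 32-8, 1<<8
	for ; tableSize < maxTableSize && tableSize < srcLen; tableSize *= 2 {
		shift--
	}
	return shift, tableSize
}

// hash is the multiplicative hash used to index the match table.
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

func main() {
	src := []byte("four-byte windows are hashed into the table")
	shift, size := tableParams(len(src))
	h := hash(binary.LittleEndian.Uint32(src), shift)
	fmt.Printf("len=%d -> tableSize=%d shift=%d, first hash=%d\n", len(src), size, shift, h)
}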
- // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
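Likewise, the 0xfe and 0xee bytes in the emitCopy inline are the 63<<2|tagCopy2 and 59<<2|tagCopy2 headers, and the two-byte branch packs offset and length exactly as the pure-Go emitCopy (included later in this diff) does. A small sketch with illustrative names:

package sketch

const (
	tagCopy1 = 0x01
	tagCopy2 = 0x02

	copyLen64Header = 63<<2 | tagCopy2 // == 0xfe: 3-byte op, copy length 64
	copyLen60Header = 59<<2 | tagCopy2 // == 0xee: 3-byte op, copy length 60
)

// twoByteCopy mirrors the "encoded as 2 bytes" branch above; it is valid only
// when 4 <= length < 12 and offset < 2048 (the offset must fit in 11 bits).
func twoByteCopy(dst []byte, offset, length int) int {
	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[1] = uint8(offset)
	return 2
}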
- MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s deleted file mode 100644 index f8d54adfc..000000000 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - R3 len(lit) -// - R4 n -// - R6 return value -// - R8 &dst[i] -// - R10 &lit[0] -// -// The 32 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $32-56 - MOVD dst_base+0(FP), R8 - MOVD lit_base+24(FP), R10 - MOVD lit_len+32(FP), R3 - MOVD R3, R6 - MOVW R3, R4 - SUBW $1, R4, R4 - - CMPW $60, R4 - BLT oneByte - CMPW $256, R4 - BLT twoBytes - -threeBytes: - MOVD $0xf4, R2 - MOVB R2, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - ADD $3, R6, R6 - B memmove - -twoBytes: - MOVD $0xf0, R2 - MOVB R2, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - ADD $2, R6, R6 - B memmove - -oneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - ADD $1, R6, R6 - -memmove: - MOVD R6, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - R3 length -// - R7 &dst[0] -// - R8 &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVD dst_base+0(FP), R8 - MOVD R8, R7 - MOVD offset+24(FP), R11 - MOVD length+32(FP), R3 - -loop0: - // for length >= 68 { etc } - CMPW $68, R3 - BLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $64, R3, R3 - B loop0 - -step1: - // if length > 64 { etc } - CMP $64, R3 - BLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $60, R3, R3 - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMP $12, R3 - BGE step3 - CMPW $2048, R11 - BGE step3 - - // Emit the remaining copy, encoded as 2 bytes. 
- MOVB R11, 1(R8) - LSRW $3, R11, R11 - AND $0xe0, R11, R11 - SUB $4, R3, R3 - LSLW $2, R3 - AND $0xff, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUB $1, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - R6 &src[0] -// - R7 &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVD src_base+0(FP), R6 - MOVD src_len+8(FP), R14 - MOVD i+24(FP), R15 - MOVD j+32(FP), R7 - ADD R6, R14, R14 - ADD R6, R15, R15 - ADD R6, R7, R7 - MOVD R14, R13 - SUB $8, R13, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMP R13, R7 - BHI cmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE bsf - ADD $8, R15, R15 - ADD $8, R7, R7 - B cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS extendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE extendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - R3 . . -// - R4 . . -// - R5 64 shift -// - R6 72 &src[0], tableSize -// - R7 80 &src[s] -// - R8 88 &dst[d] -// - R9 96 sLimit -// - R10 . &src[nextEmit] -// - R11 104 prevHash, currHash, nextHash, offset -// - R12 112 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 120 candidate -// - R16 . hash constant, 0x1e35a7bd -// - R17 . &table -// - . 128 table -// -// The second column (64, 72, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An -// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. -TEXT ·encodeBlock(SB), 0, $32896-56 - MOVD dst_base+0(FP), R8 - MOVD src_base+24(FP), R7 - MOVD src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVD $24, R5 - MOVD $256, R6 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - MOVD $16384, R2 - CMP R2, R6 - BGE varTable - CMP R14, R6 - BGE varTable - SUB $1, R5, R5 - LSL $1, R6, R6 - B calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each - // iterations writes 64 bytes, so we can do only tableSize/32 writes - // instead of the 2048 writes that would zero-initialize all of table's - // 32768 bytes. This clear could overrun the first tableSize elements, but - // it won't overrun the allocated stack size. - ADD $128, RSP, R17 - MOVD R17, R4 - - // !!! R6 = &src[tableSize] - ADD R6<<1, R17, R6 - -memclr: - STP.P (ZR, ZR), 64(R4) - STP (ZR, ZR), -48(R4) - STP (ZR, ZR), -32(R4) - STP (ZR, ZR), -16(R4) - CMP R4, R6 - BHI memclr - - // !!! R6 = &src[0] - MOVD R7, R6 - - // sLimit := len(src) - inputMargin - MOVD R14, R9 - SUB $15, R9, R9 - - // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't - // change for the rest of the function. - MOVD R5, 64(RSP) - MOVD R6, 72(RSP) - MOVD R9, 96(RSP) - - // nextEmit := 0 - MOVD R6, R10 - - // s := 1 - ADD $1, R7, R7 - - // nextHash := hash(load32(src, s), shift) - MOVW 0(R7), R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - -outer: - // for { etc } - - // skip := 32 - MOVD $32, R12 - - // nextS := s - MOVD R7, R13 - - // candidate := 0 - MOVD $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVD R13, R7 - - // bytesBetweenHashLookups := skip >> 5 - MOVD R12, R14 - LSR $5, R14, R14 - - // nextS = s + bytesBetweenHashLookups - ADD R14, R13, R13 - - // skip += bytesBetweenHashLookups - ADD R14, R12, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVD R13, R3 - SUB R6, R3, R3 - CMP R9, R3 - BHI emitRemainder - - // candidate = int(table[nextHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[nextHash] = uint16(s) - MOVD R7, R3 - SUB R6, R3, R3 - - MOVH R3, 0(R17)(R11<<1) - - // nextHash = hash(load32(src, nextS), shift) - MOVW 0(R13), R11 - MULW R16, R11 - LSRW R5, R11, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVW 0(R7), R3 - MOVW (R6)(R15), R4 - CMPW R4, R3 - BNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVD R7, R3 - SUB R10, R3, R3 - CMP $16, R3 - BLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
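One arm64-specific detail: the MOVW/MOVKW pair above loads the hash multiplier in two 16-bit halves, which reassemble to the same constant used by the amd64 code and encode_other.go.

package sketch

const hashMul = 0x1e35<<16 | 0xa7bd // == 0x1e35a7bd, the multiplier kept in R16 above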
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVW R3, R4 - SUBW $1, R4, R4 - - MOVW $60, R2 - CMPW R2, R4 - BLT inlineEmitLiteralOneByte - MOVW $256, R2 - CMPW R2, R4 - BLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVD $0xf4, R1 - MOVB R1, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVD $0xf0, R1 - MOVB R1, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADD R3, R8, R8 - MOVD R7, 80(RSP) - MOVD R8, 88(RSP) - MOVD R15, 120(RSP) - CALL runtime·memmove(SB) - MOVD 64(RSP), R5 - MOVD 72(RSP), R6 - MOVD 80(RSP), R7 - MOVD 88(RSP), R8 - MOVD 96(RSP), R9 - MOVD 120(RSP), R15 - ADD $128, RSP, R17 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - B inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB R3, R4 - SUBW $1, R4, R4 - AND $0xff, R4, R4 - LSLW $2, R4, R4 - MOVB R4, (R8) - ADD $1, R8, R8 - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - LDP 0(R10), (R0, R1) - STP (R0, R1), 0(R8) - ADD R3, R8, R8 - -inner1: - // for { etc } - - // base := s - MOVD R7, R12 - - // !!! offset := base - candidate - MOVD R12, R11 - SUB R15, R11, R11 - SUB R6, R11, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVD src_len+32(FP), R14 - ADD R6, R14, R14 - - // !!! R13 = &src[len(src) - 8] - MOVD R14, R13 - SUB $8, R13, R13 - - // !!! R15 = &src[candidate + 4] - ADD $4, R15, R15 - ADD R6, R15, R15 - - // !!! s += 4 - ADD $4, R7, R7 - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMP R13, R7 - BHI inlineExtendMatchCmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchBSF - ADD $8, R15, R15 - ADD $8, R7, R7 - B inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - B inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. 
- CMP R7, R14 - BLS inlineExtendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVD R7, R3 - SUB R12, R3, R3 - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - MOVW $68, R2 - CMPW R2, R3 - BLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $64, R3, R3 - B inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - MOVW $64, R2 - CMPW R2, R3 - BLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $60, R3, R3 - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - MOVW $12, R2 - CMPW R2, R3 - BGE inlineEmitCopyStep3 - MOVW $2048, R2 - CMPW R2, R11 - BGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $8, R11, R11 - LSLW $5, R11, R11 - SUBW $4, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - B inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBW $1, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVD R7, R10 - - // if s >= sLimit { goto emitRemainder } - MOVD R7, R3 - SUB R6, R3, R3 - CMP R3, R9 - BLS emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVD -1(R7), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // table[prevHash] = uint16(s-1) - MOVD R7, R3 - SUB R6, R3, R3 - SUB $1, R3, R3 - - MOVHU R3, 0(R17)(R11<<1) - - // currHash := hash(uint32(x>>8), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // candidate = int(table[currHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[currHash] = uint16(s) - ADD $1, R3, R3 - MOVHU R3, 0(R17)(R11<<1) - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVW (R6)(R15), R4 - CMPW R4, R14 - BEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // s++ - ADD $1, R7, R7 - - // break out of the inner1 for loop, i.e. continue the outer loop. - B outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVD src_len+32(FP), R3 - ADD R6, R3, R3 - CMP R3, R10 - BEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVD R8, 8(RSP) - MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD R10, 32(RSP) - SUB R10, R3, R3 - MOVD R3, 40(RSP) - MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
- MOVD R8, 88(RSP) - CALL ·emitLiteral(SB) - MOVD 88(RSP), R8 - - // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVD 56(RSP), R1 - ADD R1, R8, R8 - -encodeBlockEnd: - MOVD dst_base+0(FP), R3 - SUB R3, R8, R8 - MOVD R8, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go deleted file mode 100644 index 107c1e714..000000000 --- a/vendor/github.com/golang/snappy/encode_asm.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index 296d7f0be..000000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). 
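The // +build lines in the deleted encode_asm.go and encode_other.go use the pre-Go 1.17 constraint syntax; for reference, the equivalent //go:build expressions would be the following (a translation for readability, not part of the removed files):

// encode_asm.go (assembly-backed declarations):
//go:build !appengine && gc && !noasm && (amd64 || arm64)

// encode_other.go (pure-Go fallback):
//go:build (!amd64 && !arm64) || appengine || !gc || noasm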
The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. 
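A quick worked example of the 68/60 thresholds described in the emitCopy comment above, assuming the offset is below 2048 so the short remainder qualifies for tagCopy1 (constant names are illustrative):

package sketch

const (
	split60plus7 = 3 + 2 // tagCopy2 (len 60) + tagCopy1 (len 7) = 5 bytes for a length-67 copy
	split64plus3 = 3 + 3 // tagCopy2 (len 64) + tagCopy2 (len 3) = 6 bytes; len 3 is below tagCopy1's minimum of 4
)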
This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ece692ea4..000000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. 
They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
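A minimal, self-contained round-trip with this package's public block-format API (Encode/Decode); the stream format would use snappy.NewBufferedWriter and snappy.NewReader instead. The sample data is just an arbitrary repeated string.

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("percona-backup-mongodb "), 64)

	enc := snappy.Encode(nil, src) // block format: whole input, length known upfront
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%d -> %d bytes, round-trip ok: %v\n", len(src), len(enc), bytes.Equal(src, dec))
}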
- maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/google/s2a-go/.gitignore b/vendor/github.com/google/s2a-go/.gitignore deleted file mode 100644 index 01764d1cd..000000000 --- a/vendor/github.com/google/s2a-go/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Ignore binaries without extension -//example/client/client -//example/server/server -//internal/v2/fakes2av2_server/fakes2av2_server - -.idea/ \ No newline at end of file diff --git a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md deleted file mode 100644 index dc079b4d6..000000000 --- a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,93 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of -experience, education, socio-economic status, nationality, personal appearance, -race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, or to ban temporarily or permanently any -contributor for other behaviors that they deem inappropriate, threatening, -offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
Representation of a project may be -further defined and clarified by project maintainers. - -This Code of Conduct also applies outside the project spaces when the Project -Steward has a reasonable belief that an individual's behavior may have a -negative impact on the project or its community. - -## Conflict Resolution - -We do not believe that all conflict is bad; healthy debate and disagreement -often yield positive results. However, it is never okay to be disrespectful or -to engage in behavior that violates the project’s code of conduct. - -If you see someone violating the code of conduct, you are encouraged to address -the behavior directly with those involved. Many issues can be resolved quickly -and easily, and this gives people more control over the outcome of their -dispute. If you are unable to resolve the matter for any reason, or if the -behavior is threatening or harassing, report it. We are dedicated to providing -an environment where participants feel welcome and safe. - -Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the -Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to -receive and address reported violations of the code of conduct. They will then -work with a committee consisting of representatives from the Open Source -Programs Office and the Google Open Source Strategy team. If for any reason you -are uncomfortable reaching out to the Project Steward, please email -opensource@google.com. - -We will investigate every complaint, but you may not receive a direct response. -We will use our discretion in determining when and how to follow up on reported -incidents, which may range from not taking action to permanent expulsion from -the project and project-sponsored spaces. We will notify the accused of the -report and provide them an opportunity to discuss it before any action is taken. -The identity of the reporter will be omitted from the details of the report -supplied to the accused. In potentially harmful situations, such as ongoing -harassment or threats to anyone's safety, we may take action without notice. - -## Attribution - -This Code of Conduct is adapted from the Contributor Covenant, version 1.4, -available at -https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/vendor/github.com/google/s2a-go/CONTRIBUTING.md b/vendor/github.com/google/s2a-go/CONTRIBUTING.md deleted file mode 100644 index 22b241cb7..000000000 --- a/vendor/github.com/google/s2a-go/CONTRIBUTING.md +++ /dev/null @@ -1,29 +0,0 @@ -# How to Contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement (CLA). You (or your employer) retain the copyright to your -contribution; this simply gives us permission to use and redistribute your -contributions as part of the project. Head over to - to see your current agreements on file or -to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult -[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more -information on using pull requests. 
- -## Community Guidelines - -This project follows -[Google's Open Source Community Guidelines](https://opensource.google/conduct/). diff --git a/vendor/github.com/google/s2a-go/LICENSE.md b/vendor/github.com/google/s2a-go/LICENSE.md deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/google/s2a-go/LICENSE.md +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md deleted file mode 100644 index fe0f5c1da..000000000 --- a/vendor/github.com/google/s2a-go/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Secure Session Agent Client Libraries - -The Secure Session Agent is a service that enables a workload to offload select -operations from the mTLS handshake and protects a workload's private key -material from exfiltration. Specifically, the workload asks the Secure Session -Agent for the TLS configuration to use during the handshake, to perform private -key operations, and to validate the peer certificate chain. The Secure Session -Agent's client libraries enable applications to communicate with the Secure -Session Agent during the TLS handshake, and to encrypt traffic to the peer -after the TLS handshake is complete. - -This repository contains the source code for the Secure Session Agent's Go -client libraries, which allow gRPC and HTTP Go applications to use the Secure Session -Agent. diff --git a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go deleted file mode 100644 index 034d1b912..000000000 --- a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * - * Copyright 2023 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package fallback provides default implementations of fallback options when S2A fails. -package fallback - -import ( - "context" - "crypto/tls" - "fmt" - "net" - - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -const ( - alpnProtoStrH2 = "h2" - alpnProtoStrHTTP = "http/1.1" - defaultHTTPSPort = "443" -) - -// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function. -// It supports GRPC use case, thus the alpn is set to 'h2'. -var FallbackTLSConfigGRPC = tls.Config{ - MinVersion: tls.VersionTLS13, - ClientSessionCache: nil, - NextProtos: []string{alpnProtoStrH2}, -} - -// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func. -// It supports the HTTP use case and the alpn is set to both 'http/1.1' and 'h2'. 
-var FallbackTLSConfigHTTP = tls.Config{ - MinVersion: tls.VersionTLS13, - ClientSessionCache: nil, - NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP}, -} - -// ClientHandshake establishes a TLS connection and returns it, plus its auth info. -// Inputs: -// -// targetServer: the server attempted with S2A. -// conn: the tcp connection to the server at address targetServer that was passed into S2A's ClientHandshake func. -// If fallback is successful, the `conn` should be closed. -// err: the error encountered when performing the client-side TLS handshake with S2A. -type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error) - -// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function, -// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info. -// Example use: -// -// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{ -// S2AAddress: s2aAddress, -// FallbackOpts: &s2a.FallbackOptions{ // optional -// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr), -// }, -// }) -// -// The fallback server's certificate must be verifiable using OS root store. -// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, -// it uses default port 443. -// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, -// and min TLS version is set to 1.3. -func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) { - var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC} - return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext) -} - -func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) { - fallbackServerAddr, err := processFallbackAddr(fallbackAddr) - if err != nil { - if grpclog.V(1) { - grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) - } - return nil, err - } - return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) { - fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr) - if fbErr != nil { - grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr) - return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr) - } - - tc, success := fbConn.(*tls.Conn) - if !success { - grpclog.Infof("the connection with fallback server is expected to be tls but isn't") - return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr) - } - - tlsInfo := credentials.TLSInfo{ - State: tc.ConnectionState(), - CommonAuthInfo: credentials.CommonAuthInfo{ - SecurityLevel: credentials.PrivacyAndIntegrity, - }, - } - if grpclog.V(1) { - grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol) - grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete) - grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName) - } - conn.Close() - return fbConn, tlsInfo, nil - }, nil -} - -// DefaultFallbackDialerAndAddress returns a TLS dialer and the 
network address to dial. -// Example use: -// -// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr) -// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ -// S2AAddress: s2aAddress, // required -// FallbackOpts: &s2a.FallbackOptions{ -// FallbackDialer: &s2a.FallbackDialer{ -// Dialer: fallbackDialer, -// ServerAddr: fallbackServerAddr, -// }, -// }, -// }) -// -// The fallback server's certificate should be verifiable using OS root store. -// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, -// it uses default port 443. -// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, -// and min TLS version is set to 1.3. -func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) { - fallbackServerAddr, err := processFallbackAddr(fallbackAddr) - if err != nil { - if grpclog.V(1) { - grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) - } - return nil, "", err - } - return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil -} - -func processFallbackAddr(fallbackAddr string) (string, error) { - var fallbackServerAddr string - var err error - - if fallbackAddr == "" { - return "", fmt.Errorf("empty fallback address") - } - _, _, err = net.SplitHostPort(fallbackAddr) - if err != nil { - // fallbackAddr does not have port suffix - fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort) - } else { - // FallbackServerAddr already has port suffix - fallbackServerAddr = fallbackAddr - } - return fallbackServerAddr, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go deleted file mode 100644 index aa3967f9d..000000000 --- a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package authinfo provides authentication and authorization information that -// results from the TLS handshake. -package authinfo - -import ( - "errors" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto" - grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto" - "google.golang.org/grpc/credentials" -) - -var _ credentials.AuthInfo = (*S2AAuthInfo)(nil) - -const s2aAuthType = "s2a" - -// S2AAuthInfo exposes authentication and authorization information from the -// S2A session result to the gRPC stack. -type S2AAuthInfo struct { - s2aContext *contextpb.S2AContext - commonAuthInfo credentials.CommonAuthInfo -} - -// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result. 
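The fallback helpers deleted above are consumed through the options shown in their own doc comments. As a minimal, hedged sketch of that wiring (the import paths follow the upstream module layout, and both the S2A address and the fallback address are placeholders), a client might install the default fallback handshake like this:

package main

import (
	"log"

	s2a "github.com/google/s2a-go"
	"github.com/google/s2a-go/fallback"
	"google.golang.org/grpc"
)

func main() {
	// Default fallback: if the S2A handshake fails, dial the fallback server
	// directly over TLS 1.3 with resumption disabled, as documented above.
	// "example.com" gets the default :443 appended by processFallbackAddr.
	fbHandshake, err := fallback.DefaultFallbackClientHandshakeFunc("example.com")
	if err != nil {
		log.Fatalf("building fallback handshake: %v", err)
	}

	// "s2a.local:61365" is a placeholder for the real S2A handshaker address.
	creds, err := s2a.NewClientCreds(&s2a.ClientOptions{
		S2AAddress: "s2a.local:61365",
		FallbackOpts: &s2a.FallbackOptions{
			FallbackClientHandshakeFunc: fbHandshake,
		},
	})
	if err != nil {
		log.Fatalf("creating S2A client credentials: %v", err)
	}

	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dialing: %v", err)
	}
	defer conn.Close()
}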
-func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) { - return newS2AAuthInfo(result) -} - -func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) { - if result == nil { - return nil, errors.New("NewS2aAuthInfo given nil session result") - } - return &S2AAuthInfo{ - s2aContext: &contextpb.S2AContext{ - ApplicationProtocol: result.GetApplicationProtocol(), - TlsVersion: result.GetState().GetTlsVersion(), - Ciphersuite: result.GetState().GetTlsCiphersuite(), - PeerIdentity: result.GetPeerIdentity(), - LocalIdentity: result.GetLocalIdentity(), - PeerCertFingerprint: result.GetPeerCertFingerprint(), - LocalCertFingerprint: result.GetLocalCertFingerprint(), - IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(), - }, - commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, - }, nil -} - -// AuthType returns the authentication type. -func (s *S2AAuthInfo) AuthType() string { - return s2aAuthType -} - -// ApplicationProtocol returns the application protocol, e.g. "grpc". -func (s *S2AAuthInfo) ApplicationProtocol() string { - return s.s2aContext.GetApplicationProtocol() -} - -// TLSVersion returns the TLS version negotiated during the handshake. -func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion { - return s.s2aContext.GetTlsVersion() -} - -// Ciphersuite returns the ciphersuite negotiated during the handshake. -func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite { - return s.s2aContext.GetCiphersuite() -} - -// PeerIdentity returns the authenticated identity of the peer. -func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity { - return s.s2aContext.GetPeerIdentity() -} - -// LocalIdentity returns the local identity of the application used during -// session setup. -func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity { - return s.s2aContext.GetLocalIdentity() -} - -// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in -// the S2A handshake. -func (s *S2AAuthInfo) PeerCertFingerprint() []byte { - return s.s2aContext.GetPeerCertFingerprint() -} - -// LocalCertFingerprint returns the SHA256 hash of the local certificate used -// in the S2A handshake. -func (s *S2AAuthInfo) LocalCertFingerprint() []byte { - return s.s2aContext.GetLocalCertFingerprint() -} - -// IsHandshakeResumed returns true if a cached session was used to resume -// the handshake. -func (s *S2AAuthInfo) IsHandshakeResumed() bool { - return s.s2aContext.GetIsHandshakeResumed() -} - -// SecurityLevel returns the security level of the connection. -func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel { - return s.commonAuthInfo.SecurityLevel -} diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go deleted file mode 100644 index 8297c9a97..000000000 --- a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go +++ /dev/null @@ -1,438 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
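S2AAuthInfo reaches applications only as the generic credentials.AuthInfo produced by the handshake. The sketch below is a hedged illustration, not part of the library: a gRPC server handler can inspect the result without importing the internal package by asserting against a locally declared interface that mirrors the accessor signatures above. The interface and helper names are hypothetical.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/peer"
)

// s2aPeerInfo mirrors a subset of the accessors defined on S2AAuthInfo above.
// The concrete type lives in an internal package, so the assertion is purely
// structural; the interface name is hypothetical.
type s2aPeerInfo interface {
	AuthType() string
	ApplicationProtocol() string
	IsHandshakeResumed() bool
	PeerCertFingerprint() []byte
}

// describePeer is a hypothetical helper a server handler might call with its
// request context to log what the S2A handshake negotiated.
func describePeer(ctx context.Context) {
	p, ok := peer.FromContext(ctx)
	if !ok || p.AuthInfo == nil {
		fmt.Println("no peer auth info available")
		return
	}
	info, ok := p.AuthInfo.(s2aPeerInfo)
	if !ok || info.AuthType() != "s2a" {
		fmt.Printf("peer authenticated via %q, not S2A\n", p.AuthInfo.AuthType())
		return
	}
	fmt.Printf("alpn=%s resumed=%t peer cert sha256=%x\n",
		info.ApplicationProtocol(), info.IsHandshakeResumed(), info.PeerCertFingerprint())
}

func main() {
	// With a bare context there is no peer, so this prints the first branch.
	describePeer(context.Background())
}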
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package handshaker communicates with the S2A handshaker service. -package handshaker - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "sync" - - "github.com/google/s2a-go/internal/authinfo" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" - "github.com/google/s2a-go/internal/record" - "github.com/google/s2a-go/internal/tokenmanager" - grpc "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -var ( - // appProtocol contains the application protocol accepted by the handshaker. - appProtocol = "grpc" - // frameLimit is the maximum size of a frame in bytes. - frameLimit = 1024 * 64 - // peerNotRespondingError is the error thrown when the peer doesn't respond. - errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted") -) - -// Handshaker defines a handshaker interface. -type Handshaker interface { - // ClientHandshake starts and completes a TLS handshake from the client side, - // and returns a secure connection along with additional auth information. - ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) - // ServerHandshake starts and completes a TLS handshake from the server side, - // and returns a secure connection along with additional auth information. - ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) - // Close terminates the Handshaker. It should be called when the handshake - // is complete. - Close() error -} - -// ClientHandshakerOptions contains the options needed to configure the S2A -// handshaker service on the client-side. -type ClientHandshakerOptions struct { - // MinTLSVersion specifies the min TLS version supported by the client. - MinTLSVersion commonpb.TLSVersion - // MaxTLSVersion specifies the max TLS version supported by the client. - MaxTLSVersion commonpb.TLSVersion - // TLSCiphersuites is the ordered list of ciphersuites supported by the - // client. - TLSCiphersuites []commonpb.Ciphersuite - // TargetIdentities contains a list of allowed server identities. One of the - // target identities should match the peer identity in the handshake - // result; otherwise, the handshake fails. - TargetIdentities []*commonpb.Identity - // LocalIdentity is the local identity of the client application. If none is - // provided, then the S2A will choose the default identity. - LocalIdentity *commonpb.Identity - // TargetName is the allowed server name, which may be used for server - // authorization check by the S2A if it is provided. - TargetName string - // EnsureProcessSessionTickets allows users to wait and ensure that all - // available session tickets are sent to S2A before a process completes. - EnsureProcessSessionTickets *sync.WaitGroup -} - -// ServerHandshakerOptions contains the options needed to configure the S2A -// handshaker service on the server-side. -type ServerHandshakerOptions struct { - // MinTLSVersion specifies the min TLS version supported by the server. - MinTLSVersion commonpb.TLSVersion - // MaxTLSVersion specifies the max TLS version supported by the server. - MaxTLSVersion commonpb.TLSVersion - // TLSCiphersuites is the ordered list of ciphersuites supported by the - // server. 
- TLSCiphersuites []commonpb.Ciphersuite - // LocalIdentities is the list of local identities that may be assumed by - // the server. If no local identity is specified, then the S2A chooses a - // default local identity. - LocalIdentities []*commonpb.Identity -} - -// s2aHandshaker performs a TLS handshake using the S2A handshaker service. -type s2aHandshaker struct { - // stream is used to communicate with the S2A handshaker service. - stream s2apb.S2AService_SetUpSessionClient - // conn is the connection to the peer. - conn net.Conn - // clientOpts should be non-nil iff the handshaker is client-side. - clientOpts *ClientHandshakerOptions - // serverOpts should be non-nil iff the handshaker is server-side. - serverOpts *ServerHandshakerOptions - // isClient determines if the handshaker is client or server side. - isClient bool - // hsAddr stores the address of the S2A handshaker service. - hsAddr string - // tokenManager manages access tokens for authenticating to S2A. - tokenManager tokenmanager.AccessTokenManager - // localIdentities is the set of local identities for whom the - // tokenManager should fetch a token when preparing a request to be - // sent to S2A. - localIdentities []*commonpb.Identity -} - -// NewClientHandshaker creates an s2aHandshaker instance that performs a -// client-side TLS handshake using the S2A handshaker service. -func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) { - stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - if err != nil { - grpclog.Infof("failed to create single token access token manager: %v", err) - } - return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil -} - -func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { - var localIdentities []*commonpb.Identity - if opts != nil { - localIdentities = []*commonpb.Identity{opts.LocalIdentity} - } - return &s2aHandshaker{ - stream: stream, - conn: c, - clientOpts: opts, - isClient: true, - hsAddr: hsAddr, - tokenManager: tokenManager, - localIdentities: localIdentities, - } -} - -// NewServerHandshaker creates an s2aHandshaker instance that performs a -// server-side TLS handshake using the S2A handshaker service. 
-func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) { - stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } - tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - if err != nil { - grpclog.Infof("failed to create single token access token manager: %v", err) - } - return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil -} - -func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { - var localIdentities []*commonpb.Identity - if opts != nil { - localIdentities = opts.LocalIdentities - } - return &s2aHandshaker{ - stream: stream, - conn: c, - serverOpts: opts, - isClient: false, - hsAddr: hsAddr, - tokenManager: tokenManager, - localIdentities: localIdentities, - } -} - -// ClientHandshake performs a client-side TLS handshake using the S2A handshaker -// service. When complete, returns a TLS connection. -func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { - if !h.isClient { - return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake") - } - // Extract the hostname from the target name. The target name is assumed to be an authority. - hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName) - if err != nil { - // If the target name had no host port or could not be parsed, use it as is. - hostname = h.clientOpts.TargetName - } - - // Prepare a client start message to send to the S2A handshaker service. - req := &s2apb.SessionReq{ - ReqOneof: &s2apb.SessionReq_ClientStart{ - ClientStart: &s2apb.ClientSessionStartReq{ - ApplicationProtocols: []string{appProtocol}, - MinTlsVersion: h.clientOpts.MinTLSVersion, - MaxTlsVersion: h.clientOpts.MaxTLSVersion, - TlsCiphersuites: h.clientOpts.TLSCiphersuites, - TargetIdentities: h.clientOpts.TargetIdentities, - LocalIdentity: h.clientOpts.LocalIdentity, - TargetName: hostname, - }, - }, - AuthMechanisms: h.getAuthMechanisms(), - } - conn, result, err := h.setUpSession(req) - if err != nil { - return nil, nil, err - } - authInfo, err := authinfo.NewS2AAuthInfo(result) - if err != nil { - return nil, nil, err - } - return conn, authInfo, nil -} - -// ServerHandshake performs a server-side TLS handshake using the S2A handshaker -// service. When complete, returns a TLS connection. -func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { - if h.isClient { - return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake") - } - p := make([]byte, frameLimit) - n, err := h.conn.Read(p) - if err != nil { - return nil, nil, err - } - // Prepare a server start message to send to the S2A handshaker service. 
- req := &s2apb.SessionReq{ - ReqOneof: &s2apb.SessionReq_ServerStart{ - ServerStart: &s2apb.ServerSessionStartReq{ - ApplicationProtocols: []string{appProtocol}, - MinTlsVersion: h.serverOpts.MinTLSVersion, - MaxTlsVersion: h.serverOpts.MaxTLSVersion, - TlsCiphersuites: h.serverOpts.TLSCiphersuites, - LocalIdentities: h.serverOpts.LocalIdentities, - InBytes: p[:n], - }, - }, - AuthMechanisms: h.getAuthMechanisms(), - } - conn, result, err := h.setUpSession(req) - if err != nil { - return nil, nil, err - } - authInfo, err := authinfo.NewS2AAuthInfo(result) - if err != nil { - return nil, nil, err - } - return conn, authInfo, nil -} - -// setUpSession proxies messages between the peer and the S2A handshaker -// service. -func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) { - resp, err := h.accessHandshakerService(req) - if err != nil { - return nil, nil, err - } - // Check if the returned status is an error. - if resp.GetStatus() != nil { - if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { - return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) - } - } - // Calculate the extra unread bytes from the Session. Attempting to consume - // more than the bytes sent will throw an error. - var extra []byte - if req.GetServerStart() != nil { - if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { - return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") - } - extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] - } - result, extra, err := h.processUntilDone(resp, extra) - if err != nil { - return nil, nil, err - } - if result.GetLocalIdentity() == nil { - return nil, nil, errors.New("local identity must be populated in session result") - } - - // Create a new TLS record protocol using the Session Result. - newConn, err := record.NewConn(&record.ConnParameters{ - NetConn: h.conn, - Ciphersuite: result.GetState().GetTlsCiphersuite(), - TLSVersion: result.GetState().GetTlsVersion(), - InTrafficSecret: result.GetState().GetInKey(), - OutTrafficSecret: result.GetState().GetOutKey(), - UnusedBuf: extra, - InSequence: result.GetState().GetInSequence(), - OutSequence: result.GetState().GetOutSequence(), - HSAddr: h.hsAddr, - ConnectionID: result.GetState().GetConnectionId(), - LocalIdentity: result.GetLocalIdentity(), - EnsureProcessSessionTickets: h.ensureProcessSessionTickets(), - }) - if err != nil { - return nil, nil, err - } - return newConn, result, nil -} - -func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup { - if h.clientOpts == nil { - return nil - } - return h.clientOpts.EnsureProcessSessionTickets -} - -// accessHandshakerService sends the session request to the S2A handshaker -// service and returns the session response. -func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) { - if err := h.stream.Send(req); err != nil { - return nil, err - } - resp, err := h.stream.Recv() - if err != nil { - return nil, err - } - return resp, nil -} - -// processUntilDone continues proxying messages between the peer and the S2A -// handshaker service until the handshaker service returns the SessionResult at -// the end of the handshake or an error occurs. 
-func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) { - for { - if len(resp.OutFrames) > 0 { - if _, err := h.conn.Write(resp.OutFrames); err != nil { - return nil, nil, err - } - } - if resp.Result != nil { - return resp.Result, unusedBytes, nil - } - buf := make([]byte, frameLimit) - n, err := h.conn.Read(buf) - if err != nil && err != io.EOF { - return nil, nil, err - } - // If there is nothing to send to the handshaker service and nothing is - // received from the peer, then we are stuck. This covers the case when - // the peer is not responding. Note that handshaker service connection - // issues are caught in accessHandshakerService before we even get - // here. - if len(resp.OutFrames) == 0 && n == 0 { - return nil, nil, errPeerNotResponding - } - // Append extra bytes from the previous interaction with the handshaker - // service with the current buffer read from conn. - p := append(unusedBytes, buf[:n]...) - // From here on, p and unusedBytes point to the same slice. - resp, err = h.accessHandshakerService(&s2apb.SessionReq{ - ReqOneof: &s2apb.SessionReq_Next{ - Next: &s2apb.SessionNextReq{ - InBytes: p, - }, - }, - AuthMechanisms: h.getAuthMechanisms(), - }) - if err != nil { - return nil, nil, err - } - - // Cache the local identity returned by S2A, if it is populated. This - // overwrites any existing local identities. This is done because, once the - // S2A has selected a local identity, then only that local identity should - // be asserted in future requests until the end of the current handshake. - if resp.GetLocalIdentity() != nil { - h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()} - } - - // Set unusedBytes based on the handshaker service response. - if resp.GetBytesConsumed() > uint32(len(p)) { - return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") - } - unusedBytes = p[resp.GetBytesConsumed():] - } -} - -// Close shuts down the handshaker and the stream to the S2A handshaker service -// when the handshake is complete. It should be called when the caller obtains -// the secure connection at the end of the handshake. -func (h *s2aHandshaker) Close() error { - return h.stream.CloseSend() -} - -func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism { - if h.tokenManager == nil { - return nil - } - // First handle the special case when no local identities have been provided - // by the application. In this case, an AuthenticationMechanism with no local - // identity will be sent. - if len(h.localIdentities) == 0 { - token, err := h.tokenManager.DefaultToken() - if err != nil { - grpclog.Infof("unable to get token for empty local identity: %v", err) - return nil - } - return []*s2apb.AuthenticationMechanism{ - { - MechanismOneof: &s2apb.AuthenticationMechanism_Token{ - Token: token, - }, - }, - } - } - - // Next, handle the case where the application (or the S2A) has provided - // one or more local identities. 
- var authMechanisms []*s2apb.AuthenticationMechanism - for _, localIdentity := range h.localIdentities { - token, err := h.tokenManager.Token(localIdentity) - if err != nil { - grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err) - continue - } - - authMechanism := &s2apb.AuthenticationMechanism{ - Identity: localIdentity, - MechanismOneof: &s2apb.AuthenticationMechanism_Token{ - Token: token, - }, - } - authMechanisms = append(authMechanisms, authMechanism) - } - return authMechanisms -} diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go deleted file mode 100644 index ed4496537..000000000 --- a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package service is a utility for calling the S2A handshaker service. -package service - -import ( - "context" - "sync" - - grpc "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" -) - -var ( - // mu guards hsConnMap and hsDialer. - mu sync.Mutex - // hsConnMap represents a mapping from an S2A handshaker service address - // to a corresponding connection to an S2A handshaker service instance. - hsConnMap = make(map[string]*grpc.ClientConn) - // hsDialer will be reassigned in tests. - hsDialer = grpc.DialContext -) - -// Dial dials the S2A handshaker service. If a connection has already been -// established, this function returns it. Otherwise, a new connection is -// created. -func Dial(ctx context.Context, handshakerServiceAddress string, transportCreds credentials.TransportCredentials) (*grpc.ClientConn, error) { - mu.Lock() - defer mu.Unlock() - - hsConn, ok := hsConnMap[handshakerServiceAddress] - if !ok { - // Create a new connection to the S2A handshaker service. Note that - // this connection stays open until the application is closed. - var grpcOpts []grpc.DialOption - if transportCreds != nil { - grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(transportCreds)) - } else { - grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - var err error - hsConn, err = hsDialer(ctx, handshakerServiceAddress, grpcOpts...) - if err != nil { - return nil, err - } - hsConnMap[handshakerServiceAddress] = hsConn - } - return hsConn, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go deleted file mode 100644 index 16278a1d9..000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
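service.Dial above keeps one long-lived connection per handshaker address behind a mutex and reuses it on later calls. A dependency-free sketch of the same cache-or-dial pattern, with a string value and a hypothetical dial callback standing in for *grpc.ClientConn and grpc.DialContext:

package main

import (
	"fmt"
	"sync"
)

// connCache memoizes one connection-like value per address, mirroring the
// mutex-guarded map in service.Dial above. A string stands in for
// *grpc.ClientConn so the sketch stays dependency-free.
type connCache struct {
	mu    sync.Mutex
	conns map[string]string
	dial  func(addr string) (string, error) // hypothetical dialer
}

func (c *connCache) get(addr string) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if conn, ok := c.conns[addr]; ok {
		return conn, nil // reuse the connection created on an earlier call
	}
	conn, err := c.dial(addr)
	if err != nil {
		return "", err
	}
	if c.conns == nil {
		c.conns = make(map[string]string)
	}
	c.conns[addr] = conn
	return conn, nil
}

func main() {
	dials := 0
	cache := &connCache{dial: func(addr string) (string, error) {
		dials++
		return fmt.Sprintf("conn#%d->%s", dials, addr), nil
	}}
	for i := 0; i < 2; i++ {
		conn, _ := cache.get("s2a.local:61365")
		fmt.Println(conn) // prints conn#1 both times: the second call hits the cache
	}
}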
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/common/common.proto - -package common_go_proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The ciphersuites supported by S2A. The name determines the confidentiality, -// and authentication ciphers as well as the hash algorithm used for PRF in -// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are: -// - AEAD -- for encryption and authentication, e.g., AES_128_GCM. -// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256. -type Ciphersuite int32 - -const ( - Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0 - Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1 - Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2 -) - -// Enum value maps for Ciphersuite. -var ( - Ciphersuite_name = map[int32]string{ - 0: "AES_128_GCM_SHA256", - 1: "AES_256_GCM_SHA384", - 2: "CHACHA20_POLY1305_SHA256", - } - Ciphersuite_value = map[string]int32{ - "AES_128_GCM_SHA256": 0, - "AES_256_GCM_SHA384": 1, - "CHACHA20_POLY1305_SHA256": 2, - } -) - -func (x Ciphersuite) Enum() *Ciphersuite { - p := new(Ciphersuite) - *p = x - return p -} - -func (x Ciphersuite) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_common_common_proto_enumTypes[0].Descriptor() -} - -func (Ciphersuite) Type() protoreflect.EnumType { - return &file_internal_proto_common_common_proto_enumTypes[0] -} - -func (x Ciphersuite) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Ciphersuite.Descriptor instead. -func (Ciphersuite) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} -} - -// The TLS versions supported by S2A's handshaker module. -type TLSVersion int32 - -const ( - TLSVersion_TLS1_2 TLSVersion = 0 - TLSVersion_TLS1_3 TLSVersion = 1 -) - -// Enum value maps for TLSVersion. 
-var ( - TLSVersion_name = map[int32]string{ - 0: "TLS1_2", - 1: "TLS1_3", - } - TLSVersion_value = map[string]int32{ - "TLS1_2": 0, - "TLS1_3": 1, - } -) - -func (x TLSVersion) Enum() *TLSVersion { - p := new(TLSVersion) - *p = x - return p -} - -func (x TLSVersion) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_common_common_proto_enumTypes[1].Descriptor() -} - -func (TLSVersion) Type() protoreflect.EnumType { - return &file_internal_proto_common_common_proto_enumTypes[1] -} - -func (x TLSVersion) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use TLSVersion.Descriptor instead. -func (TLSVersion) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1} -} - -type Identity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to IdentityOneof: - // - // *Identity_SpiffeId - // *Identity_Hostname - // *Identity_Uid - // *Identity_MdbUsername - // *Identity_GaiaId - IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` - // Additional identity-specific attributes. - Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *Identity) Reset() { - *x = Identity{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_common_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Identity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Identity) ProtoMessage() {} - -func (x *Identity) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_common_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Identity.ProtoReflect.Descriptor instead. -func (*Identity) Descriptor() ([]byte, []int) { - return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} -} - -func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { - if m != nil { - return m.IdentityOneof - } - return nil -} - -func (x *Identity) GetSpiffeId() string { - if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { - return x.SpiffeId - } - return "" -} - -func (x *Identity) GetHostname() string { - if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { - return x.Hostname - } - return "" -} - -func (x *Identity) GetUid() string { - if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { - return x.Uid - } - return "" -} - -func (x *Identity) GetMdbUsername() string { - if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { - return x.MdbUsername - } - return "" -} - -func (x *Identity) GetGaiaId() string { - if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { - return x.GaiaId - } - return "" -} - -func (x *Identity) GetAttributes() map[string]string { - if x != nil { - return x.Attributes - } - return nil -} - -type isIdentity_IdentityOneof interface { - isIdentity_IdentityOneof() -} - -type Identity_SpiffeId struct { - // The SPIFFE ID of a connection endpoint. 
- SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` -} - -type Identity_Hostname struct { - // The hostname of a connection endpoint. - Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` -} - -type Identity_Uid struct { - // The UID of a connection endpoint. - Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` -} - -type Identity_MdbUsername struct { - // The MDB username of a connection endpoint. - MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` -} - -type Identity_GaiaId struct { - // The Gaia ID of a connection endpoint. - GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` -} - -func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} - -func (*Identity_Hostname) isIdentity_IdentityOneof() {} - -func (*Identity_Uid) isIdentity_IdentityOneof() {} - -func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} - -func (*Identity_GaiaId) isIdentity_IdentityOneof() {} - -var File_internal_proto_common_common_proto protoreflect.FileDescriptor - -var file_internal_proto_common_common_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, - 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 
0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, - 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, - 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, - 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, - 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_proto_common_common_proto_rawDescOnce sync.Once - file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc -) - -func file_internal_proto_common_common_proto_rawDescGZIP() []byte { - file_internal_proto_common_common_proto_rawDescOnce.Do(func() { - file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData) - }) - return file_internal_proto_common_common_proto_rawDescData -} - -var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_proto_common_common_proto_goTypes = []interface{}{ - (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite - (TLSVersion)(0), // 1: s2a.proto.TLSVersion - (*Identity)(nil), // 2: s2a.proto.Identity - nil, // 3: s2a.proto.Identity.AttributesEntry -} -var file_internal_proto_common_common_proto_depIdxs = []int32{ - 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_internal_proto_common_common_proto_init() } -func file_internal_proto_common_common_proto_init() { - if File_internal_proto_common_common_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Identity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Identity_SpiffeId)(nil), - (*Identity_Hostname)(nil), - (*Identity_Uid)(nil), - (*Identity_MdbUsername)(nil), - (*Identity_GaiaId)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_common_common_proto_rawDesc, - NumEnums: 2, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_proto_common_common_proto_goTypes, - DependencyIndexes: file_internal_proto_common_common_proto_depIdxs, - EnumInfos: file_internal_proto_common_common_proto_enumTypes, - MessageInfos: 
file_internal_proto_common_common_proto_msgTypes, - }.Build() - File_internal_proto_common_common_proto = out.File - file_internal_proto_common_common_proto_rawDesc = nil - file_internal_proto_common_common_proto_goTypes = nil - file_internal_proto_common_common_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go deleted file mode 100644 index f4f763ae1..000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/s2a_context/s2a_context.proto - -package s2a_context_go_proto - -import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type S2AContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocol negotiated for this connection, e.g., 'grpc'. - ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` - // The TLS version number that the S2A's handshaker module used to set up the - // session. - TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` - // The TLS ciphersuite negotiated by the S2A's handshaker module. - Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"` - // The authenticated identity of the peer. - PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` - // The local identity used during session setup. This could be: - // - The local identity that the client specifies in ClientSessionStartReq. - // - One of the local identities that the server specifies in - // ServerSessionStartReq. - // - If neither client or server specifies local identities, the S2A picks the - // default one. In this case, this field will contain that identity. 
- LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The SHA256 hash of the peer certificate used in the handshake. - PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` - // The SHA256 hash of the local certificate used in the handshake. - LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` - // Set to true if a cached session was reused to resume the handshake. - IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` -} - -func (x *S2AContext) Reset() { - *x = S2AContext{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *S2AContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*S2AContext) ProtoMessage() {} - -func (x *S2AContext) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. -func (*S2AContext) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} -} - -func (x *S2AContext) GetApplicationProtocol() string { - if x != nil { - return x.ApplicationProtocol - } - return "" -} - -func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.TlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite { - if x != nil { - return x.Ciphersuite - } - return common_go_proto.Ciphersuite(0) -} - -func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity { - if x != nil { - return x.PeerIdentity - } - return nil -} - -func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *S2AContext) GetPeerCertFingerprint() []byte { - if x != nil { - return x.PeerCertFingerprint - } - return nil -} - -func (x *S2AContext) GetLocalCertFingerprint() []byte { - if x != nil { - return x.LocalCertFingerprint - } - return nil -} - -func (x *S2AContext) GetIsHandshakeResumed() bool { - if x != nil { - return x.IsHandshakeResumed - } - return false -} - -var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor - -var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, - 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03, - 
0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14, - 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, - 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, - 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, - 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, - 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, - 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once - file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc -) - -func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { - file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { 
- file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData) - }) - return file_internal_proto_s2a_context_s2a_context_proto_rawDescData -} - -var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ - (*S2AContext)(nil), // 0: s2a.proto.S2AContext - (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion - (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite - (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity -} -var file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion - 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite - 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity - 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_internal_proto_s2a_context_s2a_context_proto_init() } -func file_internal_proto_s2a_context_s2a_context_proto_init() { - if File_internal_proto_s2a_context_s2a_context_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*S2AContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes, - DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs, - MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes, - }.Build() - File_internal_proto_s2a_context_s2a_context_proto = out.File - file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil - file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil - file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go deleted file mode 100644 index 0a86ebee5..000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +++ /dev/null @@ -1,1377 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/s2a/s2a.proto - -package s2a_go_proto - -import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type AuthenticationMechanism struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // (Optional) Application may specify an identity associated to an - // authentication mechanism. Otherwise, S2A assumes that the authentication - // mechanism is associated with the default identity. If the default identity - // cannot be determined, session setup fails. - Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` - // Types that are assignable to MechanismOneof: - // - // *AuthenticationMechanism_Token - MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` -} - -func (x *AuthenticationMechanism) Reset() { - *x = AuthenticationMechanism{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthenticationMechanism) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthenticationMechanism) ProtoMessage() {} - -func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. -func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0} -} - -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { - if x != nil { - return x.Identity - } - return nil -} - -func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { - if m != nil { - return m.MechanismOneof - } - return nil -} - -func (x *AuthenticationMechanism) GetToken() string { - if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { - return x.Token - } - return "" -} - -type isAuthenticationMechanism_MechanismOneof interface { - isAuthenticationMechanism_MechanismOneof() -} - -type AuthenticationMechanism_Token struct { - // A token that the application uses to authenticate itself to the S2A. - Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` -} - -func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} - -type ClientSessionStartReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocols supported by the client, e.g., "grpc". 
- ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` - // (Optional) The minimum TLS version number that the S2A's handshaker module - // will use to set up the session. If this field is not provided, S2A will use - // the minimum version it supports. - MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` - // (Optional) The maximum TLS version number that the S2A's handshaker module - // will use to set up the session. If this field is not provided, S2A will use - // the maximum version it supports. - MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` - // The TLS ciphersuites that the client is willing to support. - TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` - // (Optional) Describes which server identities are acceptable by the client. - // If target identities are provided and none of them matches the peer - // identity of the server, session setup fails. - TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` - // (Optional) Application may specify a local identity. Otherwise, S2A chooses - // the default local identity. If the default identity cannot be determined, - // session setup fails. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The target name that is used by S2A to configure SNI in the TLS handshake. - // It is also used to perform server authorization check if avaiable. This - // check is intended to verify that the peer authenticated identity is - // authorized to run a service with the target name. - // This field MUST only contain the host portion of the server address. It - // MUST not contain the scheme or the port number. For example, if the server - // address is dns://www.example.com:443, the value of this field should be - // set to www.example.com. - TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` -} - -func (x *ClientSessionStartReq) Reset() { - *x = ClientSessionStartReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ClientSessionStartReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ClientSessionStartReq) ProtoMessage() {} - -func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead. 
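The TargetName documented above must contain only the host portion of the server address, and the deleted ClientHandshake derives it with net.SplitHostPort, falling back to the raw target when no port is present. A small standalone sketch of that derivation (the helper name is hypothetical):

package main

import (
	"fmt"
	"net"
)

// targetNameForSNI strips an optional :port suffix, mirroring how the deleted
// ClientHandshake prepares TargetName for the ClientSessionStartReq.
func targetNameForSNI(target string) string {
	host, _, err := net.SplitHostPort(target)
	if err != nil {
		// No port (or an unparsable authority): use the target as-is.
		return target
	}
	return host
}

func main() {
	fmt.Println(targetNameForSNI("www.example.com:443")) // www.example.com
	fmt.Println(targetNameForSNI("www.example.com"))     // www.example.com
}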
-func (*ClientSessionStartReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1} -} - -func (x *ClientSessionStartReq) GetApplicationProtocols() []string { - if x != nil { - return x.ApplicationProtocols - } - return nil -} - -func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MinTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MaxTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { - if x != nil { - return x.TlsCiphersuites - } - return nil -} - -func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity { - if x != nil { - return x.TargetIdentities - } - return nil -} - -func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *ClientSessionStartReq) GetTargetName() string { - if x != nil { - return x.TargetName - } - return "" -} - -type ServerSessionStartReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocols supported by the server, e.g., "grpc". - ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` - // (Optional) The minimum TLS version number that the S2A's handshaker module - // will use to set up the session. If this field is not provided, S2A will use - // the minimum version it supports. - MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` - // (Optional) The maximum TLS version number that the S2A's handshaker module - // will use to set up the session. If this field is not provided, S2A will use - // the maximum version it supports. - MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` - // The TLS ciphersuites that the server is willing to support. - TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` - // (Optional) A list of local identities supported by the server, if - // specified. Otherwise, S2A chooses the default local identity. If the - // default identity cannot be determined, session setup fails. - LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` - // The byte representation of the first handshake message received from the - // client peer. It is possible that this first message is split into multiple - // chunks. In this case, the first chunk is sent using this field and the - // following chunks are sent using the in_bytes field of SessionNextReq - // Specifically, if the client peer is using S2A, this field contains the - // bytes in the out_frames field of SessionResp message that the client peer - // received from its S2A after initiating the handshake. 
- InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` -} - -func (x *ServerSessionStartReq) Reset() { - *x = ServerSessionStartReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerSessionStartReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerSessionStartReq) ProtoMessage() {} - -func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead. -func (*ServerSessionStartReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2} -} - -func (x *ServerSessionStartReq) GetApplicationProtocols() []string { - if x != nil { - return x.ApplicationProtocols - } - return nil -} - -func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MinTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MaxTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { - if x != nil { - return x.TlsCiphersuites - } - return nil -} - -func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity { - if x != nil { - return x.LocalIdentities - } - return nil -} - -func (x *ServerSessionStartReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -type SessionNextReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The byte representation of session setup, i.e., handshake messages. - // Specifically: - // - All handshake messages sent from the server to the client. - // - All, except for the first, handshake messages sent from the client to - // the server. Note that the first message is communicated to S2A using the - // in_bytes field of ServerSessionStartReq. - // - // If the peer is using S2A, this field contains the bytes in the out_frames - // field of SessionResp message that the peer received from its S2A. - InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` -} - -func (x *SessionNextReq) Reset() { - *x = SessionNextReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionNextReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionNextReq) ProtoMessage() {} - -func (x *SessionNextReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead. 
-func (*SessionNextReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3} -} - -func (x *SessionNextReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -type ResumptionTicketReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The byte representation of a NewSessionTicket message received from the - // server. - InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` - // A connection identifier that was created and sent by S2A at the end of a - // handshake. - ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` - // The local identity that was used by S2A during session setup and included - // in |SessionResult|. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` -} - -func (x *ResumptionTicketReq) Reset() { - *x = ResumptionTicketReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResumptionTicketReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResumptionTicketReq) ProtoMessage() {} - -func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead. -func (*ResumptionTicketReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4} -} - -func (x *ResumptionTicketReq) GetInBytes() [][]byte { - if x != nil { - return x.InBytes - } - return nil -} - -func (x *ResumptionTicketReq) GetConnectionId() uint64 { - if x != nil { - return x.ConnectionId - } - return 0 -} - -func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -type SessionReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ReqOneof: - // - // *SessionReq_ClientStart - // *SessionReq_ServerStart - // *SessionReq_Next - // *SessionReq_ResumptionTicket - ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` - // (Optional) The authentication mechanisms that the client wishes to use to - // authenticate to the S2A, ordered by preference. The S2A will always use the - // first authentication mechanism that appears in the list and is supported by - // the S2A. 
- AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"` -} - -func (x *SessionReq) Reset() { - *x = SessionReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionReq) ProtoMessage() {} - -func (x *SessionReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. -func (*SessionReq) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5} -} - -func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { - if m != nil { - return m.ReqOneof - } - return nil -} - -func (x *SessionReq) GetClientStart() *ClientSessionStartReq { - if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok { - return x.ClientStart - } - return nil -} - -func (x *SessionReq) GetServerStart() *ServerSessionStartReq { - if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok { - return x.ServerStart - } - return nil -} - -func (x *SessionReq) GetNext() *SessionNextReq { - if x, ok := x.GetReqOneof().(*SessionReq_Next); ok { - return x.Next - } - return nil -} - -func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq { - if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok { - return x.ResumptionTicket - } - return nil -} - -func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism { - if x != nil { - return x.AuthMechanisms - } - return nil -} - -type isSessionReq_ReqOneof interface { - isSessionReq_ReqOneof() -} - -type SessionReq_ClientStart struct { - // The client session setup request message. - ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` -} - -type SessionReq_ServerStart struct { - // The server session setup request message. - ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` -} - -type SessionReq_Next struct { - // The next session setup message request message. - Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` -} - -type SessionReq_ResumptionTicket struct { - // The resumption ticket that is received from the server. This message is - // only accepted by S2A if it is running as a client and if it is received - // after session setup is complete. If S2A is running as a server and it - // receives this message, the session is terminated. - ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"` -} - -func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {} - -func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {} - -func (*SessionReq_Next) isSessionReq_ReqOneof() {} - -func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {} - -type SessionState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The TLS version number that the S2A's handshaker module used to set up the - // session. 
- TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` - // The TLS ciphersuite negotiated by the S2A's handshaker module. - TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"` - // The sequence number of the next, incoming, TLS record. - InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"` - // The sequence number of the next, outgoing, TLS record. - OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"` - // The key for the inbound direction. - InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"` - // The key for the outbound direction. - OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"` - // The constant part of the record nonce for the outbound direction. - InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"` - // The constant part of the record nonce for the inbound direction. - OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"` - // A connection identifier that can be provided to S2A to perform operations - // related to this connection. This identifier will be stored by the record - // protocol, and included in the |ResumptionTicketReq| message that is later - // sent back to S2A. This field is set only for client-side connections. - ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` - // Set to true if a cached session was reused to do an abbreviated handshake. - IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` -} - -func (x *SessionState) Reset() { - *x = SessionState{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionState) ProtoMessage() {} - -func (x *SessionState) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionState.ProtoReflect.Descriptor instead. 
-func (*SessionState) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6} -} - -func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.TlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite { - if x != nil { - return x.TlsCiphersuite - } - return common_go_proto.Ciphersuite(0) -} - -func (x *SessionState) GetInSequence() uint64 { - if x != nil { - return x.InSequence - } - return 0 -} - -func (x *SessionState) GetOutSequence() uint64 { - if x != nil { - return x.OutSequence - } - return 0 -} - -func (x *SessionState) GetInKey() []byte { - if x != nil { - return x.InKey - } - return nil -} - -func (x *SessionState) GetOutKey() []byte { - if x != nil { - return x.OutKey - } - return nil -} - -func (x *SessionState) GetInFixedNonce() []byte { - if x != nil { - return x.InFixedNonce - } - return nil -} - -func (x *SessionState) GetOutFixedNonce() []byte { - if x != nil { - return x.OutFixedNonce - } - return nil -} - -func (x *SessionState) GetConnectionId() uint64 { - if x != nil { - return x.ConnectionId - } - return 0 -} - -func (x *SessionState) GetIsHandshakeResumed() bool { - if x != nil { - return x.IsHandshakeResumed - } - return false -} - -type SessionResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The application protocol negotiated for this session. - ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` - // The session state at the end. This state contains all cryptographic - // material required to initialize the record protocol object. - State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - // The authenticated identity of the peer. - PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` - // The local identity used during session setup. This could be: - // - The local identity that the client specifies in ClientSessionStartReq. - // - One of the local identities that the server specifies in - // ServerSessionStartReq. - // - If neither client or server specifies local identities, the S2A picks the - // default one. In this case, this field will contain that identity. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The SHA256 hash of the local certificate used in the handshake. - LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` - // The SHA256 hash of the peer certificate used in the handshake. 
- PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` -} - -func (x *SessionResult) Reset() { - *x = SessionResult{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionResult) ProtoMessage() {} - -func (x *SessionResult) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead. -func (*SessionResult) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7} -} - -func (x *SessionResult) GetApplicationProtocol() string { - if x != nil { - return x.ApplicationProtocol - } - return "" -} - -func (x *SessionResult) GetState() *SessionState { - if x != nil { - return x.State - } - return nil -} - -func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity { - if x != nil { - return x.PeerIdentity - } - return nil -} - -func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *SessionResult) GetLocalCertFingerprint() []byte { - if x != nil { - return x.LocalCertFingerprint - } - return nil -} - -func (x *SessionResult) GetPeerCertFingerprint() []byte { - if x != nil { - return x.PeerCertFingerprint - } - return nil -} - -type SessionStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status code that is specific to the application and the implementation - // of S2A, e.g., gRPC status code. - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // The status details. - Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` -} - -func (x *SessionStatus) Reset() { - *x = SessionStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionStatus) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionStatus) ProtoMessage() {} - -func (x *SessionStatus) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead. -func (*SessionStatus) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8} -} - -func (x *SessionStatus) GetCode() uint32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *SessionStatus) GetDetails() string { - if x != nil { - return x.Details - } - return "" -} - -type SessionResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The local identity used during session setup. 
This could be: - // - The local identity that the client specifies in ClientSessionStartReq. - // - One of the local identities that the server specifies in - // ServerSessionStartReq. - // - If neither client or server specifies local identities, the S2A picks the - // default one. In this case, this field will contain that identity. - // - // If the SessionResult is populated, then this must coincide with the local - // identity specified in the SessionResult; otherwise, the handshake must - // fail. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The byte representation of the frames that should be sent to the peer. May - // be empty if nothing needs to be sent to the peer or if in_bytes in the - // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent - // to the peer even if the session setup status is not OK as these frames may - // contain appropriate alerts. - OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` - // Number of bytes in the in_bytes field that are consumed by S2A. It is - // possible that part of in_bytes is unrelated to the session setup process. - BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` - // This is set if the session is successfully set up. out_frames may - // still be set to frames that needs to be forwarded to the peer. - Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` - // Status of session setup at the current stage. - Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` -} - -func (x *SessionResp) Reset() { - *x = SessionResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionResp) ProtoMessage() {} - -func (x *SessionResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
-func (*SessionResp) Descriptor() ([]byte, []int) { - return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9} -} - -func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *SessionResp) GetOutFrames() []byte { - if x != nil { - return x.OutFrames - } - return nil -} - -func (x *SessionResp) GetBytesConsumed() uint32 { - if x != nil { - return x.BytesConsumed - } - return 0 -} - -func (x *SessionResp) GetResult() *SessionResult { - if x != nil { - return x.Result - } - return nil -} - -func (x *SessionResp) GetStatus() *SessionStatus { - if x != nil { - return x.Status - } - return nil -} - -var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor - -var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a, - 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, - 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, - 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 
0x75, - 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, - 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, - 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, - 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 
0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, - 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04, - 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, - 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, - 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, - 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f, - 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d, - 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, - 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, - 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f, - 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, - 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, - 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, - 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75, - 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, - 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, - 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x2e, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, - 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, - 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, - 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, - 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, - 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a, - 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, - 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e, - 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, - 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, - 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30, - 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, - 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, - 0x6f, 0x33, -} - -var ( - file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once - file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc -) - -func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { - file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() { - file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData) - }) - return file_internal_proto_s2a_s2a_proto_rawDescData -} - -var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ - (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism - (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq - (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq - (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq - (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq - (*SessionReq)(nil), // 5: s2a.proto.SessionReq - (*SessionState)(nil), // 6: s2a.proto.SessionState - (*SessionResult)(nil), // 7: s2a.proto.SessionResult - (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus - (*SessionResp)(nil), // 9: s2a.proto.SessionResp - (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity - (common_go_proto.TLSVersion)(0), // 11: s2a.proto.TLSVersion - (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite -} -var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{ - 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity - 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion - 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion - 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite - 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity - 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity - 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion - 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion - 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite - 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> s2a.proto.Identity - 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity - 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq - 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq - 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq - 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq - 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism - 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion - 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite - 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState - 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity - 10, // 20: s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity - 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity - 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult - 8, // 
23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus - 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq - 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp - 25, // [25:26] is the sub-list for method output_type - 24, // [24:25] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name -} - -func init() { file_internal_proto_s2a_s2a_proto_init() } -func file_internal_proto_s2a_s2a_proto_init() { - if File_internal_proto_s2a_s2a_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticationMechanism); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientSessionStartReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerSessionStartReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionNextReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResumptionTicketReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*AuthenticationMechanism_Token)(nil), - } - 
file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ - (*SessionReq_ClientStart)(nil), - (*SessionReq_ServerStart)(nil), - (*SessionReq_Next)(nil), - (*SessionReq_ResumptionTicket)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc, - NumEnums: 0, - NumMessages: 10, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_proto_s2a_s2a_proto_goTypes, - DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs, - MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes, - }.Build() - File_internal_proto_s2a_s2a_proto = out.File - file_internal_proto_s2a_s2a_proto_rawDesc = nil - file_internal_proto_s2a_s2a_proto_goTypes = nil - file_internal_proto_s2a_s2a_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go deleted file mode 100644 index 0fa582fc8..000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.12 -// source: internal/proto/s2a/s2a.proto - -package s2a_go_proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" -) - -// S2AServiceClient is the client API for S2AService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type S2AServiceClient interface { - // S2A service accepts a stream of session setup requests and returns a stream - // of session setup responses. The client of this service is expected to send - // exactly one client_start or server_start message followed by at least one - // next message. Applications running TLS clients can send requests with - // resumption_ticket messages only after the session is successfully set up. - // - // Every time S2A client sends a request, this service sends a response. - // However, clients do not have to wait for service response before sending - // the next request. 
- SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) -} - -type s2AServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { - return &s2AServiceClient{cc} -} - -func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &s2AServiceSetUpSessionClient{stream} - return x, nil -} - -type S2AService_SetUpSessionClient interface { - Send(*SessionReq) error - Recv() (*SessionResp, error) - grpc.ClientStream -} - -type s2AServiceSetUpSessionClient struct { - grpc.ClientStream -} - -func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { - m := new(SessionResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// S2AServiceServer is the server API for S2AService service. -// All implementations must embed UnimplementedS2AServiceServer -// for forward compatibility -type S2AServiceServer interface { - // S2A service accepts a stream of session setup requests and returns a stream - // of session setup responses. The client of this service is expected to send - // exactly one client_start or server_start message followed by at least one - // next message. Applications running TLS clients can send requests with - // resumption_ticket messages only after the session is successfully set up. - // - // Every time S2A client sends a request, this service sends a response. - // However, clients do not have to wait for service response before sending - // the next request. - SetUpSession(S2AService_SetUpSessionServer) error - mustEmbedUnimplementedS2AServiceServer() -} - -// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. -type UnimplementedS2AServiceServer struct { -} - -func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { - return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") -} -func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} - -// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to S2AServiceServer will -// result in compilation errors. 
-type UnsafeS2AServiceServer interface { - mustEmbedUnimplementedS2AServiceServer() -} - -func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { - s.RegisterService(&S2AService_ServiceDesc, srv) -} - -func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) -} - -type S2AService_SetUpSessionServer interface { - Send(*SessionResp) error - Recv() (*SessionReq, error) - grpc.ServerStream -} - -type s2AServiceSetUpSessionServer struct { - grpc.ServerStream -} - -func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { - m := new(SessionReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var S2AService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "s2a.proto.S2AService", - HandlerType: (*S2AServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "SetUpSession", - Handler: _S2AService_SetUpSession_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "internal/proto/s2a/s2a.proto", -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go deleted file mode 100644 index c84bed977..000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/v2/common/common.proto - -package common_go_proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using -// S2A. 
-type Ciphersuite int32 - -const ( - Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0 - Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1 - Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2 - Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3 - Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4 - Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5 - Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6 -) - -// Enum value maps for Ciphersuite. -var ( - Ciphersuite_name = map[int32]string{ - 0: "CIPHERSUITE_UNSPECIFIED", - 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", - 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", - } - Ciphersuite_value = map[string]int32{ - "CIPHERSUITE_UNSPECIFIED": 0, - "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1, - "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2, - "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3, - "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4, - "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5, - "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6, - } -) - -func (x Ciphersuite) Enum() *Ciphersuite { - p := new(Ciphersuite) - *p = x - return p -} - -func (x Ciphersuite) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor() -} - -func (Ciphersuite) Type() protoreflect.EnumType { - return &file_internal_proto_v2_common_common_proto_enumTypes[0] -} - -func (x Ciphersuite) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Ciphersuite.Descriptor instead. -func (Ciphersuite) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} -} - -// The TLS versions supported by S2A's handshaker module. -type TLSVersion int32 - -const ( - TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0 - TLSVersion_TLS_VERSION_1_0 TLSVersion = 1 - TLSVersion_TLS_VERSION_1_1 TLSVersion = 2 - TLSVersion_TLS_VERSION_1_2 TLSVersion = 3 - TLSVersion_TLS_VERSION_1_3 TLSVersion = 4 -) - -// Enum value maps for TLSVersion. 
-var ( - TLSVersion_name = map[int32]string{ - 0: "TLS_VERSION_UNSPECIFIED", - 1: "TLS_VERSION_1_0", - 2: "TLS_VERSION_1_1", - 3: "TLS_VERSION_1_2", - 4: "TLS_VERSION_1_3", - } - TLSVersion_value = map[string]int32{ - "TLS_VERSION_UNSPECIFIED": 0, - "TLS_VERSION_1_0": 1, - "TLS_VERSION_1_1": 2, - "TLS_VERSION_1_2": 3, - "TLS_VERSION_1_3": 4, - } -) - -func (x TLSVersion) Enum() *TLSVersion { - p := new(TLSVersion) - *p = x - return p -} - -func (x TLSVersion) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor() -} - -func (TLSVersion) Type() protoreflect.EnumType { - return &file_internal_proto_v2_common_common_proto_enumTypes[1] -} - -func (x TLSVersion) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use TLSVersion.Descriptor instead. -func (TLSVersion) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1} -} - -// The side in the TLS connection. -type ConnectionSide int32 - -const ( - ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0 - ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1 - ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2 -) - -// Enum value maps for ConnectionSide. -var ( - ConnectionSide_name = map[int32]string{ - 0: "CONNECTION_SIDE_UNSPECIFIED", - 1: "CONNECTION_SIDE_CLIENT", - 2: "CONNECTION_SIDE_SERVER", - } - ConnectionSide_value = map[string]int32{ - "CONNECTION_SIDE_UNSPECIFIED": 0, - "CONNECTION_SIDE_CLIENT": 1, - "CONNECTION_SIDE_SERVER": 2, - } -) - -func (x ConnectionSide) Enum() *ConnectionSide { - p := new(ConnectionSide) - *p = x - return p -} - -func (x ConnectionSide) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor() -} - -func (ConnectionSide) Type() protoreflect.EnumType { - return &file_internal_proto_v2_common_common_proto_enumTypes[2] -} - -func (x ConnectionSide) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ConnectionSide.Descriptor instead. -func (ConnectionSide) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2} -} - -// The ALPN protocols that the application can negotiate during a TLS handshake. -type AlpnProtocol int32 - -const ( - AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0 - AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1 - AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2 - AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3 -) - -// Enum value maps for AlpnProtocol. 
-var ( - AlpnProtocol_name = map[int32]string{ - 0: "ALPN_PROTOCOL_UNSPECIFIED", - 1: "ALPN_PROTOCOL_GRPC", - 2: "ALPN_PROTOCOL_HTTP2", - 3: "ALPN_PROTOCOL_HTTP1_1", - } - AlpnProtocol_value = map[string]int32{ - "ALPN_PROTOCOL_UNSPECIFIED": 0, - "ALPN_PROTOCOL_GRPC": 1, - "ALPN_PROTOCOL_HTTP2": 2, - "ALPN_PROTOCOL_HTTP1_1": 3, - } -) - -func (x AlpnProtocol) Enum() *AlpnProtocol { - p := new(AlpnProtocol) - *p = x - return p -} - -func (x AlpnProtocol) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor() -} - -func (AlpnProtocol) Type() protoreflect.EnumType { - return &file_internal_proto_v2_common_common_proto_enumTypes[3] -} - -func (x AlpnProtocol) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use AlpnProtocol.Descriptor instead. -func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} -} - -var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor - -var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ - 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, - 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, - 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, - 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, - 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, - 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, - 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, - 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, - 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 
0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, - 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, - 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, - 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, - 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, - 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, - 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, - 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once - file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc -) - -func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { - file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() { - file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData) - }) - return file_internal_proto_v2_common_common_proto_rawDescData -} - -var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var 
file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ - (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite - (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion - (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide - (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol -} -var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_internal_proto_v2_common_common_proto_init() } -func file_internal_proto_v2_common_common_proto_init() { - if File_internal_proto_v2_common_common_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, - NumEnums: 4, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_proto_v2_common_common_proto_goTypes, - DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, - EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, - }.Build() - File_internal_proto_v2_common_common_proto = out.File - file_internal_proto_v2_common_common_proto_rawDesc = nil - file_internal_proto_v2_common_common_proto_goTypes = nil - file_internal_proto_v2_common_common_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go deleted file mode 100644 index b7fd871c7..000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/v2/s2a_context/s2a_context.proto - -package s2a_context_go_proto - -import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type S2AContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The SPIFFE ID from the peer leaf certificate, if present. 
- // - // This field is only populated if the leaf certificate is a valid SPIFFE - // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid - // SPIFFE ID. - LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"` - // The URIs that are present in the SubjectAltName extension of the peer leaf - // certificate. - // - // Note that the extracted URIs are not validated and may not be properly - // formatted. - LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"` - // The DNSNames that are present in the SubjectAltName extension of the peer - // leaf certificate. - LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"` - // The (ordered) list of fingerprints in the certificate chain used to verify - // the given leaf certificate. The order MUST be from leaf certificate - // fingerprint to root certificate fingerprint. - // - // A fingerprint is the base-64 encoding of the SHA256 hash of the - // DER-encoding of a certificate. The list MAY be populated even if the peer - // certificate chain was NOT validated successfully. - PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` - // The local identity used during session setup. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The SHA256 hash of the DER-encoding of the local leaf certificate used in - // the handshake. - LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` -} - -func (x *S2AContext) Reset() { - *x = S2AContext{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *S2AContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*S2AContext) ProtoMessage() {} - -func (x *S2AContext) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. 
-func (*S2AContext) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} -} - -func (x *S2AContext) GetLeafCertSpiffeId() string { - if x != nil { - return x.LeafCertSpiffeId - } - return "" -} - -func (x *S2AContext) GetLeafCertUris() []string { - if x != nil { - return x.LeafCertUris - } - return nil -} - -func (x *S2AContext) GetLeafCertDnsnames() []string { - if x != nil { - return x.LeafCertDnsnames - } - return nil -} - -func (x *S2AContext) GetPeerCertificateChainFingerprints() []string { - if x != nil { - return x.PeerCertificateChainFingerprints - } - return nil -} - -func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *S2AContext) GetLocalLeafCertFingerprint() []byte { - if x != nil { - return x.LocalLeafCertFingerprint - } - return nil -} - -var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor - -var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, - 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, - 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, - 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 
0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, - 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc -) - -func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData) - }) - return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData -} - -var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ - (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext - (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity -} -var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() } -func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { - if File_internal_proto_v2_s2a_context_s2a_context_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*S2AContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes, - DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs, - MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes, - }.Build() - File_internal_proto_v2_s2a_context_s2a_context_proto = out.File - file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil - file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil - file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go 
b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go deleted file mode 100644 index e843450c7..000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ /dev/null @@ -1,2494 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 -// source: internal/proto/v2/s2a/s2a.proto - -package s2a_go_proto - -import ( - common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" - common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" - s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SignatureAlgorithm int32 - -const ( - SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0 - // RSA Public-Key Cryptography Standards #1. - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1 - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2 - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3 - // ECDSA. - SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4 - SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5 - SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6 - // RSA Probabilistic Signature Scheme. - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7 - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8 - SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9 - // ED25519. - SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10 -) - -// Enum value maps for SignatureAlgorithm. 
-var ( - SignatureAlgorithm_name = map[int32]string{ - 0: "S2A_SSL_SIGN_UNSPECIFIED", - 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256", - 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384", - 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512", - 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256", - 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384", - 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512", - 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256", - 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384", - 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512", - 10: "S2A_SSL_SIGN_ED25519", - } - SignatureAlgorithm_value = map[string]int32{ - "S2A_SSL_SIGN_UNSPECIFIED": 0, - "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1, - "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2, - "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3, - "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4, - "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5, - "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6, - "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7, - "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8, - "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9, - "S2A_SSL_SIGN_ED25519": 10, - } -) - -func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { - p := new(SignatureAlgorithm) - *p = x - return p -} - -func (x SignatureAlgorithm) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor() -} - -func (SignatureAlgorithm) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0] -} - -func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SignatureAlgorithm.Descriptor instead. -func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} -} - -type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32 - -const ( - GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0 - GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1 - GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2 - GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3 - GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4 - GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5 -) - -// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate. 
-var ( - GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "DONT_REQUEST_CLIENT_CERTIFICATE", - 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", - 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY", - 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", - 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY", - } - GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{ - "UNSPECIFIED": 0, - "DONT_REQUEST_CLIENT_CERTIFICATE": 1, - "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2, - "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3, - "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4, - "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5, - } -) - -func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { - p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) - *p = x - return p -} - -func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor() -} - -func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1] -} - -func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead. -func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0} -} - -type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32 - -const ( - OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0 - // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of - // the TLS handshake must be signed to prove possession of the private key. - // - // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3. - OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1 - // When performing a TLS 1.2 handshake using an RSA algorithm, the key - // exchange algorithm involves the client generating a premaster secret, - // encrypting it using the server's public key, and sending this encrypted - // blob to the server in a ClientKeyExchange message. - // - // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1. - OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2 -) - -// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation. 
-var ( - OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "SIGN", - 2: "DECRYPT", - } - OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{ - "UNSPECIFIED": 0, - "SIGN": 1, - "DECRYPT": 2, - } -) - -func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation { - p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation) - *p = x - return p -} - -func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor() -} - -func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2] -} - -func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead. -func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0} -} - -type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32 - -const ( - OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0 - OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1 - OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2 -) - -// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation. -var ( - OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "ENCRYPT", - 2: "DECRYPT", - } - OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{ - "UNSPECIFIED": 0, - "ENCRYPT": 1, - "DECRYPT": 2, - } -) - -func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation { - p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation) - *p = x - return p -} - -func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor() -} - -func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3] -} - -func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead. -func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0} -} - -type ValidatePeerCertificateChainReq_VerificationMode int32 - -const ( - // The default verification mode supported by S2A. 
- ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0 - // The SPIFFE verification mode selects the set of trusted certificates to - // use for path building based on the SPIFFE trust domain in the peer's leaf - // certificate. - ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1 - // The connect-to-Google verification mode uses the trust bundle for - // connecting to Google, e.g. *.mtls.googleapis.com endpoints. - ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 -) - -// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. -var ( - ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "SPIFFE", - 2: "CONNECT_TO_GOOGLE", - } - ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ - "UNSPECIFIED": 0, - "SPIFFE": 1, - "CONNECT_TO_GOOGLE": 2, - } -) - -func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode { - p := new(ValidatePeerCertificateChainReq_VerificationMode) - *p = x - return p -} - -func (x ValidatePeerCertificateChainReq_VerificationMode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor() -} - -func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4] -} - -func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead. -func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} -} - -type ValidatePeerCertificateChainResp_ValidationResult int32 - -const ( - ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0 - ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1 - ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2 -) - -// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult. 
-var ( - ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "SUCCESS", - 2: "FAILURE", - } - ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{ - "UNSPECIFIED": 0, - "SUCCESS": 1, - "FAILURE": 2, - } -) - -func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult { - p := new(ValidatePeerCertificateChainResp_ValidationResult) - *p = x - return p -} - -func (x ValidatePeerCertificateChainResp_ValidationResult) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor { - return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor() -} - -func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType { - return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5] -} - -func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead. -func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0} -} - -type AlpnPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // If true, the application MUST perform ALPN negotiation. - EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"` - // The ordered list of ALPN protocols that specify how the application SHOULD - // negotiate ALPN during the TLS handshake. - // - // The application MAY ignore any ALPN protocols in this list that are not - // supported by the application. - AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"` -} - -func (x *AlpnPolicy) Reset() { - *x = AlpnPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AlpnPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AlpnPolicy) ProtoMessage() {} - -func (x *AlpnPolicy) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead. -func (*AlpnPolicy) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} -} - -func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool { - if x != nil { - return x.EnableAlpnNegotiation - } - return false -} - -func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol { - if x != nil { - return x.AlpnProtocols - } - return nil -} - -type AuthenticationMechanism struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Applications may specify an identity associated to an authentication - // mechanism. 
Otherwise, S2A assumes that the authentication mechanism is - // associated with the default identity. If the default identity cannot be - // determined, the request is rejected. - Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` - // Types that are assignable to MechanismOneof: - // - // *AuthenticationMechanism_Token - MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` -} - -func (x *AuthenticationMechanism) Reset() { - *x = AuthenticationMechanism{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AuthenticationMechanism) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AuthenticationMechanism) ProtoMessage() {} - -func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. -func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} -} - -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { - if x != nil { - return x.Identity - } - return nil -} - -func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { - if m != nil { - return m.MechanismOneof - } - return nil -} - -func (x *AuthenticationMechanism) GetToken() string { - if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { - return x.Token - } - return "" -} - -type isAuthenticationMechanism_MechanismOneof interface { - isAuthenticationMechanism_MechanismOneof() -} - -type AuthenticationMechanism_Token struct { - // A token that the application uses to authenticate itself to S2A. - Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` -} - -func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} - -type Status struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status code that is specific to the application and the implementation - // of S2A, e.g., gRPC status code. - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // The status details. - Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` -} - -func (x *Status) Reset() { - *x = Status{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Status) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Status) ProtoMessage() {} - -func (x *Status) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
-func (*Status) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2} -} - -func (x *Status) GetCode() uint32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *Status) GetDetails() string { - if x != nil { - return x.Details - } - return "" -} - -type GetTlsConfigurationReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The role of the application in the TLS connection. - ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"` - // The server name indication (SNI) extension, which MAY be populated when a - // server is offloading to S2A. The SNI is used to determine the server - // identity if the local identity in the request is empty. - Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` -} - -func (x *GetTlsConfigurationReq) Reset() { - *x = GetTlsConfigurationReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTlsConfigurationReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTlsConfigurationReq) ProtoMessage() {} - -func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead. -func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3} -} - -func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide { - if x != nil { - return x.ConnectionSide - } - return common_go_proto.ConnectionSide(0) -} - -func (x *GetTlsConfigurationReq) GetSni() string { - if x != nil { - return x.Sni - } - return "" -} - -type GetTlsConfigurationResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to TlsConfiguration: - // - // *GetTlsConfigurationResp_ClientTlsConfiguration_ - // *GetTlsConfigurationResp_ServerTlsConfiguration_ - TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"` -} - -func (x *GetTlsConfigurationResp) Reset() { - *x = GetTlsConfigurationResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTlsConfigurationResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTlsConfigurationResp) ProtoMessage() {} - -func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead. 
-func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4} -} - -func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration { - if m != nil { - return m.TlsConfiguration - } - return nil -} - -func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration { - if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok { - return x.ClientTlsConfiguration - } - return nil -} - -func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration { - if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok { - return x.ServerTlsConfiguration - } - return nil -} - -type isGetTlsConfigurationResp_TlsConfiguration interface { - isGetTlsConfigurationResp_TlsConfiguration() -} - -type GetTlsConfigurationResp_ClientTlsConfiguration_ struct { - ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"` -} - -type GetTlsConfigurationResp_ServerTlsConfiguration_ struct { - ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"` -} - -func (*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { -} - -func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { -} - -type OffloadPrivateKeyOperationReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The operation the private key is used for. - Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"` - // The signature algorithm to be used for signing operations. - SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"` - // The input bytes to be signed or decrypted. - // - // Types that are assignable to InBytes: - // - // *OffloadPrivateKeyOperationReq_RawBytes - // *OffloadPrivateKeyOperationReq_Sha256Digest - // *OffloadPrivateKeyOperationReq_Sha384Digest - // *OffloadPrivateKeyOperationReq_Sha512Digest - InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"` -} - -func (x *OffloadPrivateKeyOperationReq) Reset() { - *x = OffloadPrivateKeyOperationReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OffloadPrivateKeyOperationReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffloadPrivateKeyOperationReq) ProtoMessage() {} - -func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead. 
-func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5} -} - -func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation { - if x != nil { - return x.Operation - } - return OffloadPrivateKeyOperationReq_UNSPECIFIED -} - -func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm { - if x != nil { - return x.SignatureAlgorithm - } - return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED -} - -func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes { - if m != nil { - return m.InBytes - } - return nil -} - -func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte { - if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok { - return x.RawBytes - } - return nil -} - -func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte { - if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok { - return x.Sha256Digest - } - return nil -} - -func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte { - if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok { - return x.Sha384Digest - } - return nil -} - -func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte { - if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok { - return x.Sha512Digest - } - return nil -} - -type isOffloadPrivateKeyOperationReq_InBytes interface { - isOffloadPrivateKeyOperationReq_InBytes() -} - -type OffloadPrivateKeyOperationReq_RawBytes struct { - // Raw bytes to be hashed and signed, or decrypted. - RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"` -} - -type OffloadPrivateKeyOperationReq_Sha256Digest struct { - // A SHA256 hash to be signed. Must be 32 bytes. - Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"` -} - -type OffloadPrivateKeyOperationReq_Sha384Digest struct { - // A SHA384 hash to be signed. Must be 48 bytes. - Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"` -} - -type OffloadPrivateKeyOperationReq_Sha512Digest struct { - // A SHA512 hash to be signed. Must be 64 bytes. - Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"` -} - -func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {} - -func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {} - -func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {} - -func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {} - -type OffloadPrivateKeyOperationResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The signed or decrypted output bytes. 
- OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` -} - -func (x *OffloadPrivateKeyOperationResp) Reset() { - *x = OffloadPrivateKeyOperationResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OffloadPrivateKeyOperationResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffloadPrivateKeyOperationResp) ProtoMessage() {} - -func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead. -func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6} -} - -func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte { - if x != nil { - return x.OutBytes - } - return nil -} - -type OffloadResumptionKeyOperationReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The operation the resumption key is used for. - Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"` - // The bytes to be encrypted or decrypted. - InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` -} - -func (x *OffloadResumptionKeyOperationReq) Reset() { - *x = OffloadResumptionKeyOperationReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OffloadResumptionKeyOperationReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffloadResumptionKeyOperationReq) ProtoMessage() {} - -func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead. -func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7} -} - -func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation { - if x != nil { - return x.Operation - } - return OffloadResumptionKeyOperationReq_UNSPECIFIED -} - -func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte { - if x != nil { - return x.InBytes - } - return nil -} - -type OffloadResumptionKeyOperationResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The encrypted or decrypted bytes. 
- OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` -} - -func (x *OffloadResumptionKeyOperationResp) Reset() { - *x = OffloadResumptionKeyOperationResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OffloadResumptionKeyOperationResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OffloadResumptionKeyOperationResp) ProtoMessage() {} - -func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead. -func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8} -} - -func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte { - if x != nil { - return x.OutBytes - } - return nil -} - -type ValidatePeerCertificateChainReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The verification mode that S2A MUST use to validate the peer certificate - // chain. - Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"` - // Types that are assignable to PeerOneof: - // - // *ValidatePeerCertificateChainReq_ClientPeer_ - // *ValidatePeerCertificateChainReq_ServerPeer_ - PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"` -} - -func (x *ValidatePeerCertificateChainReq) Reset() { - *x = ValidatePeerCertificateChainReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerCertificateChainReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerCertificateChainReq) ProtoMessage() {} - -func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead. 
-func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9} -} - -func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode { - if x != nil { - return x.Mode - } - return ValidatePeerCertificateChainReq_UNSPECIFIED -} - -func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof { - if m != nil { - return m.PeerOneof - } - return nil -} - -func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer { - if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok { - return x.ClientPeer - } - return nil -} - -func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer { - if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok { - return x.ServerPeer - } - return nil -} - -type isValidatePeerCertificateChainReq_PeerOneof interface { - isValidatePeerCertificateChainReq_PeerOneof() -} - -type ValidatePeerCertificateChainReq_ClientPeer_ struct { - ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"` -} - -type ValidatePeerCertificateChainReq_ServerPeer_ struct { - ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"` -} - -func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} - -func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} - -type ValidatePeerCertificateChainResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The result of validating the peer certificate chain. - ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"` - // The validation details. This field is only populated when the validation - // result is NOT SUCCESS. - ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"` - // The S2A context contains information from the peer certificate chain. - // - // The S2A context MAY be populated even if validation of the peer certificate - // chain fails. 
- Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` -} - -func (x *ValidatePeerCertificateChainResp) Reset() { - *x = ValidatePeerCertificateChainResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerCertificateChainResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerCertificateChainResp) ProtoMessage() {} - -func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead. -func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10} -} - -func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult { - if x != nil { - return x.ValidationResult - } - return ValidatePeerCertificateChainResp_UNSPECIFIED -} - -func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string { - if x != nil { - return x.ValidationDetails - } - return "" -} - -func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext { - if x != nil { - return x.Context - } - return nil -} - -type SessionReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The identity corresponding to the TLS configurations that MUST be used for - // the TLS handshake. - // - // If a managed identity already exists, the local identity and authentication - // mechanisms are ignored. If a managed identity doesn't exist and the local - // identity is not populated, S2A will try to deduce the managed identity to - // use from the SNI extension. If that also fails, S2A uses the default - // identity (if one exists). - LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` - // The authentication mechanisms that the application wishes to use to - // authenticate to S2A, ordered by preference. S2A will always use the first - // authentication mechanism that matches the managed identity. 
- AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"` - // Types that are assignable to ReqOneof: - // - // *SessionReq_GetTlsConfigurationReq - // *SessionReq_OffloadPrivateKeyOperationReq - // *SessionReq_OffloadResumptionKeyOperationReq - // *SessionReq_ValidatePeerCertificateChainReq - ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` -} - -func (x *SessionReq) Reset() { - *x = SessionReq{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionReq) ProtoMessage() {} - -func (x *SessionReq) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. -func (*SessionReq) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} -} - -func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { - if x != nil { - return x.LocalIdentity - } - return nil -} - -func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism { - if x != nil { - return x.AuthenticationMechanisms - } - return nil -} - -func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { - if m != nil { - return m.ReqOneof - } - return nil -} - -func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq { - if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok { - return x.GetTlsConfigurationReq - } - return nil -} - -func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq { - if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok { - return x.OffloadPrivateKeyOperationReq - } - return nil -} - -func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq { - if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok { - return x.OffloadResumptionKeyOperationReq - } - return nil -} - -func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq { - if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok { - return x.ValidatePeerCertificateChainReq - } - return nil -} - -type isSessionReq_ReqOneof interface { - isSessionReq_ReqOneof() -} - -type SessionReq_GetTlsConfigurationReq struct { - // Requests the certificate chain and TLS configuration corresponding to the - // local identity, which the application MUST use to negotiate the TLS - // handshake. - GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"` -} - -type SessionReq_OffloadPrivateKeyOperationReq struct { - // Signs or decrypts the input bytes using a private key corresponding to - // the local identity in the request. - // - // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the - // S2Av2 by a server during a TLS 1.2 handshake. 
- OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"` -} - -type SessionReq_OffloadResumptionKeyOperationReq struct { - // Encrypts or decrypts the input bytes using a resumption key corresponding - // to the local identity in the request. - OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"` -} - -type SessionReq_ValidatePeerCertificateChainReq struct { - // Verifies the peer's certificate chain using - // (a) trust bundles corresponding to the local identity in the request, and - // (b) the verification mode in the request. - ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"` -} - -func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {} - -func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {} - -func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {} - -func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {} - -type SessionResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Status of the session response. - // - // The status field is populated so that if an error occurs when making an - // individual request, then communication with the S2A may continue. If an - // error is returned directly (e.g. at the gRPC layer), then it may result - // that the bidirectional stream being closed. - Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - // Types that are assignable to RespOneof: - // - // *SessionResp_GetTlsConfigurationResp - // *SessionResp_OffloadPrivateKeyOperationResp - // *SessionResp_OffloadResumptionKeyOperationResp - // *SessionResp_ValidatePeerCertificateChainResp - RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"` -} - -func (x *SessionResp) Reset() { - *x = SessionResp{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SessionResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SessionResp) ProtoMessage() {} - -func (x *SessionResp) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
-func (*SessionResp) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12} -} - -func (x *SessionResp) GetStatus() *Status { - if x != nil { - return x.Status - } - return nil -} - -func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof { - if m != nil { - return m.RespOneof - } - return nil -} - -func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp { - if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok { - return x.GetTlsConfigurationResp - } - return nil -} - -func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp { - if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok { - return x.OffloadPrivateKeyOperationResp - } - return nil -} - -func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp { - if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok { - return x.OffloadResumptionKeyOperationResp - } - return nil -} - -func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp { - if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok { - return x.ValidatePeerCertificateChainResp - } - return nil -} - -type isSessionResp_RespOneof interface { - isSessionResp_RespOneof() -} - -type SessionResp_GetTlsConfigurationResp struct { - // Contains the certificate chain and TLS configurations corresponding to - // the local identity. - GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"` -} - -type SessionResp_OffloadPrivateKeyOperationResp struct { - // Contains the signed or encrypted output bytes using the private key - // corresponding to the local identity. - OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"` -} - -type SessionResp_OffloadResumptionKeyOperationResp struct { - // Contains the encrypted or decrypted output bytes using the resumption key - // corresponding to the local identity. - OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"` -} - -type SessionResp_ValidatePeerCertificateChainResp struct { - // Contains the validation result, peer identity and fingerprints of peer - // certificates. - ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"` -} - -func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {} - -func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {} - -func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {} - -func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {} - -// Next ID: 8 -type GetTlsConfigurationResp_ClientTlsConfiguration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The certificate chain that the client MUST use for the TLS handshake. - // It's a list of PEM-encoded certificates, ordered from leaf to root, - // excluding the root. 
- CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` - // The minimum TLS version number that the client MUST use for the TLS - // handshake. If this field is not provided, the client MUST use the default - // minimum version of the client's TLS library. - MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` - // The maximum TLS version number that the client MUST use for the TLS - // handshake. If this field is not provided, the client MUST use the default - // maximum version of the client's TLS library. - MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` - // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to - // negotiate in the TLS handshake. - Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` - // The policy that dictates how the client negotiates ALPN during the TLS - // handshake. - AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() { - *x = GetTlsConfigurationResp_ClientTlsConfiguration{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead. 
-func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string { - if x != nil { - return x.CertificateChain - } - return nil -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MinTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MaxTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { - if x != nil { - return x.Ciphersuites - } - return nil -} - -func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { - if x != nil { - return x.AlpnPolicy - } - return nil -} - -// Next ID: 12 -type GetTlsConfigurationResp_ServerTlsConfiguration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The certificate chain that the server MUST use for the TLS handshake. - // It's a list of PEM-encoded certificates, ordered from leaf to root, - // excluding the root. - CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` - // The minimum TLS version number that the server MUST use for the TLS - // handshake. If this field is not provided, the server MUST use the default - // minimum version of the server's TLS library. - MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` - // The maximum TLS version number that the server MUST use for the TLS - // handshake. If this field is not provided, the server MUST use the default - // maximum version of the server's TLS library. - MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` - // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to - // negotiate in the TLS handshake. - Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` - // Whether to enable TLS resumption. - TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"` - // Whether the server MUST request a client certificate (i.e. to negotiate - // TLS vs. mTLS). - RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"` - // Returns the maximum number of extra bytes that - // |OffloadResumptionKeyOperation| can add to the number of unencrypted - // bytes to form the encrypted bytes. 
- MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"` - // The policy that dictates how the server negotiates ALPN during the TLS - // handshake. - AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() { - *x = GetTlsConfigurationResp_ServerTlsConfiguration{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead. -func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1} -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string { - if x != nil { - return x.CertificateChain - } - return nil -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MinTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { - if x != nil { - return x.MaxTlsVersion - } - return common_go_proto.TLSVersion(0) -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { - if x != nil { - return x.Ciphersuites - } - return nil -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool { - if x != nil { - return x.TlsResumptionEnabled - } - return false -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { - if x != nil { - return x.RequestClientCertificate - } - return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 { - if x != nil { - return x.MaxOverheadOfTicketAead - } - return 0 -} - -func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { - if x != nil { - return x.AlpnPolicy - } - return nil -} - -type ValidatePeerCertificateChainReq_ClientPeer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The certificate chain to be verified. The chain MUST be a list of - // DER-encoded certificates, ordered from leaf to root, excluding the root. 
- CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` -} - -func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() { - *x = ValidatePeerCertificateChainReq_ClientPeer{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {} - -func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead. -func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} -} - -func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte { - if x != nil { - return x.CertificateChain - } - return nil -} - -type ValidatePeerCertificateChainReq_ServerPeer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The certificate chain to be verified. The chain MUST be a list of - // DER-encoded certificates, ordered from leaf to root, excluding the root. - CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` - // The expected hostname of the server. - ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"` - // The UnrestrictedClientPolicy specified by the user. - SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"` -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() { - *x = ValidatePeerCertificateChainReq_ServerPeer{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message { - mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead. 
-func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) { - return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1} -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte { - if x != nil { - return x.CertificateChain - } - return nil -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string { - if x != nil { - return x.ServerHostname - } - return "" -} - -func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte { - if x != nil { - return x.SerializedUnrestrictedClientPolicy - } - return nil -} - -var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor - -var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, - 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 
0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, - 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, - 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, - 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, - 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, - 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, - 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, - 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, - 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, - 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 
0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, - 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, - 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, - 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, - 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, - 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, - 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, - 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, - 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, - 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 
0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 
0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, - 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, - 0x72, 0x65, 0x73, 0x70, 0x5f, 
0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, - 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, - 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, - 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, - 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, - 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, - 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, - 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, - 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, - 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, - 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, - 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - 
-var ( - file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once - file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc -) - -func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { - file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() { - file_internal_proto_v2_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData) - }) - return file_internal_proto_v2_s2a_s2a_proto_rawDescData -} - -var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ - (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm - (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate - (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation - (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation - (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode - (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult - (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy - (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism - (*Status)(nil), // 8: s2a.proto.v2.Status - (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq - (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp - (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq - (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp - (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq - (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp - (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq - (*ValidatePeerCertificateChainResp)(nil), // 16: s2a.proto.v2.ValidatePeerCertificateChainResp - (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq - (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp - (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration - (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration - (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer - (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer - (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol - (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity - (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide - (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext - (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion - (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite -} -var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ - 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol - 
24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity - 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide - 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration - 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration - 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation - 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm - 3, // 7: s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation - 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode - 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer - 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer - 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult - 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext - 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity - 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism - 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq - 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq - 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq - 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq - 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status - 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp - 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp - 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp - 16, // 23: s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp - 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion - 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion - 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite - 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy - 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion - 27, // 29: 
s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion - 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite - 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate - 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy - 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq - 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp - 34, // [34:35] is the sub-list for method output_type - 33, // [33:34] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 33, // [33:33] is the sub-list for extension extendee - 0, // [0:33] is the sub-list for field type_name -} - -func init() { file_internal_proto_v2_s2a_s2a_proto_init() } -func file_internal_proto_v2_s2a_s2a_proto_init() { - if File_internal_proto_v2_s2a_s2a_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AlpnPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthenticationMechanism); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Status); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTlsConfigurationReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTlsConfigurationResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OffloadPrivateKeyOperationReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OffloadPrivateKeyOperationResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OffloadResumptionKeyOperationReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*OffloadResumptionKeyOperationResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerCertificateChainReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerCertificateChainResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SessionResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*AuthenticationMechanism_Token)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ - (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), - (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ - (*OffloadPrivateKeyOperationReq_RawBytes)(nil), - (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), - (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), - (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ - (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), - (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ - (*SessionReq_GetTlsConfigurationReq)(nil), - (*SessionReq_OffloadPrivateKeyOperationReq)(nil), - 
(*SessionReq_OffloadResumptionKeyOperationReq)(nil), - (*SessionReq_ValidatePeerCertificateChainReq)(nil), - } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ - (*SessionResp_GetTlsConfigurationResp)(nil), - (*SessionResp_OffloadPrivateKeyOperationResp)(nil), - (*SessionResp_OffloadResumptionKeyOperationResp)(nil), - (*SessionResp_ValidatePeerCertificateChainResp)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc, - NumEnums: 6, - NumMessages: 17, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes, - DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs, - EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes, - MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes, - }.Build() - File_internal_proto_v2_s2a_s2a_proto = out.File - file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil - file_internal_proto_v2_s2a_s2a_proto_goTypes = nil - file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go deleted file mode 100644 index 2566df6c3..000000000 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.12 -// source: internal/proto/v2/s2a/s2a.proto - -package s2a_go_proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" -) - -// S2AServiceClient is the client API for S2AService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type S2AServiceClient interface { - // SetUpSession is a bidirectional stream used by applications to offload - // operations from the TLS handshake. 
- SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) -} - -type s2AServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { - return &s2AServiceClient{cc} -} - -func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &s2AServiceSetUpSessionClient{stream} - return x, nil -} - -type S2AService_SetUpSessionClient interface { - Send(*SessionReq) error - Recv() (*SessionResp, error) - grpc.ClientStream -} - -type s2AServiceSetUpSessionClient struct { - grpc.ClientStream -} - -func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { - return x.ClientStream.SendMsg(m) -} - -func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { - m := new(SessionResp) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// S2AServiceServer is the server API for S2AService service. -// All implementations must embed UnimplementedS2AServiceServer -// for forward compatibility -type S2AServiceServer interface { - // SetUpSession is a bidirectional stream used by applications to offload - // operations from the TLS handshake. - SetUpSession(S2AService_SetUpSessionServer) error - mustEmbedUnimplementedS2AServiceServer() -} - -// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. -type UnimplementedS2AServiceServer struct { -} - -func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { - return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") -} -func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} - -// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to S2AServiceServer will -// result in compilation errors. -type UnsafeS2AServiceServer interface { - mustEmbedUnimplementedS2AServiceServer() -} - -func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { - s.RegisterService(&S2AService_ServiceDesc, srv) -} - -func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) -} - -type S2AService_SetUpSessionServer interface { - Send(*SessionResp) error - Recv() (*SessionReq, error) - grpc.ServerStream -} - -type s2AServiceSetUpSessionServer struct { - grpc.ServerStream -} - -func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { - return x.ServerStream.SendMsg(m) -} - -func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { - m := new(SessionReq) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var S2AService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "s2a.proto.v2.S2AService", - HandlerType: (*S2AServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "SetUpSession", - Handler: _S2AService_SetUpSession_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "internal/proto/v2/s2a/s2a.proto", -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go deleted file mode 100644 index 486f4ec4f..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package aeadcrypter provides the interface for AEAD cipher implementations -// used by S2A's record protocol. -package aeadcrypter - -// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record -// protocol. -type S2AAEADCrypter interface { - // Encrypt encrypts the plaintext and computes the tag of dst and plaintext. - // dst and plaintext may fully overlap or not at all. - Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) - // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may - // fully overlap or not at all. - Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) - // TagSize returns the tag size in bytes. - TagSize() int -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go deleted file mode 100644 index 85c4e595d..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package aeadcrypter - -import ( - "crypto/aes" - "crypto/cipher" - "fmt" -) - -// Supported key sizes in bytes. -const ( - AES128GCMKeySize = 16 - AES256GCMKeySize = 32 -) - -// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter. -type aesgcm struct { - aead cipher.AEAD -} - -// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be -// either 128 bits or 256 bits. 
-func NewAESGCM(key []byte) (S2AAEADCrypter, error) { - if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize { - return nil, fmt.Errorf("%d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key)) - } - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - a, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - return &aesgcm{aead: a}, nil -} - -// Encrypt is the encryption function. dst can contain bytes at the beginning of -// the ciphertext that will not be encrypted but will be authenticated. If dst -// has enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext may -// fully overlap or not at all. -func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { - return encrypt(s.aead, dst, plaintext, nonce, aad) -} - -func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { - return decrypt(s.aead, dst, ciphertext, nonce, aad) -} - -func (s *aesgcm) TagSize() int { - return TagSize -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go deleted file mode 100644 index 214df4ca4..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package aeadcrypter - -import ( - "crypto/cipher" - "fmt" - - "golang.org/x/crypto/chacha20poly1305" -) - -// Supported key size in bytes. -const ( - Chacha20Poly1305KeySize = 32 -) - -// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD -// crypter. -type chachapoly struct { - aead cipher.AEAD -} - -// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must -// be Chacha20Poly1305KeySize bytes in length. -func NewChachaPoly(key []byte) (S2AAEADCrypter, error) { - if len(key) != Chacha20Poly1305KeySize { - return nil, fmt.Errorf("%d bytes, given: %d", Chacha20Poly1305KeySize, len(key)) - } - c, err := chacha20poly1305.New(key) - if err != nil { - return nil, err - } - return &chachapoly{aead: c}, nil -} - -// Encrypt is the encryption function. dst can contain bytes at the beginning of -// the ciphertext that will not be encrypted but will be authenticated. If dst -// has enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext may -// fully overlap or not at all. 
-func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { - return encrypt(s.aead, dst, plaintext, nonce, aad) -} - -func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { - return decrypt(s.aead, dst, ciphertext, nonce, aad) -} - -func (s *chachapoly) TagSize() int { - return TagSize -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go deleted file mode 100644 index b3c36ad95..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package aeadcrypter - -import ( - "crypto/cipher" - "fmt" -) - -const ( - // TagSize is the tag size in bytes for AES-128-GCM-SHA256, - // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. - TagSize = 16 - // NonceSize is the size of the nonce in number of bytes for - // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. - NonceSize = 12 - // SHA256DigestSize is the digest size of sha256 in bytes. - SHA256DigestSize = 32 - // SHA384DigestSize is the digest size of sha384 in bytes. - SHA384DigestSize = 48 -) - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return head, tail -} - -// encrypt is the encryption function for an AEAD crypter. aead determines -// the type of AEAD crypter. dst can contain bytes at the beginning of the -// ciphertext that will not be encrypted but will be authenticated. If dst has -// enough capacity to hold these bytes, the ciphertext and the tag, no -// allocation and copy operations will be performed. dst and plaintext may -// fully overlap or not at all. -func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) { - if len(nonce) != NonceSize { - return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) - } - // If we need to allocate an output buffer, we want to include space for - // the tag to avoid forcing the caller to reallocate as well. - dlen := len(dst) - dst, out := sliceForAppend(dst, len(plaintext)+TagSize) - data := out[:len(plaintext)] - copy(data, plaintext) // data may fully overlap plaintext - - // Seal appends the ciphertext and the tag to its first argument and - // returns the updated slice. However, sliceForAppend above ensures that - // dst has enough capacity to avoid a reallocation and copy due to the - // append. 
- dst = aead.Seal(dst[:dlen], nonce, data, aad) - return dst, nil -} - -// decrypt is the decryption function for an AEAD crypter, where aead determines -// the type of AEAD crypter, and dst the destination bytes for the decrypted -// ciphertext. The dst buffer may fully overlap with plaintext or not at all. -func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) { - if len(nonce) != NonceSize { - return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) - } - // If dst is equal to ciphertext[:0], ciphertext storage is reused. - plaintext, err := aead.Open(dst, nonce, ciphertext, aad) - if err != nil { - return nil, fmt.Errorf("message auth failed: %v", err) - } - return plaintext, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go deleted file mode 100644 index ddeaa6d77..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package halfconn - -import ( - "crypto/sha256" - "crypto/sha512" - "fmt" - "hash" - - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" - "github.com/google/s2a-go/internal/record/internal/aeadcrypter" -) - -// ciphersuite is the interface for retrieving ciphersuite-specific information -// and utilities. -type ciphersuite interface { - // keySize returns the key size in bytes. This refers to the key used by - // the AEAD crypter. This is derived by calling HKDF expand on the traffic - // secret. - keySize() int - // nonceSize returns the nonce size in bytes. - nonceSize() int - // trafficSecretSize returns the traffic secret size in bytes. This refers - // to the secret used to derive the traffic key and nonce, as specified in - // https://tools.ietf.org/html/rfc8446#section-7. - trafficSecretSize() int - // hashFunction returns the hash function for the ciphersuite. - hashFunction() func() hash.Hash - // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite - // using that key. - aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) -} - -func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) { - switch ciphersuite { - case s2apb.Ciphersuite_AES_128_GCM_SHA256: - return &aesgcm128sha256{}, nil - case s2apb.Ciphersuite_AES_256_GCM_SHA384: - return &aesgcm256sha384{}, nil - case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256: - return &chachapolysha256{}, nil - default: - return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite) - } -} - -// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite -// interface. 
-type aesgcm128sha256 struct{} - -func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize } -func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize } -func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } -func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New } -func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { - return aeadcrypter.NewAESGCM(key) -} - -// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite -// interface. -type aesgcm256sha384 struct{} - -func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize } -func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize } -func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize } -func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 } -func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { - return aeadcrypter.NewAESGCM(key) -} - -// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite -// interface. -type chachapolysha256 struct{} - -func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize } -func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize } -func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } -func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New } -func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { - return aeadcrypter.NewChachaPoly(key) -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go deleted file mode 100644 index 9499cdca7..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package halfconn - -import "errors" - -// counter is a 64-bit counter. -type counter struct { - val uint64 - hasOverflowed bool -} - -// newCounter creates a new counter with the initial value set to val. -func newCounter(val uint64) counter { - return counter{val: val} -} - -// value returns the current value of the counter. -func (c *counter) value() (uint64, error) { - if c.hasOverflowed { - return 0, errors.New("counter has overflowed") - } - return c.val, nil -} - -// increment increments the counter and checks for overflow. -func (c *counter) increment() { - // If the counter is already invalid due to overflow, there is no need to - // increase it. We check for the hasOverflowed flag in the call to value(). - if c.hasOverflowed { - return - } - c.val++ - if c.val == 0 { - c.hasOverflowed = true - } -} - -// reset sets the counter value to zero and sets the hasOverflowed flag to -// false. 
-func (c *counter) reset() { - c.val = 0 - c.hasOverflowed = false -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go deleted file mode 100644 index e05f2c36a..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package halfconn - -import ( - "fmt" - "hash" - - "golang.org/x/crypto/hkdf" -) - -// hkdfExpander is the interface for the HKDF expansion function; see -// https://tools.ietf.org/html/rfc5869 for details. its use in TLS 1.3 is -// specified in https://tools.ietf.org/html/rfc8446#section-7.2 -type hkdfExpander interface { - // expand takes a secret, a label, and the output length in bytes, and - // returns the resulting expanded key. - expand(secret, label []byte, length int) ([]byte, error) -} - -// defaultHKDFExpander is the default HKDF expander which uses Go's crypto/hkdf -// for HKDF expansion. -type defaultHKDFExpander struct { - h func() hash.Hash -} - -// newDefaultHKDFExpander creates an instance of the default HKDF expander -// using the given hash function. -func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander { - return &defaultHKDFExpander{h: h} -} - -func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) { - outBuf := make([]byte, length) - n, err := hkdf.Expand(d.h, secret, label).Read(outBuf) - if err != nil { - return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err) - } - if n < length { - return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length) - } - return outBuf, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go deleted file mode 100644 index dff99ff59..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package halfconn manages the inbound or outbound traffic of a TLS 1.3 -// connection. 
-package halfconn - -import ( - "fmt" - "sync" - - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" - "github.com/google/s2a-go/internal/record/internal/aeadcrypter" - "golang.org/x/crypto/cryptobyte" -) - -// The constants below were taken from Section 7.2 and 7.3 in -// https://tools.ietf.org/html/rfc8446#section-7. They are used as the label -// in HKDF-Expand-Label. -const ( - tls13Key = "tls13 key" - tls13Nonce = "tls13 iv" - tls13Update = "tls13 traffic upd" -) - -// S2AHalfConnection stores the state of the TLS 1.3 connection in the -// inbound or outbound direction. -type S2AHalfConnection struct { - cs ciphersuite - expander hkdfExpander - // mutex guards sequence, aeadCrypter, trafficSecret, and nonce. - mutex sync.Mutex - aeadCrypter aeadcrypter.S2AAEADCrypter - sequence counter - trafficSecret []byte - nonce []byte -} - -// New creates a new instance of S2AHalfConnection given a ciphersuite and a -// traffic secret. -func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) { - cs, err := newCiphersuite(ciphersuite) - if err != nil { - return nil, fmt.Errorf("failed to create new ciphersuite: %v", ciphersuite) - } - if cs.trafficSecretSize() != len(trafficSecret) { - return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret)) - } - - hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret} - if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { - return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err) - } - - return hc, nil -} - -// Encrypt encrypts the plaintext and computes the tag of dst and plaintext. -// dst and plaintext may fully overlap or not at all. Note that the sequence -// number will still be incremented on failure, unless the sequence has -// overflowed. -func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) { - hc.mutex.Lock() - sequence, err := hc.getAndIncrementSequence() - if err != nil { - hc.mutex.Unlock() - return nil, err - } - nonce := hc.maskedNonce(sequence) - crypter := hc.aeadCrypter - hc.mutex.Unlock() - return crypter.Encrypt(dst, plaintext, nonce, aad) -} - -// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may -// fully overlap or not at all. Note that the sequence number will still be -// incremented on failure, unless the sequence has overflowed. -func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) { - hc.mutex.Lock() - sequence, err := hc.getAndIncrementSequence() - if err != nil { - hc.mutex.Unlock() - return nil, err - } - nonce := hc.maskedNonce(sequence) - crypter := hc.aeadCrypter - hc.mutex.Unlock() - return crypter.Decrypt(dst, ciphertext, nonce, aad) -} - -// UpdateKey advances the traffic secret key, as specified in -// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives -// a new key and nonce, and resets the sequence number. 
-func (hc *S2AHalfConnection) UpdateKey() error { - hc.mutex.Lock() - defer hc.mutex.Unlock() - - var err error - hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize()) - if err != nil { - return fmt.Errorf("failed to derive traffic secret: %v", err) - } - - if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil { - return fmt.Errorf("failed to update half connection: %v", err) - } - - hc.sequence.reset() - return nil -} - -// TagSize returns the tag size in bytes of the underlying AEAD crypter. -func (hc *S2AHalfConnection) TagSize() int { - return hc.aeadCrypter.TagSize() -} - -// updateCrypterAndNonce takes a new traffic secret and updates the crypter -// and nonce. Note that the mutex must be held while calling this function. -func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error { - key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize()) - if err != nil { - return fmt.Errorf("failed to update key: %v", err) - } - - hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize()) - if err != nil { - return fmt.Errorf("failed to update nonce: %v", err) - } - - hc.aeadCrypter, err = hc.cs.aeadCrypter(key) - if err != nil { - return fmt.Errorf("failed to update AEAD crypter: %v", err) - } - return nil -} - -// getAndIncrement returns the current sequence number and increments it. Note -// that the mutex must be held while calling this function. -func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) { - sequence, err := hc.sequence.value() - if err != nil { - return 0, err - } - hc.sequence.increment() - return sequence, nil -} - -// maskedNonce creates a copy of the nonce that is masked with the sequence -// number. Note that the mutex must be held while calling this function. -func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte { - const uint64Size = 8 - nonce := make([]byte, len(hc.nonce)) - copy(nonce, hc.nonce) - for i := 0; i < uint64Size; i++ { - nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i)) - } - return nonce -} - -// deriveSecret implements the Derive-Secret function, as specified in -// https://tools.ietf.org/html/rfc8446#section-7.1. -func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) { - var hkdfLabel cryptobyte.Builder - hkdfLabel.AddUint16(uint16(length)) - hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(label) - }) - // Append an empty `Context` field to the label, as specified in the RFC. - // The half connection does not use the `Context` field. - hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte("")) - }) - hkdfLabelBytes, err := hkdfLabel.Bytes() - if err != nil { - return nil, fmt.Errorf("deriveSecret failed: %v", err) - } - return hc.expander.expand(secret, hkdfLabelBytes, length) -} diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go deleted file mode 100644 index c60515510..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/record.go +++ /dev/null @@ -1,757 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package record implements the TLS 1.3 record protocol used by the S2A -// transport credentials. -package record - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "net" - "sync" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - "github.com/google/s2a-go/internal/record/internal/halfconn" - "github.com/google/s2a-go/internal/tokenmanager" - "google.golang.org/grpc/grpclog" -) - -// recordType is the `ContentType` as described in -// https://tools.ietf.org/html/rfc8446#section-5.1. -type recordType byte - -const ( - alert recordType = 21 - handshake recordType = 22 - applicationData recordType = 23 -) - -// keyUpdateRequest is the `KeyUpdateRequest` as described in -// https://tools.ietf.org/html/rfc8446#section-4.6.3. -type keyUpdateRequest byte - -const ( - updateNotRequested keyUpdateRequest = 0 - updateRequested keyUpdateRequest = 1 -) - -// alertDescription is the `AlertDescription` as described in -// https://tools.ietf.org/html/rfc8446#section-6. -type alertDescription byte - -const ( - closeNotify alertDescription = 0 -) - -// sessionTicketState is used to determine whether session tickets have not yet -// been received, are in the process of being received, or have finished -// receiving. -type sessionTicketState byte - -const ( - ticketsNotYetReceived sessionTicketState = 0 - receivingTickets sessionTicketState = 1 - notReceivingTickets sessionTicketState = 2 -) - -const ( - // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize, - // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from - // https://tools.ietf.org/html/rfc8446#section-5.1. - - // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext - // in a single TLS 1.3 record. - tlsRecordMaxPlaintextSize = 16384 // 2^14 - // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type. - tlsRecordTypeSize = 1 - // tlsTagSize is the size in bytes of the tag of the following three - // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384, - // CHACHA20-POLY1305-SHA256. - tlsTagSize = 16 - // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a - // single TLS 1.3 record. This is the maximum size of the plaintext plus the - // record type byte and 16 bytes of the tag. - tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize - // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record - // header type. - tlsRecordHeaderTypeSize = 1 - // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS - // 1.3 record header legacy record version. - tlsRecordHeaderLegacyRecordVersionSize = 2 - // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3 - // record header payload length. - tlsRecordHeaderPayloadLengthSize = 2 - // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header. 
- tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize - // tlsRecordMaxSize - tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize - // tlsApplicationData is the application data type of the TLS 1.3 record - // header. - tlsApplicationData = 23 - // tlsLegacyRecordVersion is the legacy record version of the TLS record. - tlsLegacyRecordVersion = 3 - // tlsAlertSize is the size in bytes of an alert of TLS 1.3. - tlsAlertSize = 2 -) - -const ( - // These are TLS 1.3 handshake-specific constants. - - // tlsHandshakeNewSessionTicketType is the prefix of a handshake new session - // ticket message of TLS 1.3. - tlsHandshakeNewSessionTicketType = 4 - // tlsHandshakeKeyUpdateType is the prefix of a handshake key update message - // of TLS 1.3. - tlsHandshakeKeyUpdateType = 24 - // tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake - // message type field. - tlsHandshakeMsgTypeSize = 1 - // tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake - // message length field. - tlsHandshakeLengthSize = 3 - // tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3 - // handshake key update message. - tlsHandshakeKeyUpdateMsgSize = 1 - // tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3 - // handshake message. - tlsHandshakePrefixSize = 4 - // tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message - // in TLS 1.3. This is the sum of the max sizes of all the fields in the - // NewSessionTicket struct specified in - // https://tools.ietf.org/html/rfc8446#section-4.6.1. - tlsMaxSessionTicketSize = 131338 -) - -const ( - // outBufMaxRecords is the maximum number of records that can fit in the - // ourRecordsBuf buffer. - outBufMaxRecords = 16 - // outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer. - outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize - // maxAllowedTickets is the maximum number of session tickets that are - // allowed. The number of tickets are limited to ensure that the size of the - // ticket queue does not grow indefinitely. S2A also keeps a limit on the - // number of tickets that it caches. - maxAllowedTickets = 5 -) - -// preConstructedKeyUpdateMsg holds the key update message. This is needed as an -// optimization so that the same message does not need to be constructed every -// time a key update message is sent. -var preConstructedKeyUpdateMsg = buildKeyUpdateRequest() - -// conn represents a secured TLS connection. It implements the net.Conn -// interface. -type conn struct { - net.Conn - // inConn is the half connection responsible for decrypting incoming bytes. - inConn *halfconn.S2AHalfConnection - // outConn is the half connection responsible for encrypting outgoing bytes. - outConn *halfconn.S2AHalfConnection - // pendingApplicationData holds data that has been read from the connection - // and decrypted, but has not yet been returned by Read. - pendingApplicationData []byte - // unusedBuf holds data read from the network that has not yet been - // decrypted. This data might not consist of a complete record. It may - // consist of several records, the last of which could be incomplete. - unusedBuf []byte - // outRecordsBuf is a buffer used to store outgoing TLS records before - // they are written to the network. - outRecordsBuf []byte - // nextRecord stores the next record info in the unusedBuf buffer. 
- nextRecord []byte - // overheadSize is the overhead size in bytes of each TLS 1.3 record, which - // is computed as overheadSize = header size + record type byte + tag size. - // Note that there is no padding by zeros in the overhead calculation. - overheadSize int - // readMutex guards against concurrent calls to Read. This is required since - // Close may be called during a Read. - readMutex sync.Mutex - // writeMutex guards against concurrent calls to Write. This is required - // since Close may be called during a Write, and also because a key update - // message may be written during a Read. - writeMutex sync.Mutex - // handshakeBuf holds handshake messages while they are being processed. - handshakeBuf []byte - // ticketState is the current processing state of the session tickets. - ticketState sessionTicketState - // sessionTickets holds the completed session tickets until they are sent to - // the handshaker service for processing. - sessionTickets [][]byte - // ticketSender sends session tickets to the S2A handshaker service. - ticketSender s2aTicketSender - // callComplete is a channel that blocks closing the record protocol until a - // pending call to the S2A completes. - callComplete chan bool -} - -// ConnParameters holds the parameters used for creating a new conn object. -type ConnParameters struct { - // NetConn is the TCP connection to the peer. This parameter is required. - NetConn net.Conn - // Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker - // service. This parameter is required. - Ciphersuite commonpb.Ciphersuite - // TLSVersion is the TLS version number negotiated by the S2A handshaker - // service. This parameter is required. - TLSVersion commonpb.TLSVersion - // InTrafficSecret is the traffic secret used to derive the session key for - // the inbound direction. This parameter is required. - InTrafficSecret []byte - // OutTrafficSecret is the traffic secret used to derive the session key - // for the outbound direction. This parameter is required. - OutTrafficSecret []byte - // UnusedBuf is the data read from the network that has not yet been - // decrypted. This parameter is optional. If not provided, then no - // application data was sent in the same flight of messages as the final - // handshake message. - UnusedBuf []byte - // InSequence is the sequence number of the next, incoming, TLS record. - // This parameter is required. - InSequence uint64 - // OutSequence is the sequence number of the next, outgoing, TLS record. - // This parameter is required. - OutSequence uint64 - // HSAddr stores the address of the S2A handshaker service. This parameter - // is optional. If not provided, then TLS resumption is disabled. - HSAddr string - // ConnectionId is the connection identifier that was created and sent by - // S2A at the end of a handshake. - ConnectionID uint64 - // LocalIdentity is the local identity that was used by S2A during session - // setup and included in the session result. - LocalIdentity *commonpb.Identity - // EnsureProcessSessionTickets allows users to wait and ensure that all - // available session tickets are sent to S2A before a process completes. - EnsureProcessSessionTickets *sync.WaitGroup -} - -// NewConn creates a TLS record protocol that wraps the TCP connection. 
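// [Editor's illustrative aside -- not part of this patch.] Putting the size
// constants above together: all three supported AEADs use a 16-byte tag, so
// the per-record overhead that NewConn computes below is fixed at
// 5 (header) + 1 (record-type byte) + 16 (tag) = 22 bytes, and a full record
// is at most 16384 + 22 = 16406 bytes on the wire. A tiny sketch of that
// arithmetic:

package main

import "fmt"

const (
	maxPlaintext = 16384 // 2^14, RFC 8446 section 5.1
	headerSize   = 5     // type (1) + legacy version (2) + length (2)
	typeByteSize = 1     // inner ContentType appended to the plaintext
	tagSize      = 16    // AES-GCM / ChaCha20-Poly1305 tag
)

func main() {
	overhead := headerSize + typeByteSize + tagSize
	fmt.Println("overhead per record:", overhead)                 // 22
	fmt.Println("max record on the wire:", maxPlaintext+overhead) // 16406
}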
-func NewConn(o *ConnParameters) (net.Conn, error) { - if o == nil { - return nil, errors.New("conn options must not be nil") - } - if o.TLSVersion != commonpb.TLSVersion_TLS1_3 { - return nil, errors.New("TLS version must be TLS 1.3") - } - - inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence) - if err != nil { - return nil, fmt.Errorf("failed to create inbound half connection: %v", err) - } - outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence) - if err != nil { - return nil, fmt.Errorf("failed to create outbound half connection: %v", err) - } - - // The tag size for the in/out connections should be the same. - overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize() - var unusedBuf []byte - if o.UnusedBuf == nil { - // We pre-allocate unusedBuf to be of size - // 2*tlsRecordMaxSize-1 during initialization. We only read from the - // network into unusedBuf when unusedBuf does not contain a complete - // record and the incomplete record is at most tlsRecordMaxSize-1 - // (bytes). And we read at most tlsRecordMaxSize bytes of data from the - // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1 - // is large enough to buffer data read from the network. - unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1) - } else { - unusedBuf = make([]byte, len(o.UnusedBuf)) - copy(unusedBuf, o.UnusedBuf) - } - - tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - if err != nil { - grpclog.Infof("failed to create single token access token manager: %v", err) - } - - s2aConn := &conn{ - Conn: o.NetConn, - inConn: inConn, - outConn: outConn, - unusedBuf: unusedBuf, - outRecordsBuf: make([]byte, tlsRecordMaxSize), - nextRecord: unusedBuf, - overheadSize: overheadSize, - ticketState: ticketsNotYetReceived, - // Pre-allocate the buffer for one session ticket message and the max - // plaintext size. This is the largest size that handshakeBuf will need - // to hold. The largest incomplete handshake message is the - // [handshake header size] + [max session ticket size] - 1. - // Then, tlsRecordMaxPlaintextSize is the maximum size that will be - // appended to the handshakeBuf before the handshake message is - // completed. Therefore, the buffer size below should be large enough to - // buffer any handshake messages. - handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1), - ticketSender: &ticketSender{ - hsAddr: o.HSAddr, - connectionID: o.ConnectionID, - localIdentity: o.LocalIdentity, - tokenManager: tokenManager, - ensureProcessSessionTickets: o.EnsureProcessSessionTickets, - }, - callComplete: make(chan bool), - } - return s2aConn, nil -} - -// Read reads and decrypts a TLS 1.3 record from the underlying connection, and -// copies any application data received from the peer into b. If the size of the -// payload is greater than len(b), Read retains the remaining bytes in an -// internal buffer, and subsequent calls to Read will read from this buffer -// until it is exhausted. At most 1 TLS record worth of application data is -// written to b for each call to Read. -// -// Note that for the user to efficiently call this method, the user should -// ensure that the buffer b is allocated such that the buffer does not have any -// unused segments. This can be done by calling Read via io.ReadFull, which -// continually calls Read until the specified buffer has been filled. 
Also note -// that the user should close the connection via Close() if an error is thrown -// by a call to Read. -func (p *conn) Read(b []byte) (n int, err error) { - p.readMutex.Lock() - defer p.readMutex.Unlock() - // Check if p.pendingApplication data has leftover application data from - // the previous call to Read. - if len(p.pendingApplicationData) == 0 { - // Read a full record from the wire. - record, err := p.readFullRecord() - if err != nil { - return 0, err - } - // Now we have a complete record, so split the header and validate it - // The TLS record is split into 2 pieces: the record header and the - // payload. The payload has the following form: - // [payload] = [ciphertext of application data] - // + [ciphertext of record type byte] - // + [(optionally) ciphertext of padding by zeros] - // + [tag] - header, payload, err := splitAndValidateHeader(record) - if err != nil { - return 0, err - } - // Decrypt the ciphertext. - p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header) - if err != nil { - return 0, err - } - // Remove the padding by zeros and the record type byte from the - // p.pendingApplicationData buffer. - msgType, err := p.stripPaddingAndType() - if err != nil { - return 0, err - } - // Check that the length of the plaintext after stripping the padding - // and record type byte is under the maximum plaintext size. - if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize { - return 0, errors.New("plaintext size larger than maximum") - } - // The expected message types are application data, alert, and - // handshake. For application data, the bytes are directly copied into - // b. For an alert, the type of the alert is checked and the connection - // is closed on a close notify alert. For a handshake message, the - // handshake message type is checked. The handshake message type can be - // a key update type, for which we advance the traffic secret, and a - // new session ticket type, for which we send the received ticket to S2A - // for processing. - switch msgType { - case applicationData: - if len(p.handshakeBuf) > 0 { - return 0, errors.New("application data received while processing fragmented handshake messages") - } - if p.ticketState == receivingTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } - case alert: - return 0, p.handleAlertMessage() - case handshake: - if err = p.handleHandshakeMessage(); err != nil { - return 0, err - } - return 0, nil - default: - return 0, errors.New("unknown record type") - } - } - // Write as much application data as possible to b, the output buffer. - n = copy(b, p.pendingApplicationData) - p.pendingApplicationData = p.pendingApplicationData[n:] - return n, nil -} - -// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a -// TLS 1.3 record (of type "application data") from each segment, and sends -// the record to the peer. It returns the number of plaintext bytes that were -// successfully sent to the peer. -func (p *conn) Write(b []byte) (n int, err error) { - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - return p.writeTLSRecord(b, tlsApplicationData) -} - -// writeTLSRecord divides b into segments of size maxPlaintextBytesPerRecord, -// builds a TLS 1.3 record (of type recordType) from each segment, and sends -// the record to the peer. It returns the number of plaintext bytes that were -// successfully sent to the peer. 
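// [Editor's illustrative aside -- not part of this patch.] buildRecord further
// below emits a fixed 5-byte TLS 1.3 record header: outer type
// application_data (23), the frozen legacy_record_version 0x0303, and a
// big-endian length covering the encrypted payload (plaintext + inner
// record-type byte + 16-byte tag). A standalone sketch of just the header
// encoding:

package main

import (
	"encoding/binary"
	"fmt"
)

// recordHeader returns the 5-byte header for a TLS 1.3 record whose encrypted
// payload is payloadLen bytes long.
func recordHeader(payloadLen int) []byte {
	h := make([]byte, 5)
	h[0] = 23   // application_data
	h[1] = 0x03 // legacy_record_version, always 0x0303 in TLS 1.3
	h[2] = 0x03
	binary.BigEndian.PutUint16(h[3:], uint16(payloadLen))
	return h
}

func main() {
	// 100 plaintext bytes + 1 record-type byte + 16-byte tag = 117-byte payload.
	fmt.Printf("% x\n", recordHeader(100+1+16)) // 17 03 03 00 75
}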
-func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) { - // Create a record of only header, record type, and tag if given empty - // byte array. - if len(b) == 0 { - recordEndIndex, _, err := p.buildRecord(b, recordType, 0) - if err != nil { - return 0, err - } - - // Write the bytes stored in outRecordsBuf to p.Conn. Since we return - // the number of plaintext bytes written without overhead, we will - // always return 0 while p.Conn.Write returns the entire record length. - _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) - return 0, err - } - - numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize))) - totalRecordsSize := len(b) + numRecords*p.overheadSize - partialBSize := len(b) - if totalRecordsSize > outBufMaxSize { - totalRecordsSize = outBufMaxSize - partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize - } - if len(p.outRecordsBuf) < totalRecordsSize { - p.outRecordsBuf = make([]byte, totalRecordsSize) - } - for bStart := 0; bStart < len(b); bStart += partialBSize { - bEnd := bStart + partialBSize - if bEnd > len(b) { - bEnd = len(b) - } - partialB := b[bStart:bEnd] - recordEndIndex := 0 - for len(partialB) > 0 { - recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex) - if err != nil { - // Return the amount of bytes written prior to the error. - return bStart, err - } - } - // Write the bytes stored in outRecordsBuf to p.Conn. If there is an - // error, calculate the total number of plaintext bytes of complete - // records successfully written to the peer and return it. - nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) - if err != nil { - numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize))) - return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err - } - } - return len(b), nil -} - -// buildRecord builds a TLS 1.3 record of type recordType from plaintext, -// and writes the record to outRecordsBuf at recordStartIndex. The record will -// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the -// index of outRecordsBuf where the current record ends, as well as any -// remaining plaintext bytes. -func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) { - // Construct the payload, which consists of application data and record type. - dataLen := len(plaintext) - if dataLen > tlsRecordMaxPlaintextSize { - dataLen = tlsRecordMaxPlaintextSize - } - remainingPlaintext = plaintext[dataLen:] - newRecordBuf := p.outRecordsBuf[recordStartIndex:] - - copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen]) - newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType - payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType. - // Construct the header. - newRecordBuf[0] = tlsApplicationData - newRecordBuf[1] = tlsLegacyRecordVersion - newRecordBuf[2] = tlsLegacyRecordVersion - binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize)) - header := newRecordBuf[:tlsRecordHeaderSize] - - // Encrypt the payload using header as aad. 
- encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header) - if err != nil { - return 0, plaintext, err - } - recordStartIndex += len(header) + len(encryptedPayload) - return recordStartIndex, remainingPlaintext, nil -} - -func (p *conn) Close() error { - p.readMutex.Lock() - defer p.readMutex.Unlock() - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - // If p.ticketState is equal to notReceivingTickets, then S2A has - // been sent a flight of session tickets, and we must wait for the - // call to S2A to complete before closing the record protocol. - if p.ticketState == notReceivingTickets { - <-p.callComplete - grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") - } - return p.Conn.Close() -} - -// stripPaddingAndType strips the padding by zeros and record type from -// p.pendingApplicationData and returns the record type. Note that -// p.pendingApplicationData should be of the form: -// [application data] + [record type byte] + [trailing zeros] -func (p *conn) stripPaddingAndType() (recordType, error) { - if len(p.pendingApplicationData) == 0 { - return 0, errors.New("application data had length 0") - } - i := len(p.pendingApplicationData) - 1 - // Search for the index of the record type byte. - for i > 0 { - if p.pendingApplicationData[i] != 0 { - break - } - i-- - } - rt := recordType(p.pendingApplicationData[i]) - p.pendingApplicationData = p.pendingApplicationData[:i] - return rt, nil -} - -// readFullRecord reads from the wire until a record is completed and returns -// the full record. -func (p *conn) readFullRecord() (fullRecord []byte, err error) { - fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize) - if err != nil { - return nil, err - } - // Check whether the next record to be decrypted has been completely - // received. - if len(fullRecord) == 0 { - copy(p.unusedBuf, p.nextRecord) - p.unusedBuf = p.unusedBuf[:len(p.nextRecord)] - // Always copy next incomplete record to the beginning of the - // unusedBuf buffer and reset nextRecord to it. - p.nextRecord = p.unusedBuf - } - // Keep reading from the wire until we have a complete record. - for len(fullRecord) == 0 { - if len(p.unusedBuf) == cap(p.unusedBuf) { - tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize) - copy(tmp, p.unusedBuf) - p.unusedBuf = tmp - } - n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)]) - if err != nil { - return nil, err - } - p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n] - fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize) - if err != nil { - return nil, err - } - } - return fullRecord, nil -} - -// parseReadBuffer parses the provided buffer and returns a full record and any -// remaining bytes in that buffer. If the record is incomplete, nil is returned -// for the first return value and the given byte buffer is returned for the -// second return value. The length of the payload specified by the header should -// not be greater than maxLen, otherwise an error is returned. Note that this -// function does not allocate or copy any buffers. -func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) { - // If the header is not complete, return the provided buffer as remaining - // buffer. 
- if len(b) < tlsRecordHeaderSize { - return nil, b, nil - } - msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize] - length := binary.BigEndian.Uint16(msgLenField) - if length > maxLen { - return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen) - } - if len(b) < int(length)+tlsRecordHeaderSize { - // Record is not complete yet. - return nil, b, nil - } - return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil -} - -// splitAndValidateHeader splits the header from the payload in the TLS 1.3 -// record and returns them. Note that the header is checked for validity, and an -// error is returned when an invalid header is parsed. Also note that this -// function does not allocate or copy any buffers. -func splitAndValidateHeader(record []byte) (header, payload []byte, err error) { - if len(record) < tlsRecordHeaderSize { - return nil, nil, fmt.Errorf("record was smaller than the header size") - } - header = record[:tlsRecordHeaderSize] - payload = record[tlsRecordHeaderSize:] - if header[0] != tlsApplicationData { - return nil, nil, fmt.Errorf("incorrect type in the header") - } - // Check the legacy record version, which should be 0x03, 0x03. - if header[1] != 0x03 || header[2] != 0x03 { - return nil, nil, fmt.Errorf("incorrect legacy record version in the header") - } - return header, payload, nil -} - -// handleAlertMessage handles an alert message. -func (p *conn) handleAlertMessage() error { - if len(p.pendingApplicationData) != tlsAlertSize { - return errors.New("invalid alert message size") - } - alertType := p.pendingApplicationData[1] - // Clear the body of the alert message. - p.pendingApplicationData = p.pendingApplicationData[:0] - if alertType == byte(closeNotify) { - return errors.New("received a close notify alert") - } - // TODO(matthewstevenson88): Add support for more alert types. - return fmt.Errorf("received an unrecognized alert type: %v", alertType) -} - -// parseHandshakeHeader parses a handshake message from the handshake buffer. -// It returns the message type, the message length, the message, the raw message -// that includes the type and length bytes and a flag indicating whether the -// handshake message has been fully parsed. i.e. whether the entire handshake -// message was in the handshake buffer. -func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) { - // Handle the case where the 4 byte handshake header is fragmented. - if len(p.handshakeBuf) < tlsHandshakePrefixSize { - return 0, 0, nil, nil, false - } - msgType = p.handshakeBuf[0] - msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize]) - if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) { - return 0, 0, nil, nil, false - } - msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen] - rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen] - p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:] - return msgType, msgLen, msg, rawMsg, true -} - -// handleHandshakeMessage handles a handshake message. Note that the first -// complete handshake message from the handshake buffer is removed, if it -// exists. -func (p *conn) handleHandshakeMessage() error { - // Copy the pending application data to the handshake buffer. At this point, - // we are guaranteed that the pending application data contains only parts - // of a handshake message. 
- p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...) - p.pendingApplicationData = p.pendingApplicationData[:0] - // Several handshake messages may be coalesced into a single record. - // Continue reading them until the handshake buffer is empty. - for len(p.handshakeBuf) > 0 { - handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() - if !ok { - // The handshake could not be fully parsed, so read in another - // record and try again later. - break - } - switch handshakeMsgType { - case tlsHandshakeKeyUpdateType: - if msgLen != tlsHandshakeKeyUpdateMsgSize { - return errors.New("invalid handshake key update message length") - } - if len(p.handshakeBuf) != 0 { - return errors.New("key update message must be the last message of a handshake record") - } - if err := p.handleKeyUpdateMsg(msg); err != nil { - return err - } - case tlsHandshakeNewSessionTicketType: - // Ignore tickets that are received after a batch of tickets has - // been sent to S2A. - if p.ticketState == notReceivingTickets { - continue - } - if p.ticketState == ticketsNotYetReceived { - p.ticketState = receivingTickets - } - p.sessionTickets = append(p.sessionTickets, rawMsg) - if len(p.sessionTickets) == maxAllowedTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } - default: - return errors.New("unknown handshake message type") - } - } - return nil -} - -func buildKeyUpdateRequest() []byte { - b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize) - b[0] = tlsHandshakeKeyUpdateType - b[1] = 0 - b[2] = 0 - b[3] = tlsHandshakeKeyUpdateMsgSize - b[4] = byte(updateNotRequested) - return b -} - -// handleKeyUpdateMsg handles a key update message. -func (p *conn) handleKeyUpdateMsg(msg []byte) error { - keyUpdateRequest := msg[0] - if keyUpdateRequest != byte(updateNotRequested) && - keyUpdateRequest != byte(updateRequested) { - return errors.New("invalid handshake key update message") - } - if err := p.inConn.UpdateKey(); err != nil { - return err - } - // Send a key update message back to the peer if requested. - if keyUpdateRequest == byte(updateRequested) { - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake)) - if err != nil { - return err - } - if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize { - return errors.New("key update request message wrote less bytes than expected") - } - if err = p.outConn.UpdateKey(); err != nil { - return err - } - } - return nil -} - -// bidEndianInt24 converts the given byte buffer of at least size 3 and -// outputs the resulting 24 bit integer as a uint32. This is needed because -// TLS 1.3 requires 3 byte integers, and the binary.BigEndian package does -// not provide a way to transform a byte buffer into a 3 byte integer. 
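// [Editor's illustrative aside -- not part of this patch.] TLS handshake
// messages carry a 1-byte message type followed by a 24-bit big-endian length,
// which is why the helpers above and below hand-roll 3-byte integers. Working
// through buildKeyUpdateRequest above, the pre-constructed
// KeyUpdate(update_not_requested) message is the 5 bytes 0x18 0x00 0x00 0x01
// 0x00. A minimal sketch of the 24-bit encode/decode:

package main

import "fmt"

func put24(v uint32) []byte { return []byte{byte(v >> 16), byte(v >> 8), byte(v)} }

func get24(b []byte) uint32 { return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) }

func main() {
	keyUpdate := append([]byte{24}, put24(1)...) // handshake type + 3-byte length
	keyUpdate = append(keyUpdate, 0)             // KeyUpdateRequest = update_not_requested
	fmt.Printf("% x\n", keyUpdate)               // 18 00 00 01 00
	fmt.Println(get24(keyUpdate[1:4]))           // 1
}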
-func bigEndianInt24(b []byte) uint32 { - _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go deleted file mode 100644 index e51199ab3..000000000 --- a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +++ /dev/null @@ -1,178 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package record - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/google/s2a-go/internal/handshaker/service" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" - "github.com/google/s2a-go/internal/tokenmanager" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" -) - -// sessionTimeout is the timeout for creating a session with the S2A handshaker -// service. -const sessionTimeout = time.Second * 5 - -// s2aTicketSender sends session tickets to the S2A handshaker service. -type s2aTicketSender interface { - // sendTicketsToS2A sends the given session tickets to the S2A handshaker - // service. - sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) -} - -// ticketStream is the stream used to send and receive session information. -type ticketStream interface { - Send(*s2apb.SessionReq) error - Recv() (*s2apb.SessionResp, error) -} - -type ticketSender struct { - // hsAddr stores the address of the S2A handshaker service. - hsAddr string - // connectionID is the connection identifier that was created and sent by - // S2A at the end of a handshake. - connectionID uint64 - // localIdentity is the local identity that was used by S2A during session - // setup and included in the session result. - localIdentity *commonpb.Identity - // tokenManager manages access tokens for authenticating to S2A. - tokenManager tokenmanager.AccessTokenManager - // ensureProcessSessionTickets allows users to wait and ensure that all - // available session tickets are sent to S2A before a process completes. - ensureProcessSessionTickets *sync.WaitGroup -} - -// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker -// service. This is done asynchronously and writes to the error logs if an error -// occurs. -func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) { - // Note that the goroutine is in the function rather than at the caller - // because the fake ticket sender used for testing must run synchronously - // so that the session tickets can be accessed from it after the tests have - // been run. 
- if t.ensureProcessSessionTickets != nil { - t.ensureProcessSessionTickets.Add(1) - } - go func() { - if err := func() error { - defer func() { - if t.ensureProcessSessionTickets != nil { - t.ensureProcessSessionTickets.Done() - } - }() - ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) - defer cancel() - // The transportCreds only needs to be set when talking to S2AV2 and also - // if mTLS is required. - hsConn, err := service.Dial(ctx, t.hsAddr, nil) - if err != nil { - return err - } - client := s2apb.NewS2AServiceClient(hsConn) - session, err := client.SetUpSession(ctx) - if err != nil { - return err - } - defer func() { - if err := session.CloseSend(); err != nil { - grpclog.Error(err) - } - }() - return t.writeTicketsToStream(session, sessionTickets) - }(); err != nil { - grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v", - t.localIdentity, err) - } - callComplete <- true - close(callComplete) - }() -} - -// writeTicketsToStream writes the given session tickets to the given stream. -func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error { - if err := stream.Send( - &s2apb.SessionReq{ - ReqOneof: &s2apb.SessionReq_ResumptionTicket{ - ResumptionTicket: &s2apb.ResumptionTicketReq{ - InBytes: sessionTickets, - ConnectionId: t.connectionID, - LocalIdentity: t.localIdentity, - }, - }, - AuthMechanisms: t.getAuthMechanisms(), - }, - ); err != nil { - return err - } - sessionResp, err := stream.Recv() - if err != nil { - return err - } - if sessionResp.GetStatus().GetCode() != uint32(codes.OK) { - return fmt.Errorf("s2a session ticket response had error status: %v, %v", - sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails()) - } - return nil -} - -func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism { - if t.tokenManager == nil { - return nil - } - // First handle the special case when no local identity has been provided - // by the application. In this case, an AuthenticationMechanism with no local - // identity will be sent. - if t.localIdentity == nil { - token, err := t.tokenManager.DefaultToken() - if err != nil { - grpclog.Infof("unable to get token for empty local identity: %v", err) - return nil - } - return []*s2apb.AuthenticationMechanism{ - { - MechanismOneof: &s2apb.AuthenticationMechanism_Token{ - Token: token, - }, - }, - } - } - - // Next, handle the case where the application (or the S2A) has specified - // a local identity. - token, err := t.tokenManager.Token(t.localIdentity) - if err != nil { - grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err) - return nil - } - return []*s2apb.AuthenticationMechanism{ - { - Identity: t.localIdentity, - MechanismOneof: &s2apb.AuthenticationMechanism_Token{ - Token: token, - }, - }, - } -} diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go deleted file mode 100644 index ec96ba3b6..000000000 --- a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package tokenmanager provides tokens for authenticating to S2A. -package tokenmanager - -import ( - "fmt" - "os" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" -) - -const ( - s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN" -) - -// AccessTokenManager manages tokens for authenticating to S2A. -type AccessTokenManager interface { - // DefaultToken returns a token that an application with no specified local - // identity must use to authenticate to S2A. - DefaultToken() (token string, err error) - // Token returns a token that an application with local identity equal to - // identity must use to authenticate to S2A. - Token(identity *commonpb.Identity) (token string, err error) -} - -type singleTokenAccessTokenManager struct { - token string -} - -// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance -// that will always manage the same token. -// -// The token to be managed is read from the s2aAccessTokenEnvironmentVariable -// environment variable. If this environment variable is not set, then this -// function returns an error. -func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) { - token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable) - if !variableExists { - return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable) - } - return &singleTokenAccessTokenManager{token: token}, nil -} - -// DefaultToken always returns the token managed by the -// singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { - return m.token, nil -} - -// Token always returns the token managed by the singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { - return m.token, nil -} diff --git a/vendor/github.com/google/s2a-go/internal/v2/README.md b/vendor/github.com/google/s2a-go/internal/v2/README.md deleted file mode 100644 index 3806d1e9c..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/README.md +++ /dev/null @@ -1 +0,0 @@ -**This directory has the implementation of the S2Av2's gRPC-Go client libraries** diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go deleted file mode 100644 index cc811879b..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go +++ /dev/null @@ -1,122 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package certverifier offloads verifications to S2Av2. -package certverifier - -import ( - "crypto/x509" - "fmt" - - "github.com/google/s2a-go/stream" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and -// receives a SessionResp. -func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - // Offload verification to S2Av2. - if grpclog.V(1) { - grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.") - } - if err := s2AStream.Send(&s2av2pb.SessionReq{ - ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ - ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ - Mode: verificationMode, - PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{ - ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{ - CertificateChain: rawCerts, - }, - }, - }, - }, - }); err != nil { - grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.") - return err - } - - // Get the response from S2Av2. - resp, err := s2AStream.Recv() - if err != nil { - grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.") - return err - } - - // Parse the response. - if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { - return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) - - } - - if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { - return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) - } - - return nil - } -} - -// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and -// receives a SessionResp. -func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - // Offload verification to S2Av2. - if grpclog.V(1) { - grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.") - } - if err := s2AStream.Send(&s2av2pb.SessionReq{ - ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ - ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ - Mode: verificationMode, - PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{ - ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{ - CertificateChain: rawCerts, - ServerHostname: hostname, - SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy, - }, - }, - }, - }, - }); err != nil { - grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.") - return err - } - - // Get the response from S2Av2. - resp, err := s2AStream.Recv() - if err != nil { - grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.") - return err - } - - // Parse the response. 
- if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { - return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) - } - - if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { - return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) - } - - return nil - } -} diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go deleted file mode 100644 index e7478d43f..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go +++ /dev/null @@ -1,186 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package remotesigner offloads private key operations to S2Av2. -package remotesigner - -import ( - "crypto" - "crypto/rsa" - "crypto/x509" - "fmt" - "io" - - "github.com/google/s2a-go/stream" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -// remoteSigner implementes the crypto.Signer interface. -type remoteSigner struct { - leafCert *x509.Certificate - s2AStream stream.S2AStream -} - -// New returns an instance of RemoteSigner, an implementation of the -// crypto.Signer interface. -func New(leafCert *x509.Certificate, s2AStream stream.S2AStream) crypto.Signer { - return &remoteSigner{leafCert, s2AStream} -} - -func (s *remoteSigner) Public() crypto.PublicKey { - return s.leafCert.PublicKey -} - -func (s *remoteSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { - signatureAlgorithm, err := getSignatureAlgorithm(opts, s.leafCert) - if err != nil { - return nil, err - } - - req, err := getSignReq(signatureAlgorithm, digest) - if err != nil { - return nil, err - } - if grpclog.V(1) { - grpclog.Infof("Sending request to S2Av2 for signing operation.") - } - if err := s.s2AStream.Send(&s2av2pb.SessionReq{ - ReqOneof: &s2av2pb.SessionReq_OffloadPrivateKeyOperationReq{ - OffloadPrivateKeyOperationReq: req, - }, - }); err != nil { - grpclog.Infof("Failed to send request to S2Av2 for signing operation.") - return nil, err - } - - resp, err := s.s2AStream.Recv() - if err != nil { - grpclog.Infof("Failed to receive signing operation response from S2Av2.") - return nil, err - } - - if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { - return nil, fmt.Errorf("failed to offload signing with private key to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) - } - - return resp.GetOffloadPrivateKeyOperationResp().GetOutBytes(), nil -} - -// getCert returns the leafCert field in s. -func (s *remoteSigner) getCert() *x509.Certificate { - return s.leafCert -} - -// getStream returns the s2AStream field in s. 
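// [Editor's illustrative aside -- not part of this patch.] remoteSigner above
// implements crypto.Signer, which is the integration point that matters here:
// any crypto.Signer can be set as the PrivateKey of a tls.Certificate, letting
// crypto/tls run the handshake while the actual signing happens elsewhere (in
// this package, in S2Av2). A self-contained sketch of that pattern, with a
// local ECDSA key standing in for the remote service:

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/tls"
	"fmt"
	"io"
)

// offloadingSigner wraps a key and logs each signing request, mimicking how a
// remote signer forwards digests instead of holding key material itself.
type offloadingSigner struct{ key *ecdsa.PrivateKey }

func (s *offloadingSigner) Public() crypto.PublicKey { return s.key.Public() }

func (s *offloadingSigner) Sign(r io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {
	fmt.Printf("offloading signature over a %d-byte digest (%v)\n", len(digest), opts.HashFunc())
	return s.key.Sign(r, digest, opts)
}

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// The certificate chain is omitted for brevity; crypto/tls only requires
	// that PrivateKey satisfy crypto.Signer.
	cert := tls.Certificate{PrivateKey: &offloadingSigner{key: key}}
	_ = cert
	fmt.Println("crypto.Signer wired into tls.Certificate")
}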
-func (s *remoteSigner) getStream() stream.S2AStream { - return s.s2AStream -} - -func getSignReq(signatureAlgorithm s2av2pb.SignatureAlgorithm, digest []byte) (*s2av2pb.OffloadPrivateKeyOperationReq, error) { - if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256) { - return &s2av2pb.OffloadPrivateKeyOperationReq{ - Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, - SignatureAlgorithm: signatureAlgorithm, - InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha256Digest{ - Sha256Digest: digest, - }, - }, nil - } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384) { - return &s2av2pb.OffloadPrivateKeyOperationReq{ - Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, - SignatureAlgorithm: signatureAlgorithm, - InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha384Digest{ - Sha384Digest: digest, - }, - }, nil - } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519) { - return &s2av2pb.OffloadPrivateKeyOperationReq{ - Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, - SignatureAlgorithm: signatureAlgorithm, - InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha512Digest{ - Sha512Digest: digest, - }, - }, nil - } else { - return nil, fmt.Errorf("unknown signature algorithm: %v", signatureAlgorithm) - } -} - -// getSignatureAlgorithm returns the signature algorithm that S2A must use when -// performing a signing operation that has been offloaded by an application -// using the crypto/tls libraries. 
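// [Editor's illustrative aside -- not part of this patch.] The dispatch in
// getSignatureAlgorithm below reflects how crypto/tls typically reports signing
// parameters: RSA-PSS signatures arrive as *rsa.PSSOptions, while the other
// schemes pass the hash directly (a crypto.Hash is itself a crypto.SignerOpts).
// A small sketch of that distinction:

package main

import (
	"crypto"
	"crypto/rsa"
	"fmt"
)

func classify(opts crypto.SignerOpts) string {
	if _, ok := opts.(*rsa.PSSOptions); ok {
		return fmt.Sprintf("RSA-PSS over %v", opts.HashFunc())
	}
	return fmt.Sprintf("plain SignerOpts over %v", opts.HashFunc())
}

func main() {
	fmt.Println(classify(crypto.SHA256))                        // plain SignerOpts over SHA-256
	fmt.Println(classify(&rsa.PSSOptions{Hash: crypto.SHA384})) // RSA-PSS over SHA-384
}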
-func getSignatureAlgorithm(opts crypto.SignerOpts, leafCert *x509.Certificate) (s2av2pb.SignatureAlgorithm, error) { - if opts == nil || leafCert == nil { - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") - } - switch leafCert.PublicKeyAlgorithm { - case x509.RSA: - if rsaPSSOpts, ok := opts.(*rsa.PSSOptions); ok { - return rsaPSSAlgorithm(rsaPSSOpts) - } - return rsaPPKCS1Algorithm(opts) - case x509.ECDSA: - return ecdsaAlgorithm(opts) - case x509.Ed25519: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519, nil - default: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm: %q", leafCert.PublicKeyAlgorithm) - } -} - -func rsaPSSAlgorithm(opts *rsa.PSSOptions) (s2av2pb.SignatureAlgorithm, error) { - switch opts.HashFunc() { - case crypto.SHA256: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256, nil - case crypto.SHA384: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384, nil - case crypto.SHA512: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512, nil - default: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") - } -} - -func rsaPPKCS1Algorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { - switch opts.HashFunc() { - case crypto.SHA256: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256, nil - case crypto.SHA384: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384, nil - case crypto.SHA512: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512, nil - default: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") - } -} - -func ecdsaAlgorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { - switch opts.HashFunc() { - case crypto.SHA256: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256, nil - case crypto.SHA384: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384, nil - case crypto.SHA512: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512, nil - default: - return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") - } -} diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go deleted file mode 100644 index 85a8379d8..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package v2 provides the S2Av2 transport credentials used by a gRPC -// application. 
-package v2 - -import ( - "context" - "crypto/tls" - "errors" - "net" - "os" - "time" - - "github.com/golang/protobuf/proto" - "github.com/google/s2a-go/fallback" - "github.com/google/s2a-go/internal/handshaker/service" - "github.com/google/s2a-go/internal/tokenmanager" - "github.com/google/s2a-go/internal/v2/tlsconfigstore" - "github.com/google/s2a-go/retry" - "github.com/google/s2a-go/stream" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -const ( - s2aSecurityProtocol = "tls" - defaultS2ATimeout = 6 * time.Second -) - -// An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake. -const s2aTimeoutEnv = "S2A_TIMEOUT" - -type s2av2TransportCreds struct { - info *credentials.ProtocolInfo - isClient bool - serverName string - s2av2Address string - transportCreds credentials.TransportCredentials - tokenManager *tokenmanager.AccessTokenManager - // localIdentity should only be used by the client. - localIdentity *commonpbv1.Identity - // localIdentities should only be used by the server. - localIdentities []*commonpbv1.Identity - verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode - fallbackClientHandshake fallback.ClientHandshake - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) - serverAuthorizationPolicy []byte -} - -// NewClientCreds returns a client-side transport credentials object that uses -// the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { - // Create an AccessTokenManager instance to use to authenticate to S2Av2. - accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - - creds := &s2av2TransportCreds{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: s2aSecurityProtocol, - }, - isClient: true, - serverName: "", - s2av2Address: s2av2Address, - transportCreds: transportCreds, - localIdentity: localIdentity, - verificationMode: verificationMode, - fallbackClientHandshake: fallbackClientHandshakeFunc, - getS2AStream: getS2AStream, - serverAuthorizationPolicy: serverAuthorizationPolicy, - } - if err != nil { - creds.tokenManager = nil - } else { - creds.tokenManager = &accessTokenManager - } - if grpclog.V(1) { - grpclog.Info("Created client S2Av2 transport credentials.") - } - return creds, nil -} - -// NewServerCreds returns a server-side transport credentials object that uses -// the S2Av2 to establish a secure connection with a client. -func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { - // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
- accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - creds := &s2av2TransportCreds{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: s2aSecurityProtocol, - }, - isClient: false, - s2av2Address: s2av2Address, - transportCreds: transportCreds, - localIdentities: localIdentities, - verificationMode: verificationMode, - getS2AStream: getS2AStream, - } - if err != nil { - creds.tokenManager = nil - } else { - creds.tokenManager = &accessTokenManager - } - if grpclog.V(1) { - grpclog.Info("Created server S2Av2 transport credentials.") - } - return creds, nil -} - -// ClientHandshake performs a client-side mTLS handshake using the S2Av2. -func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if !c.isClient { - return nil, nil, errors.New("client handshake called using server transport credentials") - } - // Remove the port from serverAuthority. - serverName := removeServerNamePort(serverAuthority) - timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout()) - defer cancel() - var s2AStream stream.S2AStream - var err error - retry.Run(timeoutCtx, - func() error { - s2AStream, err = createStream(timeoutCtx, c.s2av2Address, c.transportCreds, c.getS2AStream) - return err - }) - if err != nil { - grpclog.Infof("Failed to connect to S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - defer s2AStream.CloseSend() - if grpclog.V(1) { - grpclog.Infof("Connected to S2Av2.") - } - var config *tls.Config - - var tokenManager tokenmanager.AccessTokenManager - if c.tokenManager == nil { - tokenManager = nil - } else { - tokenManager = *c.tokenManager - } - - sn := serverName - if c.serverName != "" { - sn = c.serverName - } - retry.Run(timeoutCtx, - func() error { - config, err = tlsconfigstore.GetTLSConfigurationForClient(sn, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - return err - }) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - if grpclog.V(1) { - grpclog.Infof("Got client TLS config from S2Av2.") - } - - creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(timeoutCtx, - func() error { - conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) - return err - }) - if err != nil { - grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName) - - return conn, authInfo, err -} - -// ServerHandshake performs a server-side mTLS handshake using the S2Av2. 
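// [Editor's illustrative aside -- not part of this patch.] ClientHandshake
// above normalizes the gRPC server authority before using it as a TLS server
// name: removeServerNamePort (defined further down in this file) drops a
// trailing ":port" when one is present and otherwise returns the input
// unchanged. A standalone sketch of that behavior:

package main

import (
	"fmt"
	"net"
)

func hostOnly(serverAuthority string) string {
	host, _, err := net.SplitHostPort(serverAuthority)
	if err != nil {
		// Not host:port shaped (e.g. no port) -- keep the authority as-is.
		return serverAuthority
	}
	return host
}

func main() {
	fmt.Println(hostOnly("example.com:443")) // example.com
	fmt.Println(hostOnly("example.com"))     // example.com
}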
-func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if c.isClient { - return nil, nil, errors.New("server handshake called using client transport credentials") - } - ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout()) - defer cancel() - var s2AStream stream.S2AStream - var err error - retry.Run(ctx, - func() error { - s2AStream, err = createStream(ctx, c.s2av2Address, c.transportCreds, c.getS2AStream) - return err - }) - if err != nil { - grpclog.Infof("Failed to connect to S2Av2: %v", err) - return nil, nil, err - } - defer s2AStream.CloseSend() - if grpclog.V(1) { - grpclog.Infof("Connected to S2Av2.") - } - - var tokenManager tokenmanager.AccessTokenManager - if c.tokenManager == nil { - tokenManager = nil - } else { - tokenManager = *c.tokenManager - } - - var config *tls.Config - retry.Run(ctx, - func() error { - config, err = tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) - return err - }) - if err != nil { - grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err) - return nil, nil, err - } - if grpclog.V(1) { - grpclog.Infof("Got server TLS config from S2Av2.") - } - - creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(ctx, - func() error { - conn, authInfo, err = creds.ServerHandshake(rawConn) - return err - }) - if err != nil { - grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) - return nil, nil, err - } - return conn, authInfo, err -} - -// Info returns protocol info of s2av2TransportCreds. -func (c *s2av2TransportCreds) Info() credentials.ProtocolInfo { - return *c.info -} - -// Clone makes a deep copy of s2av2TransportCreds. -func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { - info := *c.info - serverName := c.serverName - fallbackClientHandshake := c.fallbackClientHandshake - - s2av2Address := c.s2av2Address - var tokenManager tokenmanager.AccessTokenManager - if c.tokenManager == nil { - tokenManager = nil - } else { - tokenManager = *c.tokenManager - } - verificationMode := c.verificationMode - var localIdentity *commonpbv1.Identity - if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) - } - var localIdentities []*commonpbv1.Identity - if c.localIdentities != nil { - localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) - for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) - } - } - creds := &s2av2TransportCreds{ - info: &info, - isClient: c.isClient, - serverName: serverName, - fallbackClientHandshake: fallbackClientHandshake, - s2av2Address: s2av2Address, - localIdentity: localIdentity, - localIdentities: localIdentities, - verificationMode: verificationMode, - } - if c.tokenManager == nil { - creds.tokenManager = nil - } else { - creds.tokenManager = &tokenManager - } - return creds -} - -// NewClientTLSConfig returns a tls.Config instance that uses S2Av2 to establish a TLS connection as -// a client. The tls.Config MUST only be used to establish a single TLS connection. 
-func NewClientTLSConfig( - ctx context.Context, - s2av2Address string, - transportCreds credentials.TransportCredentials, - tokenManager tokenmanager.AccessTokenManager, - verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, - serverName string, - serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) - if err != nil { - grpclog.Infof("Failed to connect to S2Av2: %v", err) - return nil, err - } - - return tlsconfigstore.GetTLSConfigurationForClient(removeServerNamePort(serverName), s2AStream, tokenManager, nil, verificationMode, serverAuthorizationPolicy) -} - -// OverrideServerName sets the ServerName in the s2av2TransportCreds protocol -// info. The ServerName MUST be a hostname. -func (c *s2av2TransportCreds) OverrideServerName(serverNameOverride string) error { - serverName := removeServerNamePort(serverNameOverride) - c.info.ServerName = serverName - c.serverName = serverName - return nil -} - -// Remove the trailing port from server name. -func removeServerNamePort(serverName string) string { - name, _, err := net.SplitHostPort(serverName) - if err != nil { - name = serverName - } - return name -} - -type s2AGrpcStream struct { - stream s2av2pb.S2AService_SetUpSessionClient -} - -func (x s2AGrpcStream) Send(m *s2av2pb.SessionReq) error { - return x.stream.Send(m) -} - -func (x s2AGrpcStream) Recv() (*s2av2pb.SessionResp, error) { - return x.stream.Recv() -} - -func (x s2AGrpcStream) CloseSend() error { - return x.stream.CloseSend() -} - -func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { - if getS2AStream != nil { - return getS2AStream(ctx, s2av2Address) - } - // TODO(rmehta19): Consider whether to close the connection to S2Av2. - conn, err := service.Dial(ctx, s2av2Address, transportCreds) - if err != nil { - return nil, err - } - client := s2av2pb.NewS2AServiceClient(conn) - gRPCStream, err := client.SetUpSession(ctx, []grpc.CallOption{}...) - if err != nil { - return nil, err - } - return &s2AGrpcStream{ - stream: gRPCStream, - }, nil -} - -// GetS2ATimeout returns the timeout enforced on the connection to the S2A service for handshake. -func GetS2ATimeout() time.Duration { - timeout, err := time.ParseDuration(os.Getenv(s2aTimeoutEnv)) - if err != nil { - return defaultS2ATimeout - } - return timeout -} diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go deleted file mode 100644 index 4d9191322..000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ /dev/null @@ -1,404 +0,0 @@ -/* - * - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package tlsconfigstore offloads operations to S2Av2. 
-package tlsconfigstore - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - - "github.com/google/s2a-go/internal/tokenmanager" - "github.com/google/s2a-go/internal/v2/certverifier" - "github.com/google/s2a-go/internal/v2/remotesigner" - "github.com/google/s2a-go/stream" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" - commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -const ( - // HTTP/2 - h2 = "h2" -) - -// GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. -func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { - authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) - - if grpclog.V(1) { - grpclog.Infof("Sending request to S2Av2 for client TLS config.") - } - // Send request to S2Av2 for config. - if err := s2AStream.Send(&s2av2pb.SessionReq{ - LocalIdentity: localIdentity, - AuthenticationMechanisms: authMechanisms, - ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ - GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ - ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_CLIENT, - }, - }, - }); err != nil { - grpclog.Infof("Failed to send request to S2Av2 for client TLS config") - return nil, err - } - - // Get the response containing config from S2Av2. - resp, err := s2AStream.Recv() - if err != nil { - grpclog.Infof("Failed to receive client TLS config response from S2Av2.") - return nil, err - } - - // TODO(rmehta19): Add unit test for this if statement. - if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { - return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) - } - - // Extract TLS configiguration from SessionResp. - tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() - - var cert tls.Certificate - for i, v := range tlsConfig.CertificateChain { - // Populate Certificates field. - block, _ := pem.Decode([]byte(v)) - if block == nil { - return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") - } - x509Cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - cert.Certificate = append(cert.Certificate, x509Cert.Raw) - if i == 0 { - cert.Leaf = x509Cert - } - } - - if len(tlsConfig.CertificateChain) > 0 { - cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) - if cert.PrivateKey == nil { - return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") - } - } - - minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig) - if err != nil { - return nil, err - } - - // Create mTLS credentials for client. 
- config := &tls.Config{ - VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy), - ServerName: serverHostname, - InsecureSkipVerify: true, // NOLINT - ClientSessionCache: nil, - SessionTicketsDisabled: true, - MinVersion: minVersion, - MaxVersion: maxVersion, - NextProtos: []string{h2}, - } - if len(tlsConfig.CertificateChain) > 0 { - config.Certificates = []tls.Certificate{cert} - } - return config, nil -} - -// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. -func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { - return &tls.Config{ - GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), - }, nil -} - -// ClientConfig builds a TLS config for a server to establish a secure -// connection with a client, based on SNI communicated during ClientHello. -// Ensures that server presents the correct certificate to establish a TLS -// connection. -func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { - return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { - tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) - if err != nil { - return nil, err - } - - var cert tls.Certificate - for i, v := range tlsConfig.CertificateChain { - // Populate Certificates field. - block, _ := pem.Decode([]byte(v)) - if block == nil { - return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") - } - x509Cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - cert.Certificate = append(cert.Certificate, x509Cert.Raw) - if i == 0 { - cert.Leaf = x509Cert - } - } - - cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) - if cert.PrivateKey == nil { - return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") - } - - minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig) - if err != nil { - return nil, err - } - - clientAuth := getTLSClientAuthType(tlsConfig) - - var cipherSuites []uint16 - cipherSuites = getCipherSuites(tlsConfig.Ciphersuites) - - // Create mTLS credentials for server. 
- return &tls.Config{ - Certificates: []tls.Certificate{cert}, - VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream), - ClientAuth: clientAuth, - CipherSuites: cipherSuites, - SessionTicketsDisabled: true, - MinVersion: minVersion, - MaxVersion: maxVersion, - NextProtos: []string{h2}, - }, nil - } -} - -func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 { - var tlsGoCipherSuites []uint16 - for _, v := range tlsConfigCipherSuites { - s := getTLSCipherSuite(v) - if s != 0xffff { - tlsGoCipherSuites = append(tlsGoCipherSuites, s) - } - } - return tlsGoCipherSuites -} - -func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { - switch tlsCipherSuite { - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: - return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: - return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: - return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256: - return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384: - return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: - return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 - default: - return 0xffff - } -} - -func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { - authMechanisms := getAuthMechanisms(tokenManager, localIdentities) - var locID *commonpbv1.Identity - if localIdentities != nil { - locID = localIdentities[0] - } - - if err := s2AStream.Send(&s2av2pb.SessionReq{ - LocalIdentity: locID, - AuthenticationMechanisms: authMechanisms, - ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ - GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ - ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER, - Sni: sni, - }, - }, - }); err != nil { - return nil, err - } - - resp, err := s2AStream.Recv() - if err != nil { - return nil, err - } - - // TODO(rmehta19): Add unit test for this if statement. - if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { - return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) - } - - return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil -} - -func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType { - var clientAuth tls.ClientAuthType - switch x := tlsConfig.RequestClientCertificate; x { - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE: - clientAuth = tls.NoClientCert - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: - clientAuth = tls.RequestClientCert - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: - // This case actually maps to tls.VerifyClientCertIfGiven. However this - // mapping triggers normal verification, followed by custom verification, - // specified in VerifyPeerCertificate. 
To bypass normal verification, and - // only do custom verification we set clientAuth to RequireAnyClientCert or - // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full - // discussion. - clientAuth = tls.RequireAnyClientCert - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: - clientAuth = tls.RequireAnyClientCert - case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: - // This case actually maps to tls.RequireAndVerifyClientCert. However this - // mapping triggers normal verification, followed by custom verification, - // specified in VerifyPeerCertificate. To bypass normal verification, and - // only do custom verification we set clientAuth to RequireAnyClientCert or - // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full - // discussion. - clientAuth = tls.RequireAnyClientCert - default: - clientAuth = tls.RequireAnyClientCert - } - return clientAuth -} - -func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { - if tokenManager == nil { - return nil - } - if len(localIdentities) == 0 { - token, err := tokenManager.DefaultToken() - if err != nil { - grpclog.Infof("Unable to get token for empty local identity: %v", err) - return nil - } - return []*s2av2pb.AuthenticationMechanism{ - { - MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ - Token: token, - }, - }, - } - } - var authMechanisms []*s2av2pb.AuthenticationMechanism - for _, localIdentity := range localIdentities { - if localIdentity == nil { - token, err := tokenManager.DefaultToken() - if err != nil { - grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err) - continue - } - authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ - Identity: localIdentity, - MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ - Token: token, - }, - }) - } else { - token, err := tokenManager.Token(localIdentity) - if err != nil { - grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err) - continue - } - authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ - Identity: localIdentity, - MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ - Token: token, - }, - }) - } - } - return authMechanisms -} - -// TODO(rmehta19): refactor switch statements into a helper function. -func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) { - // Map S2Av2 TLSVersion to consts defined in tls package. 
- var minVersion uint16 - var maxVersion uint16 - switch x := tlsConfig.MinTlsVersion; x { - case commonpb.TLSVersion_TLS_VERSION_1_0: - minVersion = tls.VersionTLS10 - case commonpb.TLSVersion_TLS_VERSION_1_1: - minVersion = tls.VersionTLS11 - case commonpb.TLSVersion_TLS_VERSION_1_2: - minVersion = tls.VersionTLS12 - case commonpb.TLSVersion_TLS_VERSION_1_3: - minVersion = tls.VersionTLS13 - default: - return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) - } - - switch x := tlsConfig.MaxTlsVersion; x { - case commonpb.TLSVersion_TLS_VERSION_1_0: - maxVersion = tls.VersionTLS10 - case commonpb.TLSVersion_TLS_VERSION_1_1: - maxVersion = tls.VersionTLS11 - case commonpb.TLSVersion_TLS_VERSION_1_2: - maxVersion = tls.VersionTLS12 - case commonpb.TLSVersion_TLS_VERSION_1_3: - maxVersion = tls.VersionTLS13 - default: - return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) - } - if minVersion > maxVersion { - return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") - } - return minVersion, maxVersion, nil -} - -func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) { - // Map S2Av2 TLSVersion to consts defined in tls package. - var minVersion uint16 - var maxVersion uint16 - switch x := tlsConfig.MinTlsVersion; x { - case commonpb.TLSVersion_TLS_VERSION_1_0: - minVersion = tls.VersionTLS10 - case commonpb.TLSVersion_TLS_VERSION_1_1: - minVersion = tls.VersionTLS11 - case commonpb.TLSVersion_TLS_VERSION_1_2: - minVersion = tls.VersionTLS12 - case commonpb.TLSVersion_TLS_VERSION_1_3: - minVersion = tls.VersionTLS13 - default: - return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) - } - - switch x := tlsConfig.MaxTlsVersion; x { - case commonpb.TLSVersion_TLS_VERSION_1_0: - maxVersion = tls.VersionTLS10 - case commonpb.TLSVersion_TLS_VERSION_1_1: - maxVersion = tls.VersionTLS11 - case commonpb.TLSVersion_TLS_VERSION_1_2: - maxVersion = tls.VersionTLS12 - case commonpb.TLSVersion_TLS_VERSION_1_3: - maxVersion = tls.VersionTLS13 - default: - return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) - } - if minVersion > maxVersion { - return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") - } - return minVersion, maxVersion, nil -} diff --git a/vendor/github.com/google/s2a-go/retry/retry.go b/vendor/github.com/google/s2a-go/retry/retry.go deleted file mode 100644 index f7e0a2377..000000000 --- a/vendor/github.com/google/s2a-go/retry/retry.go +++ /dev/null @@ -1,144 +0,0 @@ -/* - * - * Copyright 2023 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package retry provides a retry helper for talking to S2A gRPC server. 
-// The implementation is modeled after -// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/retry.go -package retry - -import ( - "context" - "math/rand" - "time" - - "google.golang.org/grpc/grpclog" -) - -const ( - maxRetryAttempts = 5 - maxRetryForLoops = 10 -) - -type defaultBackoff struct { - max time.Duration - mul float64 - cur time.Duration -} - -// Pause returns a duration, which is used as the backoff wait time -// before the next retry. -func (b *defaultBackoff) Pause() time.Duration { - d := time.Duration(1 + rand.Int63n(int64(b.cur))) - b.cur = time.Duration(float64(b.cur) * b.mul) - if b.cur > b.max { - b.cur = b.max - } - return d -} - -// Sleep will wait for the specified duration or return on context -// expiration. -func Sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -// NewRetryer creates an instance of S2ARetryer using the defaultBackoff -// implementation. -var NewRetryer = func() *S2ARetryer { - return &S2ARetryer{bo: &defaultBackoff{ - cur: 100 * time.Millisecond, - max: 30 * time.Second, - mul: 2, - }} -} - -type backoff interface { - Pause() time.Duration -} - -// S2ARetryer implements a retry helper for talking to S2A gRPC server. -type S2ARetryer struct { - bo backoff - attempts int -} - -// Attempts return the number of retries attempted. -func (r *S2ARetryer) Attempts() int { - return r.attempts -} - -// Retry returns a boolean indicating whether retry should be performed -// and the backoff duration. -func (r *S2ARetryer) Retry(err error) (time.Duration, bool) { - if err == nil { - return 0, false - } - if r.attempts >= maxRetryAttempts { - return 0, false - } - r.attempts++ - return r.bo.Pause(), true -} - -// Run uses S2ARetryer to execute the function passed in, until success or reaching -// max number of retry attempts. -func Run(ctx context.Context, f func() error) { - retryer := NewRetryer() - forLoopCnt := 0 - var err error - for { - err = f() - if bo, shouldRetry := retryer.Retry(err); shouldRetry { - if grpclog.V(1) { - grpclog.Infof("will attempt retry: %v", err) - } - if ctx.Err() != nil { - if grpclog.V(1) { - grpclog.Infof("exit retry loop due to context error: %v", ctx.Err()) - } - break - } - if errSleep := Sleep(ctx, bo); errSleep != nil { - if grpclog.V(1) { - grpclog.Infof("exit retry loop due to sleep error: %v", errSleep) - } - break - } - // This shouldn't happen, just make sure we are not stuck in the for loops. - forLoopCnt++ - if forLoopCnt > maxRetryForLoops { - if grpclog.V(1) { - grpclog.Infof("exit the for loop after too many retries") - } - break - } - continue - } - if grpclog.V(1) { - grpclog.Infof("retry conditions not met, exit the loop") - } - break - } -} diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go deleted file mode 100644 index 5ecb06f93..000000000 --- a/vendor/github.com/google/s2a-go/s2a.go +++ /dev/null @@ -1,427 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package s2a provides the S2A transport credentials used by a gRPC -// application. -package s2a - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net" - "sync" - "time" - - "github.com/golang/protobuf/proto" - "github.com/google/s2a-go/fallback" - "github.com/google/s2a-go/internal/handshaker" - "github.com/google/s2a-go/internal/handshaker/service" - "github.com/google/s2a-go/internal/tokenmanager" - "github.com/google/s2a-go/internal/v2" - "github.com/google/s2a-go/retry" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -const ( - s2aSecurityProtocol = "tls" - // defaultTimeout specifies the default server handshake timeout. - defaultTimeout = 30.0 * time.Second -) - -// s2aTransportCreds are the transport credentials required for establishing -// a secure connection using the S2A. They implement the -// credentials.TransportCredentials interface. -type s2aTransportCreds struct { - info *credentials.ProtocolInfo - minTLSVersion commonpb.TLSVersion - maxTLSVersion commonpb.TLSVersion - // tlsCiphersuites contains the ciphersuites used in the S2A connection. - // Note that these are currently unconfigurable. - tlsCiphersuites []commonpb.Ciphersuite - // localIdentity should only be used by the client. - localIdentity *commonpb.Identity - // localIdentities should only be used by the server. - localIdentities []*commonpb.Identity - // targetIdentities should only be used by the client. - targetIdentities []*commonpb.Identity - isClient bool - s2aAddr string - ensureProcessSessionTickets *sync.WaitGroup -} - -// NewClientCreds returns a client-side transport credentials object that uses -// the S2A to establish a secure connection with a server. 
-func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) { - if opts == nil { - return nil, errors.New("nil client options") - } - var targetIdentities []*commonpb.Identity - for _, targetIdentity := range opts.TargetIdentities { - protoTargetIdentity, err := toProtoIdentity(targetIdentity) - if err != nil { - return nil, err - } - targetIdentities = append(targetIdentities, protoTargetIdentity) - } - localIdentity, err := toProtoIdentity(opts.LocalIdentity) - if err != nil { - return nil, err - } - if opts.EnableLegacyMode { - return &s2aTransportCreds{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: s2aSecurityProtocol, - }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, - }, - localIdentity: localIdentity, - targetIdentities: targetIdentities, - isClient: true, - s2aAddr: opts.S2AAddress, - ensureProcessSessionTickets: opts.EnsureProcessSessionTickets, - }, nil - } - verificationMode := getVerificationMode(opts.VerificationMode) - var fallbackFunc fallback.ClientHandshake - if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { - fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc - } - return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) -} - -// NewServerCreds returns a server-side transport credentials object that uses -// the S2A to establish a secure connection with a client. -func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) { - if opts == nil { - return nil, errors.New("nil server options") - } - var localIdentities []*commonpb.Identity - for _, localIdentity := range opts.LocalIdentities { - protoLocalIdentity, err := toProtoIdentity(localIdentity) - if err != nil { - return nil, err - } - localIdentities = append(localIdentities, protoLocalIdentity) - } - if opts.EnableLegacyMode { - return &s2aTransportCreds{ - info: &credentials.ProtocolInfo{ - SecurityProtocol: s2aSecurityProtocol, - }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, - }, - localIdentities: localIdentities, - isClient: false, - s2aAddr: opts.S2AAddress, - }, nil - } - verificationMode := getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) -} - -// ClientHandshake initiates a client-side TLS handshake using the S2A. -func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if !c.isClient { - return nil, nil, errors.New("client handshake called using server transport credentials") - } - - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - - // Connect to the S2A. 
- hsConn, err := service.Dial(ctx, c.s2aAddr, nil) - if err != nil { - grpclog.Infof("Failed to connect to S2A: %v", err) - return nil, nil, err - } - - opts := &handshaker.ClientHandshakerOptions{ - MinTLSVersion: c.minTLSVersion, - MaxTLSVersion: c.maxTLSVersion, - TLSCiphersuites: c.tlsCiphersuites, - TargetIdentities: c.targetIdentities, - LocalIdentity: c.localIdentity, - TargetName: serverAuthority, - EnsureProcessSessionTickets: c.ensureProcessSessionTickets, - } - chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) - if err != nil { - grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err) - return nil, nil, err - } - defer func() { - if err != nil { - if closeErr := chs.Close(); closeErr != nil { - grpclog.Infof("Close failed unexpectedly: %v", err) - err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) - } - } - }() - - secConn, authInfo, err := chs.ClientHandshake(context.Background()) - if err != nil { - grpclog.Infof("Handshake failed: %v", err) - return nil, nil, err - } - return secConn, authInfo, nil -} - -// ServerHandshake initiates a server-side TLS handshake using the S2A. -func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if c.isClient { - return nil, nil, errors.New("server handshake called using client transport credentials") - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - - // Connect to the S2A. - hsConn, err := service.Dial(ctx, c.s2aAddr, nil) - if err != nil { - grpclog.Infof("Failed to connect to S2A: %v", err) - return nil, nil, err - } - - opts := &handshaker.ServerHandshakerOptions{ - MinTLSVersion: c.minTLSVersion, - MaxTLSVersion: c.maxTLSVersion, - TLSCiphersuites: c.tlsCiphersuites, - LocalIdentities: c.localIdentities, - } - shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts) - if err != nil { - grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err) - return nil, nil, err - } - defer func() { - if err != nil { - if closeErr := shs.Close(); closeErr != nil { - grpclog.Infof("Close failed unexpectedly: %v", err) - err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr) - } - } - }() - - secConn, authInfo, err := shs.ServerHandshake(context.Background()) - if err != nil { - grpclog.Infof("Handshake failed: %v", err) - return nil, nil, err - } - return secConn, authInfo, nil -} - -func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { - return *c.info -} - -func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { - info := *c.info - var localIdentity *commonpb.Identity - if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) - } - var localIdentities []*commonpb.Identity - if c.localIdentities != nil { - localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) - for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) - } - } - var targetIdentities []*commonpb.Identity - if c.targetIdentities != nil { - targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) - for i, targetIdentity := range c.targetIdentities { - targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) - } - } - return &s2aTransportCreds{ - info: &info, - minTLSVersion: c.minTLSVersion, - maxTLSVersion: c.maxTLSVersion, - tlsCiphersuites: c.tlsCiphersuites, - localIdentity: localIdentity, - 
localIdentities: localIdentities, - targetIdentities: targetIdentities, - isClient: c.isClient, - s2aAddr: c.s2aAddr, - } -} - -func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error { - c.info.ServerName = serverNameOverride - return nil -} - -// TLSClientConfigOptions specifies parameters for creating client TLS config. -type TLSClientConfigOptions struct { - // ServerName is required by s2a as the expected name when verifying the hostname found in server's certificate. - // tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{ - // ServerName: "example.com", - // }) - ServerName string -} - -// TLSClientConfigFactory defines the interface for a client TLS config factory. -type TLSClientConfigFactory interface { - Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) -} - -// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory. -func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) { - if opts == nil { - return nil, fmt.Errorf("opts must be non-nil") - } - if opts.EnableLegacyMode { - return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2") - } - tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() - if err != nil { - // The only possible error is: access token not set in the environment, - // which is okay in environments other than serverless. - grpclog.Infof("Access token manager not initialized: %v", err) - return &s2aTLSClientConfigFactory{ - s2av2Address: opts.S2AAddress, - transportCreds: opts.TransportCreds, - tokenManager: nil, - verificationMode: getVerificationMode(opts.VerificationMode), - serverAuthorizationPolicy: opts.serverAuthorizationPolicy, - }, nil - } - return &s2aTLSClientConfigFactory{ - s2av2Address: opts.S2AAddress, - transportCreds: opts.TransportCreds, - tokenManager: tokenManager, - verificationMode: getVerificationMode(opts.VerificationMode), - serverAuthorizationPolicy: opts.serverAuthorizationPolicy, - }, nil -} - -type s2aTLSClientConfigFactory struct { - s2av2Address string - transportCreds credentials.TransportCredentials - tokenManager tokenmanager.AccessTokenManager - verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode - serverAuthorizationPolicy []byte -} - -func (f *s2aTLSClientConfigFactory) Build( - ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) { - serverName := "" - if opts != nil && opts.ServerName != "" { - serverName = opts.ServerName - } - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) -} - -func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { - switch verificationMode { - case ConnectToGoogle: - return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE - case Spiffe: - return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE - default: - return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED - } -} - -// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A. 
-// Example use with http.RoundTripper: -// -// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ -// S2AAddress: s2aAddress, // required -// }) -// transport := http.DefaultTransport -// transport.DialTLSContext = dialTLSContext -func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) { - - return func(ctx context.Context, network, addr string) (net.Conn, error) { - - fallback := func(err error) (net.Conn, error) { - if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil && - opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" { - fbDialer := opts.FallbackOpts.FallbackDialer - grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr) - fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr) - if fbErr != nil { - return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err) - } - return fbConn, nil - } - return nil, err - } - - factory, err := NewTLSClientConfigFactory(opts) - if err != nil { - grpclog.Infof("error creating S2A client config factory: %v", err) - return fallback(err) - } - - serverName, _, err := net.SplitHostPort(addr) - if err != nil { - serverName = addr - } - timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) - defer cancel() - - var s2aTLSConfig *tls.Config - retry.Run(timeoutCtx, - func() error { - s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ - ServerName: serverName, - }) - return err - }) - if err != nil { - grpclog.Infof("error building S2A TLS config: %v", err) - return fallback(err) - } - - s2aDialer := &tls.Dialer{ - Config: s2aTLSConfig, - } - var c net.Conn - retry.Run(timeoutCtx, - func() error { - c, err = s2aDialer.DialContext(timeoutCtx, network, addr) - return err - }) - if err != nil { - grpclog.Infof("error dialing with S2A to %s: %v", addr, err) - return fallback(err) - } - grpclog.Infof("success dialing MTLS to %s with S2A", addr) - return c, nil - } -} diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go deleted file mode 100644 index fcdbc1621..000000000 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package s2a - -import ( - "context" - "crypto/tls" - "errors" - "sync" - - "github.com/google/s2a-go/fallback" - "github.com/google/s2a-go/stream" - "google.golang.org/grpc/credentials" - - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" -) - -// Identity is the interface for S2A identities. -type Identity interface { - // Name returns the name of the identity. - Name() string -} - -type spiffeID struct { - spiffeID string -} - -func (s *spiffeID) Name() string { return s.spiffeID } - -// NewSpiffeID creates a SPIFFE ID from id. 
-func NewSpiffeID(id string) Identity { - return &spiffeID{spiffeID: id} -} - -type hostname struct { - hostname string -} - -func (h *hostname) Name() string { return h.hostname } - -// NewHostname creates a hostname from name. -func NewHostname(name string) Identity { - return &hostname{hostname: name} -} - -type uid struct { - uid string -} - -func (h *uid) Name() string { return h.uid } - -// NewUID creates a UID from name. -func NewUID(name string) Identity { - return &uid{uid: name} -} - -// VerificationModeType specifies the mode that S2A must use to verify the peer -// certificate chain. -type VerificationModeType int - -// Three types of verification modes. -const ( - Unspecified = iota - ConnectToGoogle - Spiffe -) - -// ClientOptions contains the client-side options used to establish a secure -// channel using the S2A handshaker service. -type ClientOptions struct { - // TargetIdentities contains a list of allowed server identities. One of the - // target identities should match the peer identity in the handshake - // result; otherwise, the handshake fails. - TargetIdentities []Identity - // LocalIdentity is the local identity of the client application. If none is - // provided, then the S2A will choose the default identity, if one exists. - LocalIdentity Identity - // S2AAddress is the address of the S2A. - S2AAddress string - // Optional transport credentials. - // If set, this will be used for the gRPC connection to the S2A server. - TransportCreds credentials.TransportCredentials - // EnsureProcessSessionTickets waits for all session tickets to be sent to - // S2A before a process completes. - // - // This functionality is crucial for processes that complete very soon after - // using S2A to establish a TLS connection, but it can be ignored for longer - // lived processes. - // - // Usage example: - // func main() { - // var ensureProcessSessionTickets sync.WaitGroup - // clientOpts := &s2a.ClientOptions{ - // EnsureProcessSessionTickets: &ensureProcessSessionTickets, - // // Set other members. - // } - // creds, _ := s2a.NewClientCreds(clientOpts) - // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds)) - // defer conn.Close() - // - // // Make RPC call. - // - // // The process terminates right after the RPC call ends. - // // ensureProcessSessionTickets can be used to ensure resumption - // // tickets are fully processed. If the process is long-lived, using - // // ensureProcessSessionTickets is not necessary. - // ensureProcessSessionTickets.Wait() - // } - EnsureProcessSessionTickets *sync.WaitGroup - // If true, enables the use of legacy S2Av1. - EnableLegacyMode bool - // VerificationMode specifies the mode that S2A must use to verify the - // peer certificate chain. - VerificationMode VerificationModeType - - // Optional fallback after dialing with S2A fails. - FallbackOpts *FallbackOptions - - // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) - - // Serialized user specified policy for server authorization. - serverAuthorizationPolicy []byte -} - -// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A. -type FallbackOptions struct { - // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds(). - // It will be called by ClientHandshake function, after handshake with S2A fails. - // s2a.NewClientCreds() ignores the other FallbackDialer field. 
- FallbackClientHandshakeFunc fallback.ClientHandshake - - // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2aDialTLSContextFunc(). - // It passes in a custom fallback dialer and server address to use after dialing with S2A fails. - // s2a.NewS2aDialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field. - FallbackDialer *FallbackDialer -} - -// FallbackDialer contains a fallback tls.Dialer and a server address to connect to. -type FallbackDialer struct { - // Dialer specifies a fallback tls.Dialer. - Dialer *tls.Dialer - // ServerAddr is used by Dialer to establish fallback connection. - ServerAddr string -} - -// DefaultClientOptions returns the default client options. -func DefaultClientOptions(s2aAddress string) *ClientOptions { - return &ClientOptions{ - S2AAddress: s2aAddress, - VerificationMode: ConnectToGoogle, - } -} - -// ServerOptions contains the server-side options used to establish a secure -// channel using the S2A handshaker service. -type ServerOptions struct { - // LocalIdentities is the list of local identities that may be assumed by - // the server. If no local identity is specified, then the S2A chooses a - // default local identity, if one exists. - LocalIdentities []Identity - // S2AAddress is the address of the S2A. - S2AAddress string - // Optional transport credentials. - // If set, this will be used for the gRPC connection to the S2A server. - TransportCreds credentials.TransportCredentials - // If true, enables the use of legacy S2Av1. - EnableLegacyMode bool - // VerificationMode specifies the mode that S2A must use to verify the - // peer certificate chain. - VerificationMode VerificationModeType - - // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) -} - -// DefaultServerOptions returns the default server options. -func DefaultServerOptions(s2aAddress string) *ServerOptions { - return &ServerOptions{ - S2AAddress: s2aAddress, - VerificationMode: ConnectToGoogle, - } -} - -func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { - if identity == nil { - return nil, nil - } - switch id := identity.(type) { - case *spiffeID: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil - case *hostname: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil - case *uid: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil - default: - return nil, errors.New("unrecognized identity type") - } -} diff --git a/vendor/github.com/google/s2a-go/s2a_utils.go b/vendor/github.com/google/s2a-go/s2a_utils.go deleted file mode 100644 index d649cc461..000000000 --- a/vendor/github.com/google/s2a-go/s2a_utils.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package s2a - -import ( - "context" - "errors" - - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/peer" -) - -// AuthInfo exposes security information from the S2A to the application. -type AuthInfo interface { - // AuthType returns the authentication type. - AuthType() string - // ApplicationProtocol returns the application protocol, e.g. "grpc". - ApplicationProtocol() string - // TLSVersion returns the TLS version negotiated during the handshake. - TLSVersion() commonpb.TLSVersion - // Ciphersuite returns the ciphersuite negotiated during the handshake. - Ciphersuite() commonpb.Ciphersuite - // PeerIdentity returns the authenticated identity of the peer. - PeerIdentity() *commonpb.Identity - // LocalIdentity returns the local identity of the application used during - // session setup. - LocalIdentity() *commonpb.Identity - // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in - // the S2A handshake. - PeerCertFingerprint() []byte - // LocalCertFingerprint returns the SHA256 hash of the local certificate used - // in the S2A handshake. - LocalCertFingerprint() []byte - // IsHandshakeResumed returns true if a cached session was used to resume - // the handshake. - IsHandshakeResumed() bool - // SecurityLevel returns the security level of the connection. - SecurityLevel() credentials.SecurityLevel -} - -// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given -// peer, if it exists. This API should be used by gRPC clients after -// obtaining a peer object using the grpc.Peer() CallOption. -func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { - s2aAuthInfo, ok := p.AuthInfo.(AuthInfo) - if !ok { - return nil, errors.New("no S2AAuthInfo found in Peer") - } - return s2aAuthInfo, nil -} - -// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given -// context, if it exists. This API should be used by gRPC server RPC handlers -// to get information about the peer. On the client-side, use the grpc.Peer() -// CallOption and the AuthInfoFromPeer function. -func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { - p, ok := peer.FromContext(ctx) - if !ok { - return nil, errors.New("no Peer found in Context") - } - return AuthInfoFromPeer(p) -} diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go deleted file mode 100644 index 584bf32b1..000000000 --- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * - * Copyright 2023 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package stream provides an interface for bidirectional streaming to the S2A server. -package stream - -import ( - s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" -) - -// S2AStream defines the operation for communicating with the S2A server over a bidirectional stream. 
-type S2AStream interface { - // Send sends the message to the S2A server. - Send(*s2av2pb.SessionReq) error - // Recv receives the message from the S2A server. - Recv() (*s2av2pb.SessionResp, error) - // Closes the channel to the S2A server. - CloseSend() error -} diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md deleted file mode 100644 index 7ec5ac7ea..000000000 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ /dev/null @@ -1,41 +0,0 @@ -# Changelog - -## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) - - -### Features - -* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) - - -### Bug Fixes - -* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) -* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) - -## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) - - -### Features - -* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) - -## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) - - -### Features - -* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) - -### Fixes - -* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) - -## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) - - -### Bug Fixes - -* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) - -## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index a502fdc51..000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,26 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Tips - -Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). - -Always try to include a test case! If it is not possible or not necessary, -please explain why in the pull request description. - -### Releasing - -Commits that would precipitate a SemVer change, as described in the Conventional -Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) -to create a release candidate pull request. Once submitted, `release-please` -will create a release. - -For tips on how to work with `release-please`, see its documentation. - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. 
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6b..000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268d..000000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 3e9a61889..000000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# uuid -The uuid package generates and inspects UUIDs based on -[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). - -###### Install -```sh -go get github.com/google/uuid -``` - -###### Documentation -[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d3..000000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9af..000000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index dc60082d3..000000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros - - // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. 
- Max = UUID{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) //nolint:errcheck - h.Write(data) //nolint:errcheck - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 14bd34072..000000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - return err - } - *uuid = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b06..000000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. 
If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index b2a0bc871..000000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This removes the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddbd..000000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. 
-func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go deleted file mode 100644 index d7fcbf286..000000000 --- a/vendor/github.com/google/uuid/null.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2021 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "database/sql/driver" - "encoding/json" - "fmt" -) - -var jsonNull = []byte("null") - -// NullUUID represents a UUID that may be null. -// NullUUID implements the SQL driver.Scanner interface so -// it can be used as a scan destination: -// -// var u uuid.NullUUID -// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) -// ... -// if u.Valid { -// // use u.UUID -// } else { -// // NULL value -// } -// -type NullUUID struct { - UUID UUID - Valid bool // Valid is true if UUID is not NULL -} - -// Scan implements the SQL driver.Scanner interface. -func (nu *NullUUID) Scan(value interface{}) error { - if value == nil { - nu.UUID, nu.Valid = Nil, false - return nil - } - - err := nu.UUID.Scan(value) - if err != nil { - nu.Valid = false - return err - } - - nu.Valid = true - return nil -} - -// Value implements the driver Valuer interface. -func (nu NullUUID) Value() (driver.Value, error) { - if !nu.Valid { - return nil, nil - } - // Delegate to UUID Value function - return nu.UUID.Value() -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (nu NullUUID) MarshalBinary() ([]byte, error) { - if nu.Valid { - return nu.UUID[:], nil - } - - return []byte(nil), nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (nu *NullUUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(nu.UUID[:], data) - nu.Valid = true - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (nu NullUUID) MarshalText() ([]byte, error) { - if nu.Valid { - return nu.UUID.MarshalText() - } - - return jsonNull, nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (nu *NullUUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - nu.Valid = false - return err - } - nu.UUID = id - nu.Valid = true - return nil -} - -// MarshalJSON implements json.Marshaler. -func (nu NullUUID) MarshalJSON() ([]byte, error) { - if nu.Valid { - return json.Marshal(nu.UUID) - } - - return jsonNull, nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (nu *NullUUID) UnmarshalJSON(data []byte) error { - if bytes.Equal(data, jsonNull) { - *nu = NullUUID{} - return nil // valid null UUID - } - err := json.Unmarshal(data, &nu.UUID) - nu.Valid = err == nil - return err -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index 2e02ec06c..000000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently. -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index c35112927..000000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. 
-// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. -func (uuid UUID) Time() Time { - var t Time - switch uuid.Version() { - case 6: - time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 - t = Time(time) - case 7: - time := binary.BigEndian.Uint64(uuid[:8]) - t = Time((time>>16)*10000 + g1582ns100) - default: // forward compatible - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - t = Time(time) - } - return t -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c7378..000000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. 
-var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 5232b4867..000000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - "sync" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -const randPoolSize = 16 * 16 - -var ( - rander = rand.Reader // random function - poolEnabled = false - poolMu sync.Mutex - poolPos = randPoolSize // protected with poolMu - pool [randPoolSize]byte // protected with poolMu -) - -type invalidLengthError struct{ len int } - -func (err invalidLengthError) Error() string { - return fmt.Sprintf("invalid UUID length: %d", err.len) -} - -// IsInvalidLengthError is matcher function for custom error invalidLengthError -func IsInvalidLengthError(err error) bool { - _, ok := err.(invalidLengthError) - return ok -} - -// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both -// the standard UUID forms defined in RFC 4122 -// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, -// Parse accepts non-standard strings such as the raw hex encoding -// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, -// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. 
Only the middle 36 bytes are -// examined in the latter case. Parse should not be used to validate strings as -// it parses non-standard encodings as indicated above. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if !strings.EqualFold(s[:9], "urn:uuid:") { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(s)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34, - } { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(b)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34, - } { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// Validate returns an error if s is not a properly formatted UUID in one of the following formats: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} -// It returns an error if the format is invalid, otherwise nil. 
-func Validate(s string) error { - switch len(s) { - // Standard UUID format - case 36: - - // UUID with "urn:uuid:" prefix - case 36 + 9: - if !strings.EqualFold(s[:9], "urn:uuid:") { - return fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // UUID enclosed in braces - case 36 + 2: - if s[0] != '{' || s[len(s)-1] != '}' { - return fmt.Errorf("invalid bracketed UUID format") - } - s = s[1 : len(s)-1] - - // UUID without hyphens - case 32: - for i := 0; i < len(s); i += 2 { - _, ok := xtob(s[i], s[i+1]) - if !ok { - return errors.New("invalid UUID format") - } - } - - default: - return invalidLengthError{len(s)} - } - - // Check for standard UUID format - if len(s) == 36 { - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return errors.New("invalid UUID format") - } - for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { - if _, ok := xtob(s[x], s[x+1]); !ok { - return errors.New("invalid UUID format") - } - } - } - - return nil -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} - -// EnableRandPool enables internal randomness pool used for Random -// (Version 4) UUID generation. The pool contains random bytes read from -// the random number generator on demand in batches. Enabling the pool -// may improve the UUID generation throughput significantly. -// -// Since the pool is stored on the Go heap, this feature may be a bad fit -// for security sensitive applications. 
-// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func EnableRandPool() { - poolEnabled = true -} - -// DisableRandPool disables the randomness pool if it was previously -// enabled with EnableRandPool. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func DisableRandPool() { - poolEnabled = false - defer poolMu.Unlock() - poolMu.Lock() - poolPos = randPoolSize -} - -// UUIDs is a slice of UUID types. -type UUIDs []UUID - -// Strings returns a string slice containing the string form of each UUID in uuids. -func (uuids UUIDs) Strings() []string { - var uuidStrs = make([]string, len(uuids)) - for i, uuid := range uuids { - uuidStrs[i] = uuid.String() - } - return uuidStrs -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 463109629..000000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 7697802e4..000000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewString creates a new random UUID and returns it as a string or panics. -// NewString is equivalent to the expression -// -// uuid.New().String() -func NewString() string { - return Must(NewRandom()).String() -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. 
-// -// Uses the randomness pool if it was enabled with EnableRandPool. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - if !poolEnabled { - return NewRandomFromReader(rander) - } - return newRandomFromPool() -} - -// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. -func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} - -func newRandomFromPool() (UUID, error) { - var uuid UUID - poolMu.Lock() - if poolPos == randPoolSize { - _, err := io.ReadFull(rander, pool[:]) - if err != nil { - poolMu.Unlock() - return Nil, err - } - poolPos = 0 - } - copy(uuid[:], pool[poolPos:(poolPos+16)]) - poolPos += 16 - poolMu.Unlock() - - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go deleted file mode 100644 index 339a959a7..000000000 --- a/vendor/github.com/google/uuid/version6.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2023 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "encoding/binary" - -// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. -// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. -// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. -// -// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 -// -// NewV6 returns a Version 6 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewV6 returns Nil and an error. 
-func NewV6() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - /* - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | time_high | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | time_mid | time_low_and_version | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |clk_seq_hi_res | clk_seq_low | node (0-1) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | node (2-5) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - */ - - binary.BigEndian.PutUint64(uuid[0:], uint64(now)) - binary.BigEndian.PutUint16(uuid[8:], seq) - - uuid[6] = 0x60 | (uuid[6] & 0x0F) - uuid[8] = 0x80 | (uuid[8] & 0x3F) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go deleted file mode 100644 index 3167b643d..000000000 --- a/vendor/github.com/google/uuid/version7.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2023 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// UUID version 7 features a time-ordered value field derived from the widely -// implemented and well known Unix Epoch timestamp source, -// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. -// As well as improved entropy characteristics over versions 1 or 6. -// -// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 -// -// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. -// -// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). -// Uses the randomness pool if it was enabled with EnableRandPool. -// On error, NewV7 returns Nil and an error -func NewV7() (UUID, error) { - uuid, err := NewRandom() - if err != nil { - return uuid, err - } - makeV7(uuid[:]) - return uuid, nil -} - -// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). -// it use NewRandomFromReader fill random bits. -// On error, NewV7FromReader returns Nil and an error. 
-func NewV7FromReader(r io.Reader) (UUID, error) { - uuid, err := NewRandomFromReader(r) - if err != nil { - return uuid, err - } - - makeV7(uuid[:]) - return uuid, nil -} - -// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) -// uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader -func makeV7(uuid []byte) { - /* - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a (12 bit seq) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - |var| rand_b | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | rand_b | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - */ - _ = uuid[15] // bounds check - - t, s := getV7Time() - - uuid[0] = byte(t >> 40) - uuid[1] = byte(t >> 32) - uuid[2] = byte(t >> 24) - uuid[3] = byte(t >> 16) - uuid[4] = byte(t >> 8) - uuid[5] = byte(t) - - uuid[6] = 0x70 | (0x0F & byte(s>>8)) - uuid[7] = byte(s) -} - -// lastV7time is the last time we returned stored as: -// -// 52 bits of time in milliseconds since epoch -// 12 bits of (fractional nanoseconds) >> 8 -var lastV7time int64 - -const nanoPerMilli = 1000000 - -// getV7Time returns the time in milliseconds and nanoseconds / 256. -// The returned (milli << 12 + seq) is guarenteed to be greater than -// (milli << 12 + seq) returned by any previous call to getV7Time. -func getV7Time() (milli, seq int64) { - timeMu.Lock() - defer timeMu.Unlock() - - nano := timeNow().UnixNano() - milli = nano / nanoPerMilli - // Sequence number is between 0 and 3906 (nanoPerMilli>>8) - seq = (nano - milli*nanoPerMilli) >> 8 - now := milli<<12 + seq - if now <= lastV7time { - now = lastV7time + 1 - milli = now >> 12 - seq = now & 0xfff - } - lastV7time = now - return milli, seq -} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go deleted file mode 100644 index ea5beb5aa..000000000 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2022 Google LLC. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). -// -// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. -package client - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/gob" - "errors" - "fmt" - "io" - "net/rpc" - "os" - "os/exec" - - "github.com/googleapis/enterprise-certificate-proxy/client/util" -) - -const signAPI = "EnterpriseCertSigner.Sign" -const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" -const publicKeyAPI = "EnterpriseCertSigner.Public" -const encryptAPI = "EnterpriseCertSigner.Encrypt" -const decryptAPI = "EnterpriseCertSigner.Decrypt" - -// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. -type Connection struct { - io.ReadCloser - io.WriteCloser -} - -// Close closes c's underlying ReadCloser and WriteCloser. -func (c *Connection) Close() error { - rerr := c.ReadCloser.Close() - werr := c.WriteCloser.Close() - if rerr != nil { - return rerr - } - return werr -} - -func init() { - gob.Register(crypto.SHA256) - gob.Register(crypto.SHA384) - gob.Register(crypto.SHA512) - gob.Register(&rsa.PSSOptions{}) - gob.Register(&rsa.OAEPOptions{}) -} - -// SignArgs contains arguments for a Sign API call. -type SignArgs struct { - Digest []byte // The content to sign. - Opts crypto.SignerOpts // Options for signing. Must implement HashFunc(). -} - -// EncryptArgs contains arguments for an Encrypt API call. -type EncryptArgs struct { - Plaintext []byte // The plaintext to encrypt. - Opts any // Options for encryption. Ex: an instance of crypto.Hash. -} - -// DecryptArgs contains arguments to for a Decrypt API call. -type DecryptArgs struct { - Ciphertext []byte // The ciphertext to decrypt. - Opts crypto.DecrypterOpts // Options for decryption. Ex: an instance of *rsa.OAEPOptions. -} - -// Key implements credential.Credential by holding the executed signer subprocess. -type Key struct { - cmd *exec.Cmd // Pointer to the signer subprocess. - client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess. - publicKey crypto.PublicKey // Public key of loaded certificate. - chain [][]byte // Certificate chain of loaded certificate. -} - -// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key. -func (k *Key) CertificateChain() [][]byte { - return k.chain -} - -// Close closes the RPC connection and kills the signer subprocess. -// Call this to free up resources when the Key object is no longer needed. -func (k *Key) Close() error { - if err := k.cmd.Process.Kill(); err != nil { - return fmt.Errorf("failed to kill signer process: %w", err) - } - // Wait for cmd to exit and release resources. Since the process is forcefully killed, this - // will return a non-nil error (varies by OS), which we will ignore. - _ = k.cmd.Wait() - // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. - // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. - if err := k.client.Close(); err.Error() != "close |0: file already closed" { - return fmt.Errorf("failed to close RPC connection: %w", err) - } - return nil -} - -// Public returns the public key for this Key. -func (k *Key) Public() crypto.PublicKey { - return k.publicKey -} - -// Sign signs a message digest, using the specified signer opts. Implements crypto.Signer interface. 
-func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { - if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { - return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) - } - err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) - return -} - -// Encrypt encrypts a plaintext msg into ciphertext, using the specified encrypt opts. -func (k *Key) Encrypt(_ io.Reader, msg []byte, opts any) (ciphertext []byte, err error) { - err = k.client.Call(encryptAPI, EncryptArgs{Plaintext: msg, Opts: opts}, &ciphertext) - return -} - -// Decrypt decrypts a ciphertext msg into plaintext, using the specified decrypter opts. Implements crypto.Decrypter interface. -func (k *Key) Decrypt(_ io.Reader, msg []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) { - err = k.client.Call(decryptAPI, DecryptArgs{Ciphertext: msg, Opts: opts}, &plaintext) - return -} - -// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, -// possibly due to missing config or missing binary path. -var ErrCredUnavailable = errors.New("Cred is unavailable") - -// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate -// related operations, including signing messages with the private key. -// -// The signer binary path is read from the specified configFilePath, if provided. -// Otherwise, use the default config file path. -// -// The config file also specifies which certificate the signer should use. -func Cred(configFilePath string) (*Key, error) { - if configFilePath == "" { - envFilePath := util.GetConfigFilePathFromEnv() - if envFilePath != "" { - configFilePath = envFilePath - } else { - configFilePath = util.GetDefaultConfigFilePath() - } - } - enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) - if err != nil { - if errors.Is(err, util.ErrConfigUnavailable) { - return nil, ErrCredUnavailable - } - return nil, err - } - k := &Key{ - cmd: exec.Command(enterpriseCertSignerPath, configFilePath), - } - - // Redirect errors from subprocess to parent process. - k.cmd.Stderr = os.Stderr - - // RPC client will communicate with subprocess over stdin/stdout. 
- kin, err := k.cmd.StdinPipe() - if err != nil { - return nil, err - } - kout, err := k.cmd.StdoutPipe() - if err != nil { - return nil, err - } - k.client = rpc.NewClient(&Connection{kout, kin}) - - if err := k.cmd.Start(); err != nil { - return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err) - } - - if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil { - return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err) - } - - var publicKeyBytes []byte - if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil { - return nil, fmt.Errorf("failed to retrieve public key: %w", err) - } - - publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) - if err != nil { - return nil, fmt.Errorf("failed to parse public key: %w", err) - } - - var ok bool - k.publicKey, ok = publicKey.(crypto.PublicKey) - if !ok { - return nil, fmt.Errorf("invalid public key type: %T", publicKey) - } - - switch pub := k.publicKey.(type) { - case *rsa.PublicKey: - if pub.Size() < 256 { - return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) - } - case *ecdsa.PublicKey: - default: - return nil, fmt.Errorf("unsupported public key type: %v", pub) - } - - return k, nil -} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go deleted file mode 100644 index f374a7f55..000000000 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2022 Google LLC. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package util provides helper functions for the client. -package util - -import ( - "encoding/json" - "errors" - "io" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" -) - -const configFileName = "certificate_config.json" - -// EnterpriseCertificateConfig contains parameters for initializing signer. -type EnterpriseCertificateConfig struct { - Libs Libs `json:"libs"` -} - -// Libs specifies the locations of helper libraries. -type Libs struct { - ECP string `json:"ecp"` -} - -// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable, -// possibly due to entire config missing or missing binary path. -var ErrConfigUnavailable = errors.New("Config is unavailable") - -// LoadSignerBinaryPath retrieves the path of the signer binary from the config file. 
-func LoadSignerBinaryPath(configFilePath string) (path string, err error) { - jsonFile, err := os.Open(configFilePath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", ErrConfigUnavailable - } - return "", err - } - - byteValue, err := io.ReadAll(jsonFile) - if err != nil { - return "", err - } - var config EnterpriseCertificateConfig - err = json.Unmarshal(byteValue, &config) - if err != nil { - return "", err - } - signerBinaryPath := config.Libs.ECP - if signerBinaryPath == "" { - return "", ErrConfigUnavailable - } - - signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "~", guessHomeDir()) - signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "$HOME", guessHomeDir()) - return signerBinaryPath, nil -} - -func guessHomeDir() string { - // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 - if v := os.Getenv("HOME"); v != "" { - return v - } - // Else, fall back to user.Current: - if u, err := user.Current(); err == nil { - return u.HomeDir - } - return "" -} - -func getDefaultConfigFileDirectory() (directory string) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud") - } - return filepath.Join(guessHomeDir(), ".config/gcloud") -} - -// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. -func GetDefaultConfigFilePath() (path string) { - return filepath.Join(getDefaultConfigFileDirectory(), configFileName) -} - -// GetConfigFilePathFromEnv returns the path associated with environment variable GOOGLE_API_CERTIFICATE_CONFIG -func GetConfigFilePathFromEnv() (path string) { - return os.Getenv("GOOGLE_API_CERTIFICATE_CONFIG") -} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json deleted file mode 100644 index feb372228..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "v2": "2.12.3" -} diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md deleted file mode 100644 index 0d019d97f..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ /dev/null @@ -1,128 +0,0 @@ -# Changelog - -## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14) - - -### Bug Fixes - -* bump protobuf dep to v1.33 ([#333](https://github.com/googleapis/gax-go/issues/333)) ([2892b22](https://github.com/googleapis/gax-go/commit/2892b22c1ae8a70dec3448d82e634643fe6c1be2)) - -## [2.12.2](https://github.com/googleapis/gax-go/compare/v2.12.1...v2.12.2) (2024-02-23) - - -### Bug Fixes - -* **v2/callctx:** fix SetHeader race by cloning header map ([#326](https://github.com/googleapis/gax-go/issues/326)) ([534311f](https://github.com/googleapis/gax-go/commit/534311f0f163d101f30657736c0e6f860e9c39dc)) - -## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13) - - -### Bug Fixes - -* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe)) - -## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) - - -### Features - -* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) -* 
**v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) - -## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) - - -### Features - -* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) - - -### Bug Fixes - -* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) - -## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) - - -### Features - -* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) - -## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) - - -### Bug Fixes - -* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) - -## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) - - -### Features - -* **apierror:** add method to return HTTP status code conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) - - -### Documentation - -* add ref to usage with clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) - -## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) - - -### Features - -* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) - -## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) - - -### Bug Fixes - -* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) - -## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) - - -### Features - -* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) -* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) - -## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) - - -### Features - -* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) 
([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) - -## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) - - -### Bug Fixes - -* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) - -## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) - - -### Features - -* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) - -## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) - - -### Features - -* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) - - -### Bug Fixes - -* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) - - -### Miscellaneous Chores - -* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/vendor/github.com/googleapis/gax-go/v2/LICENSE b/vendor/github.com/googleapis/gax-go/v2/LICENSE deleted file mode 100644 index 6d16b6578..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2016, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go deleted file mode 100644 index d785a065c..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2021, Google Inc. 
-// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package apierror implements a wrapper error for parsing error details from -// API calls. Both HTTP & gRPC status errors are supported. -// -// For examples of how to use [APIError] with client libraries please reference -// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) -// in the client library documentation. -package apierror - -import ( - "errors" - "fmt" - "strings" - - jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" - "google.golang.org/api/googleapi" - "google.golang.org/genproto/googleapis/rpc/errdetails" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" -) - -// ErrDetails holds the google/rpc/error_details.proto messages. -type ErrDetails struct { - ErrorInfo *errdetails.ErrorInfo - BadRequest *errdetails.BadRequest - PreconditionFailure *errdetails.PreconditionFailure - QuotaFailure *errdetails.QuotaFailure - RetryInfo *errdetails.RetryInfo - ResourceInfo *errdetails.ResourceInfo - RequestInfo *errdetails.RequestInfo - DebugInfo *errdetails.DebugInfo - Help *errdetails.Help - LocalizedMessage *errdetails.LocalizedMessage - - // Unknown stores unidentifiable error details. - Unknown []interface{} -} - -// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. -var ErrMessageNotFound = errors.New("message not found") - -// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the -// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, -// the content of the message is copied to the provided message. -// -// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the -// protocol buffer type of the provided message. 
-func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { - if v == nil { - return ErrMessageNotFound - } - for _, elem := range e.Unknown { - if elemProto, ok := elem.(proto.Message); ok { - if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { - proto.Merge(v, elemProto) - return nil - } - } - } - return ErrMessageNotFound -} - -func (e ErrDetails) String() string { - var d strings.Builder - if e.ErrorInfo != nil { - d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n", - e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata())) - } - - if e.BadRequest != nil { - v := e.BadRequest.GetFieldViolations() - var f []string - var desc []string - for _, x := range v { - f = append(f, x.GetField()) - desc = append(desc, x.GetDescription()) - } - d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n", - strings.Join(f, " "), strings.Join(desc, " "))) - } - - if e.PreconditionFailure != nil { - v := e.PreconditionFailure.GetViolations() - var t []string - var s []string - var desc []string - for _, x := range v { - t = append(t, x.GetType()) - s = append(s, x.GetSubject()) - desc = append(desc, x.GetDescription()) - } - d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "), - strings.Join(s, " "), strings.Join(desc, " "))) - } - - if e.QuotaFailure != nil { - v := e.QuotaFailure.GetViolations() - var s []string - var desc []string - for _, x := range v { - s = append(s, x.GetSubject()) - desc = append(desc, x.GetDescription()) - } - d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n", - strings.Join(s, " "), strings.Join(desc, " "))) - } - - if e.RequestInfo != nil { - d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n", - e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData())) - } - - if e.ResourceInfo != nil { - d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n", - e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(), - e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription())) - - } - if e.RetryInfo != nil { - d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration())) - - } - if e.Unknown != nil { - var s []string - for _, x := range e.Unknown { - s = append(s, fmt.Sprintf("%v", x)) - } - d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " "))) - } - - if e.DebugInfo != nil { - d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(), - strings.Join(e.DebugInfo.GetStackEntries(), " "))) - } - if e.Help != nil { - var desc []string - var url []string - for _, x := range e.Help.Links { - desc = append(desc, x.GetDescription()) - url = append(url, x.GetUrl()) - } - d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n", - strings.Join(desc, " "), strings.Join(url, " "))) - } - if e.LocalizedMessage != nil { - d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n", - e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage())) - } - - return d.String() -} - -// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. It -// implements error and Status interfaces. 
-type APIError struct { - err error - status *status.Status - httpErr *googleapi.Error - details ErrDetails -} - -// Details presents the error details of the APIError. -func (a *APIError) Details() ErrDetails { - return a.details -} - -// Unwrap extracts the original error. -func (a *APIError) Unwrap() error { - return a.err -} - -// Error returns a readable representation of the APIError. -func (a *APIError) Error() string { - var msg string - if a.httpErr != nil { - // Truncate the googleapi.Error message because it dumps the Details in - // an ugly way. - msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) - } else if a.status != nil { - msg = a.err.Error() - } - return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) -} - -// GRPCStatus extracts the underlying gRPC Status error. -// This method is necessary to fulfill the interface -// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError. -func (a *APIError) GRPCStatus() *status.Status { - return a.status -} - -// Reason returns the reason in an ErrorInfo. -// If ErrorInfo is nil, it returns an empty string. -func (a *APIError) Reason() string { - return a.details.ErrorInfo.GetReason() -} - -// Domain returns the domain in an ErrorInfo. -// If ErrorInfo is nil, it returns an empty string. -func (a *APIError) Domain() string { - return a.details.ErrorInfo.GetDomain() -} - -// Metadata returns the metadata in an ErrorInfo. -// If ErrorInfo is nil, it returns nil. -func (a *APIError) Metadata() map[string]string { - return a.details.ErrorInfo.GetMetadata() - -} - -// setDetailsFromError parses a Status error or a googleapi.Error -// and sets status and details or httpErr and details, respectively. -// It returns false if neither Status nor googleapi.Error can be parsed. -// When err is a googleapi.Error, the status of the returned error will -// be set to an Unknown error, rather than nil, since a nil code is -// interpreted as OK in the gRPC status package. -func (a *APIError) setDetailsFromError(err error) bool { - st, isStatus := status.FromError(err) - var herr *googleapi.Error - isHTTPErr := errors.As(err, &herr) - - switch { - case isStatus: - a.status = st - a.details = parseDetails(st.Details()) - case isHTTPErr: - a.httpErr = herr - a.details = parseHTTPDetails(herr) - a.status = status.New(codes.Unknown, herr.Message) - default: - return false - } - return true -} - -// FromError parses a Status error or a googleapi.Error and builds an -// APIError, wrapping the provided error in the new APIError. It -// returns false if neither Status nor googleapi.Error can be parsed. -func FromError(err error) (*APIError, bool) { - return ParseError(err, true) -} - -// ParseError parses a Status error or a googleapi.Error and builds an -// APIError. If wrap is true, it wraps the error in the new APIError. -// It returns false if neither Status nor googleapi.Error can be parsed. -func ParseError(err error, wrap bool) (*APIError, bool) { - if err == nil { - return nil, false - } - ae := APIError{} - if wrap { - ae = APIError{err: err} - } - if !ae.setDetailsFromError(err) { - return nil, false - } - return &ae, true -} - -// parseDetails accepts a slice of interface{} that should be backed by some -// sort of proto.Message that can be cast to the google/rpc/error_details.proto -// types. -// -// This is for internal use only. 
-func parseDetails(details []interface{}) ErrDetails { - var ed ErrDetails - for _, d := range details { - switch d := d.(type) { - case *errdetails.ErrorInfo: - ed.ErrorInfo = d - case *errdetails.BadRequest: - ed.BadRequest = d - case *errdetails.PreconditionFailure: - ed.PreconditionFailure = d - case *errdetails.QuotaFailure: - ed.QuotaFailure = d - case *errdetails.RetryInfo: - ed.RetryInfo = d - case *errdetails.ResourceInfo: - ed.ResourceInfo = d - case *errdetails.RequestInfo: - ed.RequestInfo = d - case *errdetails.DebugInfo: - ed.DebugInfo = d - case *errdetails.Help: - ed.Help = d - case *errdetails.LocalizedMessage: - ed.LocalizedMessage = d - default: - ed.Unknown = append(ed.Unknown, d) - } - } - - return ed -} - -// parseHTTPDetails will convert the given googleapi.Error into the protobuf -// representation then parse the Any values that contain the error details. -// -// This is for internal use only. -func parseHTTPDetails(gae *googleapi.Error) ErrDetails { - e := &jsonerror.Error{} - if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil { - // If the error body does not conform to the error schema, ignore it - // altogther. See https://cloud.google.com/apis/design/errors#http_mapping. - return ErrDetails{} - } - - // Coerce the Any messages into proto.Message then parse the details. - details := []interface{}{} - for _, any := range e.GetError().GetDetails() { - m, err := any.UnmarshalNew() - if err != nil { - // Ignore malformed Any values. - continue - } - details = append(details, m) - } - - return parseDetails(details) -} - -// HTTPCode returns the underlying HTTP response status code. This method returns -// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To -// check gRPC error codes use [google.golang.org/grpc/status.Code]. -func (a *APIError) HTTPCode() int { - if a.httpErr == nil { - return -1 - } - return a.httpErr.Code -} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md deleted file mode 100644 index 9ff0caea9..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# HTTP JSON Error Schema - -The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey -error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping. -This package is for internal parsing logic only and should not be used in any -other context. - -## Regeneration - -To regenerate the protobuf Go code you will need the following: - -* A local copy of [googleapis], the absolute path to which should be exported to -the environment variable `GOOGLEAPIS` -* The protobuf compiler [protoc] -* The Go [protobuf plugin] -* The [goimports] tool - -From this directory run the following command: -```sh -protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto -goimports -w . -``` - -Note: the `module` plugin option ensures the generated code is placed in this -directory, and not in several nested directories defined by `go_package` option. 
- -[googleapis]: https://github.com/googleapis/googleapis -[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation -[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated -[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports \ No newline at end of file diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go deleted file mode 100644 index e4b03f161..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.17.3 -// source: custom_error.proto - -package jsonerror - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Error code for `CustomError`. -type CustomError_CustomErrorCode int32 - -const ( - // Default error. - CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 - // Too many foo. - CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 - // Not enough foo. - CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 - // Catastrophic error. - CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 -) - -// Enum value maps for CustomError_CustomErrorCode. -var ( - CustomError_CustomErrorCode_name = map[int32]string{ - 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", - 1: "TOO_MANY_FOO", - 2: "NOT_ENOUGH_FOO", - 3: "UNIVERSE_WAS_DESTROYED", - } - CustomError_CustomErrorCode_value = map[string]int32{ - "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, - "TOO_MANY_FOO": 1, - "NOT_ENOUGH_FOO": 2, - "UNIVERSE_WAS_DESTROYED": 3, - } -) - -func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { - p := new(CustomError_CustomErrorCode) - *p = x - return p -} - -func (x CustomError_CustomErrorCode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { - return file_custom_error_proto_enumTypes[0].Descriptor() -} - -func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { - return &file_custom_error_proto_enumTypes[0] -} - -func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. 
-func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { - return file_custom_error_proto_rawDescGZIP(), []int{0, 0} -} - -// CustomError is an example of a custom error message which may be included -// in an rpc status. It is not meant to reflect a standard error. -type CustomError struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Error code specific to the custom API being invoked. - Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` - // Name of the failed entity. - Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` - // Message that describes the error. - ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *CustomError) Reset() { - *x = CustomError{} - if protoimpl.UnsafeEnabled { - mi := &file_custom_error_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CustomError) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CustomError) ProtoMessage() {} - -func (x *CustomError) ProtoReflect() protoreflect.Message { - mi := &file_custom_error_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. -func (*CustomError) Descriptor() ([]byte, []int) { - return file_custom_error_proto_rawDescGZIP(), []int{0} -} - -func (x *CustomError) GetCode() CustomError_CustomErrorCode { - if x != nil { - return x.Code - } - return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED -} - -func (x *CustomError) GetEntity() string { - if x != nil { - return x.Entity - } - return "" -} - -func (x *CustomError) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -var File_custom_error_proto protoreflect.FileDescriptor - -var file_custom_error_proto_rawDesc = []byte{ - 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, - 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 
0x4f, 0x4f, 0x5f, 0x4d, 0x41, - 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, - 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, - 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, - 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_custom_error_proto_rawDescOnce sync.Once - file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc -) - -func file_custom_error_proto_rawDescGZIP() []byte { - file_custom_error_proto_rawDescOnce.Do(func() { - file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) - }) - return file_custom_error_proto_rawDescData -} - -var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_custom_error_proto_goTypes = []interface{}{ - (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode - (*CustomError)(nil), // 1: error.CustomError -} -var file_custom_error_proto_depIdxs = []int32{ - 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_custom_error_proto_init() } -func file_custom_error_proto_init() { - if File_custom_error_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomError); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_custom_error_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_custom_error_proto_goTypes, - DependencyIndexes: file_custom_error_proto_depIdxs, - EnumInfos: file_custom_error_proto_enumTypes, - MessageInfos: file_custom_error_proto_msgTypes, - }.Build() - File_custom_error_proto = out.File - file_custom_error_proto_rawDesc = nil - file_custom_error_proto_goTypes = nil - file_custom_error_proto_depIdxs = nil -} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto deleted file mode 100644 index 21678ae65..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package error; - -option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; - - -// CustomError is an example of a custom error message which may be included -// in an rpc status. It is not meant to reflect a standard error. -message CustomError { - - // Error code for `CustomError`. - enum CustomErrorCode { - // Default error. - CUSTOM_ERROR_CODE_UNSPECIFIED = 0; - - // Too many foo. - TOO_MANY_FOO = 1; - - // Not enough foo. - NOT_ENOUGH_FOO = 2; - - // Catastrophic error. - UNIVERSE_WAS_DESTROYED = 3; - - } - - // Error code specific to the custom API being invoked. - CustomErrorCode code = 1; - - // Name of the failed entity. - string entity = 2; - - // Message that describes the error. - string error_message = 3; -} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go deleted file mode 100644 index 7dd9b8373..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.15.8 -// source: apierror/internal/proto/error.proto - -package jsonerror - -import ( - reflect "reflect" - sync "sync" - - code "google.golang.org/genproto/googleapis/rpc/code" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The error format v2 for Google JSON REST APIs. -// Copied from https://cloud.google.com/apis/design/errors#http_mapping. -// -// NOTE: This schema is not used for other wire protocols. -type Error struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The actual error payload. The nested message structure is for backward - // compatibility with Google API client libraries. It also makes the error - // more readable to developers. 
- Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *Error) Reset() { - *x = Error{} - if protoimpl.UnsafeEnabled { - mi := &file_apierror_internal_proto_error_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Error) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Error) ProtoMessage() {} - -func (x *Error) ProtoReflect() protoreflect.Message { - mi := &file_apierror_internal_proto_error_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Error.ProtoReflect.Descriptor instead. -func (*Error) Descriptor() ([]byte, []int) { - return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0} -} - -func (x *Error) GetError() *Error_Status { - if x != nil { - return x.Error - } - return nil -} - -// This message has the same semantics as `google.rpc.Status`. It uses HTTP -// status code instead of gRPC status code. It has an extra field `status` -// for backward compatibility with Google API Client Libraries. -type Error_Status struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The HTTP status code that corresponds to `google.rpc.Status.code`. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // This corresponds to `google.rpc.Status.message`. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // This is the enum version for `google.rpc.Status.code`. - Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"` - // This corresponds to `google.rpc.Status.details`. - Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` -} - -func (x *Error_Status) Reset() { - *x = Error_Status{} - if protoimpl.UnsafeEnabled { - mi := &file_apierror_internal_proto_error_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Error_Status) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Error_Status) ProtoMessage() {} - -func (x *Error_Status) ProtoReflect() protoreflect.Message { - mi := &file_apierror_internal_proto_error_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. 
-func (*Error_Status) Descriptor() ([]byte, []int) { - return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Error_Status) GetCode() int32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *Error_Status) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -func (x *Error_Status) GetStatus() code.Code { - if x != nil { - return x.Status - } - return code.Code(0) -} - -func (x *Error_Status) GetDetails() []*anypb.Any { - if x != nil { - return x.Details - } - return nil -} - -var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor - -var file_apierror_internal_proto_error_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, - 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_apierror_internal_proto_error_proto_rawDescOnce sync.Once - file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc -) - -func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte { - file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() { - file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData) - }) - return file_apierror_internal_proto_error_proto_rawDescData -} - -var file_apierror_internal_proto_error_proto_msgTypes 
= make([]protoimpl.MessageInfo, 2) -var file_apierror_internal_proto_error_proto_goTypes = []interface{}{ - (*Error)(nil), // 0: error.Error - (*Error_Status)(nil), // 1: error.Error.Status - (code.Code)(0), // 2: google.rpc.Code - (*anypb.Any)(nil), // 3: google.protobuf.Any -} -var file_apierror_internal_proto_error_proto_depIdxs = []int32{ - 1, // 0: error.Error.error:type_name -> error.Error.Status - 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code - 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_apierror_internal_proto_error_proto_init() } -func file_apierror_internal_proto_error_proto_init() { - if File_apierror_internal_proto_error_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Error); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Error_Status); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_apierror_internal_proto_error_proto_goTypes, - DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs, - MessageInfos: file_apierror_internal_proto_error_proto_msgTypes, - }.Build() - File_apierror_internal_proto_error_proto = out.File - file_apierror_internal_proto_error_proto_rawDesc = nil - file_apierror_internal_proto_error_proto_goTypes = nil - file_apierror_internal_proto_error_proto_depIdxs = nil -} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto deleted file mode 100644 index 4b9b13ce1..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package error; - -import "google/protobuf/any.proto"; -import "google/rpc/code.proto"; - -option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; - -// The error format v2 for Google JSON REST APIs. -// Copied from https://cloud.google.com/apis/design/errors#http_mapping. 
-// -// NOTE: This schema is not used for other wire protocols. -message Error { - // This message has the same semantics as `google.rpc.Status`. It uses HTTP - // status code instead of gRPC status code. It has an extra field `status` - // for backward compatibility with Google API Client Libraries. - message Status { - // The HTTP status code that corresponds to `google.rpc.Status.code`. - int32 code = 1; - // This corresponds to `google.rpc.Status.message`. - string message = 2; - // This is the enum version for `google.rpc.Status.code`. - google.rpc.Code status = 4; - // This corresponds to `google.rpc.Status.details`. - repeated google.protobuf.Any details = 5; - } - // The actual error payload. The nested message structure is for backward - // compatibility with Google API client libraries. It also makes the error - // more readable to developers. - Status error = 1; -} diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go deleted file mode 100644 index c52e03f64..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "errors" - "math/rand" - "time" - - "google.golang.org/api/googleapi" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// CallOption is an option used by Invoke to control behaviors of RPC calls. -// CallOption works by modifying relevant fields of CallSettings. -type CallOption interface { - // Resolve applies the option by modifying cs. - Resolve(cs *CallSettings) -} - -// Retryer is used by Invoke to determine retry behavior. -type Retryer interface { - // Retry reports whether a request should be retried and how long to pause before retrying - // if the previous attempt returned with err. Invoke never calls Retry with nil error. 
- Retry(err error) (pause time.Duration, shouldRetry bool) -} - -type retryerOption func() Retryer - -func (o retryerOption) Resolve(s *CallSettings) { - s.Retry = o -} - -// WithRetry sets CallSettings.Retry to fn. -func WithRetry(fn func() Retryer) CallOption { - return retryerOption(fn) -} - -// OnErrorFunc returns a Retryer that retries if and only if the previous attempt -// returns an error that satisfies shouldRetry. -// -// Pause times between retries are specified by bo. bo is only used for its -// parameters; each Retryer has its own copy. -func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer { - return &errorRetryer{ - shouldRetry: shouldRetry, - backoff: bo, - } -} - -type errorRetryer struct { - backoff Backoff - shouldRetry func(err error) bool -} - -func (r *errorRetryer) Retry(err error) (time.Duration, bool) { - if r.shouldRetry(err) { - return r.backoff.Pause(), true - } - - return 0, false -} - -// OnCodes returns a Retryer that retries if and only if -// the previous attempt returns a GRPC error whose error code is stored in cc. -// Pause times between retries are specified by bo. -// -// bo is only used for its parameters; each Retryer has its own copy. -func OnCodes(cc []codes.Code, bo Backoff) Retryer { - return &boRetryer{ - backoff: bo, - codes: append([]codes.Code(nil), cc...), - } -} - -type boRetryer struct { - backoff Backoff - codes []codes.Code -} - -func (r *boRetryer) Retry(err error) (time.Duration, bool) { - st, ok := status.FromError(err) - if !ok { - return 0, false - } - c := st.Code() - for _, rc := range r.codes { - if c == rc { - return r.backoff.Pause(), true - } - } - return 0, false -} - -// OnHTTPCodes returns a Retryer that retries if and only if -// the previous attempt returns a googleapi.Error whose status code is stored in -// cc. Pause times between retries are specified by bo. -// -// bo is only used for its parameters; each Retryer has its own copy. -func OnHTTPCodes(bo Backoff, cc ...int) Retryer { - codes := make(map[int]bool, len(cc)) - for _, c := range cc { - codes[c] = true - } - - return &httpRetryer{ - backoff: bo, - codes: codes, - } -} - -type httpRetryer struct { - backoff Backoff - codes map[int]bool -} - -func (r *httpRetryer) Retry(err error) (time.Duration, bool) { - var gerr *googleapi.Error - if !errors.As(err, &gerr) { - return 0, false - } - - if r.codes[gerr.Code] { - return r.backoff.Pause(), true - } - - return 0, false -} - -// Backoff implements exponential backoff. The wait time between retries is a -// random value between 0 and the "retry period" - the time between retries. The -// retry period starts at Initial and increases by the factor of Multiplier -// every retry, but is capped at Max. -// -// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should -// be built on top of Backoff. -type Backoff struct { - // Initial is the initial value of the retry period, defaults to 1 second. - Initial time.Duration - - // Max is the maximum value of the retry period, defaults to 30 seconds. - Max time.Duration - - // Multiplier is the factor by which the retry period increases. - // It should be greater than 1 and defaults to 2. - Multiplier float64 - - // cur is the current retry period. - cur time.Duration -} - -// Pause returns the next time.Duration that the caller should use to backoff. 
-func (bo *Backoff) Pause() time.Duration { - if bo.Initial == 0 { - bo.Initial = time.Second - } - if bo.cur == 0 { - bo.cur = bo.Initial - } - if bo.Max == 0 { - bo.Max = 30 * time.Second - } - if bo.Multiplier < 1 { - bo.Multiplier = 2 - } - // Select a duration between 1ns and the current max. It might seem - // counterintuitive to have so much jitter, but - // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that - // that is the best strategy. - d := time.Duration(1 + rand.Int63n(int64(bo.cur))) - bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) - if bo.cur > bo.Max { - bo.cur = bo.Max - } - return d -} - -type grpcOpt []grpc.CallOption - -func (o grpcOpt) Resolve(s *CallSettings) { - s.GRPC = o -} - -type pathOpt struct { - p string -} - -func (p pathOpt) Resolve(s *CallSettings) { - s.Path = p.p -} - -type timeoutOpt struct { - t time.Duration -} - -func (t timeoutOpt) Resolve(s *CallSettings) { - s.timeout = t.t -} - -// WithPath applies a Path override to the HTTP-based APICall. -// -// This is for internal use only. -func WithPath(p string) CallOption { - return &pathOpt{p: p} -} - -// WithGRPCOptions allows passing gRPC call options during client creation. -func WithGRPCOptions(opt ...grpc.CallOption) CallOption { - return grpcOpt(append([]grpc.CallOption(nil), opt...)) -} - -// WithTimeout is a convenience option for setting a context.WithTimeout on the -// singular context.Context used for **all** APICall attempts. Calculated from -// the start of the first APICall attempt. -// If the context.Context provided to Invoke already has a Deadline set, that -// will always be respected over the deadline calculated using this option. -func WithTimeout(t time.Duration) CallOption { - return &timeoutOpt{t: t} -} - -// CallSettings allow fine-grained control over how calls are made. -type CallSettings struct { - // Retry returns a Retryer to be used to control retry logic of a method call. - // If Retry is nil or the returned Retryer is nil, the call will not be retried. - Retry func() Retryer - - // CallOptions to be forwarded to GRPC. - GRPC []grpc.CallOption - - // Path is an HTTP override for an APICall. - Path string - - // Timeout defines the amount of time that Invoke has to complete. - // Unexported so it cannot be changed by the code in an APICall. - timeout time.Duration -} diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go deleted file mode 100644 index f5af5c990..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2023, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
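For context on the call options un-vendored above: callers compose Backoff, OnCodes, and WithRetry into a single CallOption and hand it to a client call (or to Invoke, shown further down in this diff). A minimal sketch, assuming the gax-go/v2 API from the deleted call_option.go; the retried code and the backoff values are illustrative:

    package main

    import (
        "time"

        gax "github.com/googleapis/gax-go/v2"
        "google.golang.org/grpc/codes"
    )

    func main() {
        // Retry only on gRPC Unavailable, using the jittered exponential
        // backoff implemented by Backoff.Pause.
        retry := gax.WithRetry(func() gax.Retryer {
            return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
                Initial:    200 * time.Millisecond, // starting retry period
                Max:        30 * time.Second,       // cap on the retry period
                Multiplier: 2,                      // growth factor between attempts
            })
        })
        _ = retry // pass to a generated client call, or to gax.Invoke
    }

Zero-valued Backoff fields fall back to the defaults documented above: 1s initial period, 30s maximum, 2x multiplier.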
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package callctx provides helpers for storing and retrieving values out of -// [context.Context]. These values are used by our client libraries in various -// ways across the stack. -package callctx - -import ( - "context" - "fmt" -) - -const ( - // XGoogFieldMaskHeader is the canonical header key for the [System Parameter] - // that specifies the response read mask. The value(s) for this header - // must adhere to format described in [fieldmaskpb]. - // - // [System Parameter]: https://cloud.google.com/apis/docs/system-parameters - // [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb - XGoogFieldMaskHeader = "x-goog-fieldmask" - - headerKey = contextKey("header") -) - -// contextKey is a private type used to store/retrieve context values. -type contextKey string - -// HeadersFromContext retrieves headers set from [SetHeaders]. These headers -// can then be cast to http.Header or metadata.MD to send along on requests. -func HeadersFromContext(ctx context.Context) map[string][]string { - m, ok := ctx.Value(headerKey).(map[string][]string) - if !ok { - return nil - } - return m -} - -// SetHeaders stores key value pairs in the returned context that can later -// be retrieved by [HeadersFromContext]. Values stored in this manner will -// automatically be retrieved by client libraries and sent as outgoing headers -// on all requests. keyvals should have a corresponding value for every key -// provided. If there is an odd number of keyvals this method will panic. -func SetHeaders(ctx context.Context, keyvals ...string) context.Context { - if len(keyvals)%2 != 0 { - panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) - } - h, ok := ctx.Value(headerKey).(map[string][]string) - if !ok { - h = make(map[string][]string) - } else { - h = cloneHeaders(h) - } - - for i := 0; i < len(keyvals); i = i + 2 { - h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) - } - return context.WithValue(ctx, headerKey, h) -} - -// cloneHeaders makes a new key-value map while reusing the value slices. -// As such, new values should be appended to the value slice, and modifying -// indexed values is not thread safe. -// -// TODO: Replace this with maps.Clone when Go 1.21 is the minimum version. 
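The callctx helpers removed here are consumed by attaching headers to the request context before a call is made; a short sketch of that flow, assuming the callctx API above (the second header name and both values are illustrative):

    package main

    import (
        "context"
        "fmt"

        "github.com/googleapis/gax-go/v2/callctx"
    )

    func main() {
        ctx := context.Background()
        // Key/value pairs must come in even numbers; SetHeaders panics otherwise.
        ctx = callctx.SetHeaders(ctx, callctx.XGoogFieldMaskHeader, "name")
        ctx = callctx.SetHeaders(ctx, "x-example-request-tag", "demo") // illustrative header
        fmt.Println(callctx.HeadersFromContext(ctx)) // map[string][]string with both entries
    }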
-func cloneHeaders(h map[string][]string) map[string][]string { - c := make(map[string][]string, len(h)) - for k, v := range h { - vc := make([]string, len(v)) - copy(vc, v) - c[k] = vc - } - return c -} diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go deleted file mode 100644 index 1b53d0a3a..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/content_type.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2022, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "io" - "io/ioutil" - "net/http" -) - -const sniffBuffSize = 512 - -func newContentSniffer(r io.Reader) *contentSniffer { - return &contentSniffer{r: r} -} - -// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. -type contentSniffer struct { - r io.Reader - start []byte // buffer for the sniffed bytes. - err error // set to any error encountered while reading bytes to be sniffed. - - ctype string // set on first sniff. - sniffed bool // set to true on first sniff. -} - -func (cs *contentSniffer) Read(p []byte) (n int, err error) { - // Ensure that the content type is sniffed before any data is consumed from Reader. - _, _ = cs.ContentType() - - if len(cs.start) > 0 { - n := copy(p, cs.start) - cs.start = cs.start[n:] - return n, nil - } - - // We may have read some bytes into start while sniffing, even if the read ended in an error. - // We should first return those bytes, then the error. - if cs.err != nil { - return 0, cs.err - } - - // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. - return cs.r.Read(p) -} - -// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. -func (cs *contentSniffer) ContentType() (string, bool) { - if cs.sniffed { - return cs.ctype, cs.ctype != "" - } - cs.sniffed = true - // If ReadAll hits EOF, it returns err==nil. 
- cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) - - // Don't try to detect the content type based on possibly incomplete data. - if cs.err != nil { - return "", false - } - - cs.ctype = http.DetectContentType(cs.start) - return cs.ctype, true -} - -// DetermineContentType determines the content type of the supplied reader. -// The content of media will be sniffed to determine the content type. -// After calling DetectContentType the caller must not perform further reads on -// media, but rather read from the Reader that is returned. -func DetermineContentType(media io.Reader) (io.Reader, string) { - // For backwards compatibility, allow clients to set content - // type by providing a ContentTyper for media. - // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. - if typer, ok := media.(interface { - ContentType() string - }); ok { - return media, typer.ContentType() - } - - sniffer := newContentSniffer(media) - if ctype, ok := sniffer.ContentType(); ok { - return sniffer, ctype - } - // If content type could not be sniffed, reads from sniffer will eventually fail with an error. - return sniffer, "" -} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go deleted file mode 100644 index 36cdfa33e..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/gax.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package gax contains a set of modules which aid the development of APIs -// for clients and servers based on gRPC and Google API conventions. -// -// Application code will rarely need to use this library directly. -// However, code generated automatically from API definition files can use it -// to simplify code generation and to provide more convenient and idiomatic API surfaces. -package gax - -import "github.com/googleapis/gax-go/v2/internal" - -// Version specifies the gax-go version being used. 
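content_type.go above exists so upload helpers can sniff a payload's media type; the caller must keep reading from the reader that DetermineContentType returns, since it replays the sniffed bytes. A brief sketch, assuming the gax-go/v2 API from the deleted file; the file name is illustrative:

    package main

    import (
        "fmt"
        "log"
        "os"

        gax "github.com/googleapis/gax-go/v2"
    )

    func main() {
        f, err := os.Open("backup.tar.gz") // illustrative file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // body wraps f and replays the bytes consumed while sniffing,
        // so read from body, not from f.
        body, ctype := gax.DetermineContentType(f)
        fmt.Println("detected content type:", ctype)
        _ = body // use body as the upload payload
    }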
-const Version = internal.Version diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go deleted file mode 100644 index 3e53729e5..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2018, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "bytes" - "context" - "fmt" - "net/http" - "runtime" - "strings" - "unicode" - - "github.com/googleapis/gax-go/v2/callctx" - "google.golang.org/grpc/metadata" -) - -var ( - // GoVersion is a header-safe representation of the current runtime - // environment's Go version. This is for GAX consumers that need to - // report the Go runtime version in API calls. - GoVersion string - // version is a package internal global variable for testing purposes. - version = runtime.Version -) - -// versionUnknown is only used when the runtime version cannot be determined. -const versionUnknown = "UNKNOWN" - -func init() { - GoVersion = goVersion() -} - -// goVersion returns a Go runtime version derived from the runtime environment -// that is modified to be suitable for reporting in a header, meaning it has no -// whitespace. If it is unable to determine the Go runtime version, it returns -// versionUnknown. -func goVersion() string { - const develPrefix = "devel +" - - s := version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - // Some release candidates already have a dash in them. 
- if !strings.HasPrefix(prerelease, "-") { - prerelease = "-" + prerelease - } - s += prerelease - } - return s - } - return "UNKNOWN" -} - -// XGoogHeader is for use by the Google Cloud Libraries only. See package -// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving -// request/response headers. -// -// XGoogHeader formats key-value pairs. -// The resulting string is suitable for x-goog-api-client header. -func XGoogHeader(keyval ...string) string { - if len(keyval) == 0 { - return "" - } - if len(keyval)%2 != 0 { - panic("gax.Header: odd argument count") - } - var buf bytes.Buffer - for i := 0; i < len(keyval); i += 2 { - buf.WriteByte(' ') - buf.WriteString(keyval[i]) - buf.WriteByte('/') - buf.WriteString(keyval[i+1]) - } - return buf.String()[1:] -} - -// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries -// only. See package [github.com/googleapis/gax-go/v2/callctx] for help -// setting/retrieving request/response headers. -// -// InsertMetadataIntoOutgoingContext returns a new context that merges the -// provided keyvals metadata pairs with any existing metadata/headers in the -// provided context. keyvals should have a corresponding value for every key -// provided. If there is an odd number of keyvals this method will panic. -// Existing values for keys will not be overwritten, instead provided values -// will be appended to the list of existing values. -func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { - return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) -} - -// BuildHeaders is for use by the Google Cloud Libraries only. See package -// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving -// request/response headers. -// -// BuildHeaders returns a new http.Header that merges the provided -// keyvals header pairs with any existing metadata/headers in the provided -// context. keyvals should have a corresponding value for every key provided. -// If there is an odd number of keyvals this method will panic. -// Existing values for keys will not be overwritten, instead provided values -// will be appended to the list of existing values. -func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { - return http.Header(insertMetadata(ctx, keyvals...)) -} - -func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { - if len(keyvals)%2 != 0 { - panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) - } - out, ok := metadata.FromOutgoingContext(ctx) - if !ok { - out = metadata.MD(make(map[string][]string)) - } - headers := callctx.HeadersFromContext(ctx) - for k, v := range headers { - out[k] = append(out[k], v...) - } - for i := 0; i < len(keyvals); i = i + 2 { - out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) - } - return out -} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go deleted file mode 100644 index 90348f303..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2022, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package internal - -// Version is the current tagged release of the library. -const Version = "2.12.3" diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go deleted file mode 100644 index 721d1af55..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/invoke.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "context" - "strings" - "time" - - "github.com/googleapis/gax-go/v2/apierror" -) - -// APICall is a user defined call stub. -type APICall func(context.Context, CallSettings) error - -// Invoke calls the given APICall, performing retries as specified by opts, if -// any. 
-func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { - var settings CallSettings - for _, opt := range opts { - opt.Resolve(&settings) - } - return invoke(ctx, call, settings, Sleep) -} - -// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. -// If interrupted, Sleep returns ctx.Err(). -func Sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -type sleeper func(ctx context.Context, d time.Duration) error - -// invoke implements Invoke, taking an additional sleeper argument for testing. -func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { - var retryer Retryer - - // Only use the value provided via WithTimeout if the context doesn't - // already have a deadline. This is important for backwards compatibility if - // the user already set a deadline on the context given to Invoke. - if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { - c, cc := context.WithTimeout(ctx, settings.timeout) - defer cc() - ctx = c - } - - for { - err := call(ctx, settings) - if err == nil { - return nil - } - // Never retry permanent certificate errors. (e.x. if ca-certificates - // are not installed). We should only make very few, targeted - // exceptions: many (other) status=Unavailable should be retried, such - // as if there's a network hiccup, or the internet goes out for a - // minute. This is also why here we are doing string parsing instead of - // simply making Unavailable a non-retried code elsewhere. - if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { - return err - } - if apierr, ok := apierror.FromError(err); ok { - err = apierr - } - if settings.Retry == nil { - return err - } - if retryer == nil { - if r := settings.Retry(); r != nil { - retryer = r - } else { - return err - } - } - if d, ok := retryer.Retry(err); !ok { - return err - } else if err = sp(ctx, d); err != nil { - return err - } - } -} diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go deleted file mode 100644 index cc4486eb9..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2022, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
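invoke.go, deleted above, is what drives those retry options: application code wraps a single attempt in an APICall and lets Invoke handle sleeping and retrying. A hedged sketch, assuming the gax-go/v2 and googleapi APIs that appear in this diff; the endpoint, timeout, and retried status codes are illustrative:

    package main

    import (
        "context"
        "log"
        "net/http"
        "time"

        gax "github.com/googleapis/gax-go/v2"
        "google.golang.org/api/googleapi"
    )

    func main() {
        ctx := context.Background()
        attempt := func(ctx context.Context, _ gax.CallSettings) error {
            req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://example.com/health", nil) // illustrative endpoint
            if err != nil {
                return err
            }
            resp, err := http.DefaultClient.Do(req)
            if err != nil {
                return err // transport errors are handed to the Retryer
            }
            resp.Body.Close()
            if resp.StatusCode >= 500 {
                return &googleapi.Error{Code: resp.StatusCode} // matched by OnHTTPCodes below
            }
            return nil
        }
        err := gax.Invoke(ctx, attempt,
            gax.WithTimeout(20*time.Second), // applied only if ctx has no deadline yet
            gax.WithRetry(func() gax.Retryer {
                return gax.OnHTTPCodes(gax.Backoff{Initial: 250 * time.Millisecond},
                    http.StatusServiceUnavailable, http.StatusBadGateway)
            }),
        )
        if err != nil {
            log.Fatal(err)
        }
    }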
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "encoding/json" - "errors" - "io" - - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" -) - -var ( - arrayOpen = json.Delim('[') - arrayClose = json.Delim(']') - errBadOpening = errors.New("unexpected opening token, expected '['") -) - -// ProtoJSONStream represents a wrapper for consuming a stream of protobuf -// messages encoded using protobuf-JSON format. More information on this format -// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json. -// The stream must appear as a comma-delimited, JSON array of obbjects with -// opening and closing square braces. -// -// This is for internal use only. -type ProtoJSONStream struct { - first, closed bool - reader io.ReadCloser - stream *json.Decoder - typ protoreflect.MessageType -} - -// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are -// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream -// must be closed when done. -// -// This is for internal use only. -func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream { - return &ProtoJSONStream{ - first: true, - reader: rc, - stream: json.NewDecoder(rc), - typ: typ, - } -} - -// Recv decodes the next protobuf message in the stream or returns io.EOF if -// the stream is done. It is not safe to call Recv on the same stream from -// different goroutines, just like it is not safe to do so with a single gRPC -// stream. Type-cast the protobuf message returned to the type provided at -// ProtoJSONStream creation. -// Calls to Recv after calling Close will produce io.EOF. -func (s *ProtoJSONStream) Recv() (proto.Message, error) { - if s.closed { - return nil, io.EOF - } - if s.first { - s.first = false - - // Consume the opening '[' so Decode gets one object at a time. - if t, err := s.stream.Token(); err != nil { - return nil, err - } else if t != arrayOpen { - return nil, errBadOpening - } - } - - // Capture the next block of data for the item (a JSON object) in the stream. - var raw json.RawMessage - if err := s.stream.Decode(&raw); err != nil { - e := err - // To avoid checking the first token of each stream, just attempt to - // Decode the next blob and if that fails, double check if it is just - // the closing token ']'. If it is the closing, return io.EOF. If it - // isn't, return the original error. - if t, _ := s.stream.Token(); t == arrayClose { - e = io.EOF - } - return nil, e - } - - // Initialize a new instance of the protobuf message to unmarshal the - // raw data into. - m := s.typ.New().Interface() - err := protojson.Unmarshal(raw, m) - - return m, err -} - -// Close closes the stream so that resources are cleaned up. -func (s *ProtoJSONStream) Close() error { - // Dereference the *json.Decoder so that the memory is gc'd. 
- s.stream = nil - s.closed = true - - return s.reader.Close() -} diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json deleted file mode 100644 index 61ee266a1..000000000 --- a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "release-type": "go-yoshi", - "separate-pull-requests": true, - "include-component-in-tag": false, - "packages": { - "v2": { - "component": "v2" - } - } -} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore deleted file mode 100644 index 15586a2b5..000000000 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml deleted file mode 100644 index cb63a3216..000000000 --- a/vendor/github.com/hashicorp/hcl/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - - tip - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/vendor/github.com/hashicorp/hcl/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor†- - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version†- - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution†- - means Covered Software of a particular Contributor. - -1.4. “Covered Software†- - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses†- means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form†- - means any form of the work other than Source Code Form. - -1.7. “Larger Work†- - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License†- - means this document. - -1.9. “Licensable†- - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications†- - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
“Patent Claims†of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License†- - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form†- - means the form of the work preferred for making modifications. - -1.14. “You†(or “Yourâ€) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You†includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control†means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is†basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses†Notice - - This Source Code Form is “Incompatible - With Secondary Licensesâ€, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index 84fd743f5..000000000 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c8223326d..000000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps makes systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and others similar. - -## Why? - -A common question when viewing HCL is to ask the question: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also forces -people to learn some set of Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. 
A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index bed9ebbe1..000000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,729 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. -func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. - if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. 
- if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float32, reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. - testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. 
- if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. - if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. 
- done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. 
-func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. - if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. 
- squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for _, f := range fields { - field, fieldValue := f.field, f.val - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - fieldName := field.Name - - tagValue := field.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, fieldValue) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - fieldValue.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, fieldValue) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { - return err - } - } - - decodedFields = append(decodedFields, field.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b50..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. 
-// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index 6e5ef654b..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. -type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. -// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. -// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // always returns the uninitiliazed position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. 
In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // I'm not entirely sure what causes this, but removing this causes - // a test failure. We should investigate at some point. - if len(o.Keys) == 0 { - return token.Pos{} - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. -type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // comment types, only used when in a list - LeadComment *CommentGroup - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListStatement represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /*- style commment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } -func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42b..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. 
-func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here? - fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381df..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index 64c83bcfb..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,532 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter" obj" tells this whether to we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. 
-func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. 
- p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")), - } - } - - // key=#comment - // val - if p.lineComment != nil { - o.LineComment, p.lineComment = p.lineComment, nil - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), - } - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the 
underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. - comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go deleted file mode 100644 index 7c038d12a..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ /dev/null @@ -1,789 +0,0 @@ -package printer - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/token" -) - -const ( - blank = byte(' ') - newline = byte('\n') - tab = byte('\t') - infinity = 1 << 30 // offset or line -) - -var ( - unindent = []byte("\uE123") // in the private use space -) - -type printer struct { - cfg Config - prev token.Pos - - comments []*ast.CommentGroup // may be nil, contains all comments - standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) - - enableTrace bool - indentTrace int -} - -type ByPosition []*ast.CommentGroup - -func (b ByPosition) Len() int { return len(b) } -func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } - -// collectComments comments all standalone comments which are not lead or line -// comment -func (p *printer) collectComments(node ast.Node) { - // first collect all comments. This is already stored in - // ast.File.(comments) - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.File: - p.comments = t.Comments - return nn, false - } - return nn, true - }) - - standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) - for _, c := range p.comments { - standaloneComments[c.Pos()] = c - } - - // next remove all lead and line comments from the overall comment map. - // This will give us comments which are standalone, comments which are not - // assigned to any kind of node. - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.LiteralType: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - case *ast.ObjectItem: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - } - - return nn, true - }) - - for _, c := range standaloneComments { - p.standaloneComments = append(p.standaloneComments, c) - } - - sort.Sort(ByPosition(p.standaloneComments)) -} - -// output prints creates b printable HCL output and returns it. -func (p *printer) output(n interface{}) []byte { - var buf bytes.Buffer - - switch t := n.(type) { - case *ast.File: - // File doesn't trace so we add the tracing here - defer un(trace(p, "File")) - return p.output(t.Node) - case *ast.ObjectList: - defer un(trace(p, "ObjectList")) - - var index int - for { - // Determine the location of the next actual non-comment - // item. 
If we're at the end, the next item is at "infinity" - var nextItem token.Pos - if index != len(t.Items) { - nextItem = t.Items[index].Pos() - } else { - nextItem = token.Pos{Offset: infinity, Line: infinity} - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - // Go through all the comments in the group. The group - // should be printed together, not separated by double newlines. - printed := false - newlinePrinted := false - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // if we hit the end add newlines so we can print the comment - // we don't do this if prev is invalid which means the - // beginning of the file since the first comment should - // be at the first line. - if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { - buf.Write([]byte{newline, newline}) - newlinePrinted = true - } - - // Write the actual comment. - buf.WriteString(comment.Text) - buf.WriteByte(newline) - - // Set printed to true to note that we printed something - printed = true - } - } - - // If we're not at the last item, write a new line so - // that there is a newline separating this comment from - // the next object. - if printed && index != len(t.Items) { - buf.WriteByte(newline) - } - } - - if index == len(t.Items) { - break - } - - buf.Write(p.output(t.Items[index])) - if index != len(t.Items)-1 { - // Always write a newline to separate us from the next item - buf.WriteByte(newline) - - // Need to determine if we're going to separate the next item - // with a blank line. The logic here is simple, though there - // are a few conditions: - // - // 1. The next object is more than one line away anyways, - // so we need an empty line. - // - // 2. The next object is not a "single line" object, so - // we need an empty line. - // - // 3. This current object is not a single line object, - // so we need an empty line. - current := t.Items[index] - next := t.Items[index+1] - if next.Pos().Line != t.Items[index].Pos().Line+1 || - !p.isSingleLineObject(next) || - !p.isSingleLineObject(current) { - buf.WriteByte(newline) - } - } - index++ - } - case *ast.ObjectKey: - buf.WriteString(t.Token.Text) - case *ast.ObjectItem: - p.prev = t.Pos() - buf.Write(p.objectItem(t)) - case *ast.LiteralType: - buf.Write(p.literalType(t)) - case *ast.ListType: - buf.Write(p.list(t)) - case *ast.ObjectType: - buf.Write(p.objectType(t)) - default: - fmt.Printf(" unknown type: %T\n", n) - } - - return buf.Bytes() -} - -func (p *printer) literalType(lit *ast.LiteralType) []byte { - result := []byte(lit.Token.Text) - switch lit.Token.Type { - case token.HEREDOC: - // Clear the trailing newline from heredocs - if result[len(result)-1] == '\n' { - result = result[:len(result)-1] - } - - // Poison lines 2+ so that we don't indent them - result = p.heredocIndent(result) - case token.STRING: - // If this is a multiline string, poison lines 2+ so we don't - // indent them. - if bytes.IndexRune(result, '\n') >= 0 { - result = p.heredocIndent(result) - } - } - - return result -} - -// objectItem returns the printable HCL form of an object item. An object type -// starts with one/multiple keys and has a value. The value might be of any -// type. 
-func (p *printer) objectItem(o *ast.ObjectItem) []byte { - defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) - var buf bytes.Buffer - - if o.LeadComment != nil { - for _, comment := range o.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - // If key and val are on different lines, treat line comments like lead comments. - if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range o.Keys { - buf.WriteString(k.Token.Text) - buf.WriteByte(blank) - - // reach end of key - if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - buf.Write(p.output(o.Val)) - - if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { - buf.WriteByte(blank) - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - } - } - - return buf.Bytes() -} - -// objectType returns the printable HCL form of an object type. An object type -// begins with a brace and ends with a brace. -func (p *printer) objectType(o *ast.ObjectType) []byte { - defer un(trace(p, "ObjectType")) - var buf bytes.Buffer - buf.WriteString("{") - - var index int - var nextItem token.Pos - var commented, newlinePrinted bool - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is the closing brace - if index != len(o.List.Items) { - nextItem = o.List.Items[index].Pos() - } else { - nextItem = o.Rbrace - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - printed := false - var lastCommentPos token.Pos - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // If there are standalone comments and the initial newline has not - // been printed yet, do it now. - if !newlinePrinted { - newlinePrinted = true - buf.WriteByte(newline) - } - - // add newline if it's between other printed nodes - if index > 0 { - commented = true - buf.WriteByte(newline) - } - - // Store this position - lastCommentPos = comment.Pos() - - // output the comment itself - buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) - - // Set printed to true to note that we printed something - printed = true - - /* - if index != len(o.List.Items) { - buf.WriteByte(newline) // do not print on the end - } - */ - } - } - - // Stuff to do if we had comments - if printed { - // Always write a newline - buf.WriteByte(newline) - - // If there is another item in the object and our comment - // didn't hug it directly, then make sure there is a blank - // line separating them. - if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { - buf.WriteByte(newline) - } - } - } - - if index == len(o.List.Items) { - p.prev = o.Rbrace - break - } - - // At this point we are sure that it's not a totally empty block: print - // the initial newline if it hasn't been printed yet by the previous - // block about standalone comments. - if !newlinePrinted { - buf.WriteByte(newline) - newlinePrinted = true - } - - // check if we have adjacent one liner items. 
If yes we'll going to align - // the comments. - var aligned []*ast.ObjectItem - for _, item := range o.List.Items[index:] { - // we don't group one line lists - if len(o.List.Items) == 1 { - break - } - - // one means a oneliner with out any lead comment - // two means a oneliner with lead comment - // anything else might be something else - cur := lines(string(p.objectItem(item))) - if cur > 2 { - break - } - - curPos := item.Pos() - - nextPos := token.Pos{} - if index != len(o.List.Items)-1 { - nextPos = o.List.Items[index+1].Pos() - } - - prevPos := token.Pos{} - if index != 0 { - prevPos = o.List.Items[index-1].Pos() - } - - // fmt.Println("DEBUG ----------------") - // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) - // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) - // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) - - if curPos.Line+1 == nextPos.Line { - aligned = append(aligned, item) - index++ - continue - } - - if curPos.Line-1 == prevPos.Line { - aligned = append(aligned, item) - index++ - - // finish if we have a new line or comment next. This happens - // if the next item is not adjacent - if curPos.Line+1 != nextPos.Line { - break - } - continue - } - - break - } - - // put newlines if the items are between other non aligned items. - // newlines are also added if there is a standalone comment already, so - // check it too - if !commented && index != len(aligned) { - buf.WriteByte(newline) - } - - if len(aligned) >= 1 { - p.prev = aligned[len(aligned)-1].Pos() - - items := p.alignedItems(aligned) - buf.Write(p.indent(items)) - } else { - p.prev = o.List.Items[index].Pos() - - buf.Write(p.indent(p.objectItem(o.List.Items[index]))) - index++ - } - - buf.WriteByte(newline) - } - - buf.WriteString("}") - return buf.Bytes() -} - -func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { - var buf bytes.Buffer - - // find the longest key and value length, needed for alignment - var longestKeyLen int // longest key length - var longestValLen int // longest value length - for _, item := range items { - key := len(item.Keys[0].Token.Text) - val := len(p.output(item.Val)) - - if key > longestKeyLen { - longestKeyLen = key - } - - if val > longestValLen { - longestValLen = val - } - } - - for i, item := range items { - if item.LeadComment != nil { - for _, comment := range item.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range item.Keys { - keyLen := len(k.Token.Text) - buf.WriteString(k.Token.Text) - for i := 0; i < longestKeyLen-keyLen+1; i++ { - buf.WriteByte(blank) - } - - // reach end of key - if i == len(item.Keys)-1 && len(item.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - val := p.output(item.Val) - valLen := len(val) - buf.Write(val) - - if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { - for i := 0; i < longestValLen-valLen+1; i++ { - buf.WriteByte(blank) - } - - for _, comment := range item.LineComment.List { - buf.WriteString(comment.Text) - } - } - - // do not print for the last item - if i != len(items)-1 { - buf.WriteByte(newline) - } - } - - return buf.Bytes() -} - -// list returns the printable HCL form of an list type. 
-func (p *printer) list(l *ast.ListType) []byte { - if p.isSingleLineList(l) { - return p.singleLineList(l) - } - - var buf bytes.Buffer - buf.WriteString("[") - buf.WriteByte(newline) - - var longestLine int - for _, item := range l.List { - // for now we assume that the list only contains literal types - if lit, ok := item.(*ast.LiteralType); ok { - lineLen := len(lit.Token.Text) - if lineLen > longestLine { - longestLine = lineLen - } - } - } - - haveEmptyLine := false - for i, item := range l.List { - // If we have a lead comment, then we want to write that first - leadComment := false - if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { - leadComment = true - - // Ensure an empty line before every element with a - // lead comment (except the first item in a list). - if !haveEmptyLine && i != 0 { - buf.WriteByte(newline) - } - - for _, comment := range lit.LeadComment.List { - buf.Write(p.indent([]byte(comment.Text))) - buf.WriteByte(newline) - } - } - - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(p.indent(val)) - - // if this item is a heredoc, then we output the comma on - // the next line. This is the only case this happens. - comma := []byte{','} - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - comma = p.indent(comma) - } - - buf.Write(comma) - - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } - - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } - - buf.WriteByte(newline) - - // Ensure an empty line after every element with a - // lead comment (except the first item in a list). - haveEmptyLine = leadComment && i != len(l.List)-1 - if haveEmptyLine { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// isSingleLineList returns true if: -// * they were previously formatted entirely on one line -// * they consist entirely of literals -// * there are either no heredoc strings or the list has exactly one element -// * there are no line comments -func (printer) isSingleLineList(l *ast.ListType) bool { - for _, item := range l.List { - if item.Pos().Line != l.Lbrack.Line { - return false - } - - lit, ok := item.(*ast.LiteralType) - if !ok { - return false - } - - if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { - return false - } - - if lit.LineComment != nil { - return false - } - } - - return true -} - -// singleLineList prints a simple single line list. -// For a definition of "simple", see isSingleLineList above. -func (p *printer) singleLineList(l *ast.ListType) []byte { - buf := &bytes.Buffer{} - - buf.WriteString("[") - for i, item := range l.List { - if i != 0 { - buf.WriteString(", ") - } - - // Output the item itself - buf.Write(p.output(item)) - - // The heredoc marker needs to be at the end of line. 
- if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// indent indents the lines of the given buffer for each non-empty line -func (p *printer) indent(buf []byte) []byte { - var prefix []byte - if p.cfg.SpacesWidth != 0 { - for i := 0; i < p.cfg.SpacesWidth; i++ { - prefix = append(prefix, blank) - } - } else { - prefix = []byte{tab} - } - - var res []byte - bol := true - for _, c := range buf { - if bol && c != '\n' { - res = append(res, prefix...) - } - - res = append(res, c) - bol = c == '\n' - } - return res -} - -// unindent removes all the indentation from the tombstoned lines -func (p *printer) unindent(buf []byte) []byte { - var res []byte - for i := 0; i < len(buf); i++ { - skip := len(buf)-i <= len(unindent) - if !skip { - skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) - } - if skip { - res = append(res, buf[i]) - continue - } - - // We have a marker. we have to backtrace here and clean out - // any whitespace ahead of our tombstone up to a \n - for j := len(res) - 1; j >= 0; j-- { - if res[j] == '\n' { - break - } - - res = res[:j] - } - - // Skip the entire unindent marker - i += len(unindent) - 1 - } - - return res -} - -// heredocIndent marks all the 2nd and further lines as unindentable -func (p *printer) heredocIndent(buf []byte) []byte { - var res []byte - bol := false - for _, c := range buf { - if bol && c != '\n' { - res = append(res, unindent...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// isSingleLineObject tells whether the given object item is a single -// line object such as "obj {}". -// -// A single line object: -// -// * has no lead comments (hence multi-line) -// * has no assignment -// * has no values in the stanza (within {}) -// -func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { - // If there is a lead comment, can't be one line - if val.LeadComment != nil { - return false - } - - // If there is assignment, we always break by line - if val.Assign.IsValid() { - return false - } - - // If it isn't an object type, then its not a single line object - ot, ok := val.Val.(*ast.ObjectType) - if !ok { - return false - } - - // If the object has no items, it is single line! - return len(ot.List.Items) == 0 -} - -func lines(txt string) int { - endline := 1 - for i := 0; i < len(txt); i++ { - if txt[i] == '\n' { - endline++ - } - } - return endline -} - -// ---------------------------------------------------------------------------- -// Tracing support - -func (p *printer) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - i := 2 * p.indentTrace - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *printer, msg string) *printer { - p.printTrace(msg, "(") - p.indentTrace++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *printer) { - p.indentTrace-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go deleted file mode 100644 index 6617ab8e7..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package printer implements printing of AST nodes to HCL format. 
-package printer - -import ( - "bytes" - "io" - "text/tabwriter" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" -) - -var DefaultConfig = Config{ - SpacesWidth: 2, -} - -// A Config node controls the output of Fprint. -type Config struct { - SpacesWidth int // if set, it will use spaces instead of tabs for alignment -} - -func (c *Config) Fprint(output io.Writer, node ast.Node) error { - p := &printer{ - cfg: *c, - comments: make([]*ast.CommentGroup, 0), - standaloneComments: make([]*ast.CommentGroup, 0), - // enableTrace: true, - } - - p.collectComments(node) - - if _, err := output.Write(p.unindent(p.output(node))); err != nil { - return err - } - - // flush tabwriter, if any - var err error - if tw, _ := output.(*tabwriter.Writer); tw != nil { - err = tw.Flush() - } - - return err -} - -// Fprint "pretty-prints" an HCL node to output -// It calls Config.Fprint with default settings. -func Fprint(output io.Writer, node ast.Node) error { - return DefaultConfig.Fprint(output, node) -} - -// Format formats src HCL and returns the result. -func Format(src []byte) ([]byte, error) { - node, err := parser.Parse(src) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := DefaultConfig.Fprint(&buf, node); err != nil { - return nil, err - } - - // Add trailing newline to result - buf.WriteString("\n") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 624a18fe3..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,652 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. 
Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == utf8.RuneError && size == 1 { - s.err("illegal UTF-8 encoding") - return ch - } - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - if ch == '\x00' { - s.err("unexpected null character (0x00)") - return eof - } - - if ch == '\uE123' { - s.err("unicode code point U+E123 reserved for internal use") - return utf8.RuneError - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' 
|| (ch == '/' && s.peek() != '*') { - if ch == '/' && s.peek() != '/' { - s.err("expected '/' for comment") - return - } - - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - start := n - for n > 0 && digitVal(ch) < base { - ch = s.next() - if ch == eof { - // If we see an EOF, we halt any more scanning of digits - // immediately. - break - } - - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - if n != start && ch != eof { - // we scanned all digits, put the last non digit char back, - // only if we read anything at all - s.unread() - } - - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. 
-func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 5f981eaa2..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,241 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. 
- if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return 
- } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664e..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! 
Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f072..000000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. 
-func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index fe3f0f095..000000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. 
-func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. 
If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isHexadecimal returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isHexadecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. 
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3eee..000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
-func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c292..000000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. -func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4c..000000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. -func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f920e973..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2022 Alan Shreve (@inconshreveable) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d177..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 06a91f086..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !windows -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. -func StartedByExplorer() bool { - return false -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 0c5688021..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -package mousetrap - -import ( - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(syscall.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/jessevdk/go-flags/.travis.yml b/vendor/github.com/jessevdk/go-flags/.travis.yml deleted file mode 100644 index 2fc5e5f5b..000000000 --- a/vendor/github.com/jessevdk/go-flags/.travis.yml +++ /dev/null @@ -1,39 +0,0 @@ -language: go - -os: - - linux - - osx - -go: - - 1.16.x - -install: - # go-flags - - go build -v ./... - - # linting - - go get -v golang.org/x/lint/golint - - # code coverage - - go get golang.org/x/tools/cmd/cover - - go get github.com/onsi/ginkgo/ginkgo - - go get github.com/modocache/gover - - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi - -script: - # go-flags - - $(exit $(gofmt -l . | wc -l)) - - go test -v ./... - - # linting - - go tool vet -all=true -v=true . || true - - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./... - - # code coverage - - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover - - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover - - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi - -env: - # coveralls.io - secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU=" diff --git a/vendor/github.com/jessevdk/go-flags/LICENSE b/vendor/github.com/jessevdk/go-flags/LICENSE deleted file mode 100644 index bcca0d521..000000000 --- a/vendor/github.com/jessevdk/go-flags/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2012 Jesse van den Kieboom. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
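The mousetrap README and sources removed above expose a single call, StartedByExplorer(), which reports whether a Windows process was launched by double-clicking the executable in Explorer. A minimal sketch of the behaviour the README motivates (the main package and the "mytool" name are invented for illustration): instead of printing help and exiting, pause so the console window does not vanish immediately.

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	if mousetrap.StartedByExplorer() {
		// Double-clicked from explorer.exe: the console closes as soon as
		// the process exits, so explain how to run the tool and wait.
		fmt.Println("This is a command line tool; run it from a terminal, for example:")
		fmt.Println("    mytool --help")
		fmt.Print("Press Enter to close this window...")
		bufio.NewReader(os.Stdin).ReadString('\n')
		return
	}
	fmt.Println("started from a terminal; normal CLI behaviour")
}
```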
diff --git a/vendor/github.com/jessevdk/go-flags/README.md b/vendor/github.com/jessevdk/go-flags/README.md deleted file mode 100644 index f22650b20..000000000 --- a/vendor/github.com/jessevdk/go-flags/README.md +++ /dev/null @@ -1,139 +0,0 @@ -go-flags: a go library for parsing command line arguments -========================================================= - -[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) [![Build Status](https://travis-ci.org/jessevdk/go-flags.svg?branch=master)](https://travis-ci.org/jessevdk/go-flags) [![Coverage Status](https://img.shields.io/coveralls/jessevdk/go-flags.svg)](https://coveralls.io/r/jessevdk/go-flags?branch=master) - -This library provides similar functionality to the builtin flag library of -go, but provides much more functionality and nicer formatting. From the -documentation: - -Package flags provides an extensive command line option parser. -The flags package is similar in functionality to the go builtin flag package -but provides more options and uses reflection to provide a convenient and -succinct way of specifying command line options. - -Supported features: -* Options with short names (-v) -* Options with long names (--verbose) -* Options with and without arguments (bool v.s. other type) -* Options with optional arguments and default values -* Multiple option groups each containing a set of options -* Generate and print well-formatted help message -* Passing remaining command line arguments after -- (optional) -* Ignoring unknown command line options (optional) -* Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification -* Supports multiple short options -aux -* Supports all primitive go types (string, int{8..64}, uint{8..64}, float) -* Supports same option multiple times (can store in slice or last option counts) -* Supports maps -* Supports function callbacks -* Supports namespaces for (nested) option groups - -The flags package uses structs, reflection and struct field tags -to allow users to specify command line options. This results in very simple -and concise specification of your application options. For example: - -```go -type Options struct { - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` -} -``` - -This specifies one option with a short name -v and a long name --verbose. -When either -v or --verbose is found on the command line, a 'true' value -will be appended to the Verbose field. e.g. when specifying -vvv, the -resulting value of Verbose will be {[true, true, true]}. - -Example: --------- -```go -var opts struct { - // Slice of bool will append 'true' each time the option - // is encountered (can be set multiple times, like -vvv) - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` - - // Example of automatic marshalling to desired type (uint) - Offset uint `long:"offset" description:"Offset"` - - // Example of a callback, called each time the option is found. 
- Call func(string) `short:"c" description:"Call phone number"` - - // Example of a required flag - Name string `short:"n" long:"name" description:"A name" required:"true"` - - // Example of a flag restricted to a pre-defined set of strings - Animal string `long:"animal" choice:"cat" choice:"dog"` - - // Example of a value name - File string `short:"f" long:"file" description:"A file" value-name:"FILE"` - - // Example of a pointer - Ptr *int `short:"p" description:"A pointer to an integer"` - - // Example of a slice of strings - StringSlice []string `short:"s" description:"A slice of strings"` - - // Example of a slice of pointers - PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"` - - // Example of a map - IntMap map[string]int `long:"intmap" description:"A map from string to int"` -} - -// Callback which will invoke callto: to call a number. -// Note that this works just on OS X (and probably only with -// Skype) but it shows the idea. -opts.Call = func(num string) { - cmd := exec.Command("open", "callto:"+num) - cmd.Start() - cmd.Process.Release() -} - -// Make some fake arguments to parse. -args := []string{ - "-vv", - "--offset=5", - "-n", "Me", - "--animal", "dog", // anything other than "cat" or "dog" will raise an error - "-p", "3", - "-s", "hello", - "-s", "world", - "--ptrslice", "hello", - "--ptrslice", "world", - "--intmap", "a:1", - "--intmap", "b:5", - "arg1", - "arg2", - "arg3", -} - -// Parse flags from `args'. Note that here we use flags.ParseArgs for -// the sake of making a working example. Normally, you would simply use -// flags.Parse(&opts) which uses os.Args -args, err := flags.ParseArgs(&opts, args) - -if err != nil { - panic(err) -} - -fmt.Printf("Verbosity: %v\n", opts.Verbose) -fmt.Printf("Offset: %d\n", opts.Offset) -fmt.Printf("Name: %s\n", opts.Name) -fmt.Printf("Animal: %s\n", opts.Animal) -fmt.Printf("Ptr: %d\n", *opts.Ptr) -fmt.Printf("StringSlice: %v\n", opts.StringSlice) -fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1]) -fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"]) -fmt.Printf("Remaining args: %s\n", strings.Join(args, " ")) - -// Output: Verbosity: [true true] -// Offset: 5 -// Name: Me -// Ptr: 3 -// StringSlice: [hello world] -// PtrSlice: [hello world] -// IntMap: [a:1 b:5] -// Remaining args: arg1 arg2 arg3 -``` - -More information can be found in the godocs: diff --git a/vendor/github.com/jessevdk/go-flags/arg.go b/vendor/github.com/jessevdk/go-flags/arg.go deleted file mode 100644 index 8ec62048f..000000000 --- a/vendor/github.com/jessevdk/go-flags/arg.go +++ /dev/null @@ -1,27 +0,0 @@ -package flags - -import ( - "reflect" -) - -// Arg represents a positional argument on the command line. 
-type Arg struct { - // The name of the positional argument (used in the help) - Name string - - // A description of the positional argument (used in the help) - Description string - - // The minimal number of required positional arguments - Required int - - // The maximum number of required positional arguments - RequiredMaximum int - - value reflect.Value - tag multiTag -} - -func (a *Arg) isRemaining() bool { - return a.value.Type().Kind() == reflect.Slice -} diff --git a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh deleted file mode 100644 index 5edc430ed..000000000 --- a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -set -e - -echo '# linux arm7' -GOARM=7 GOARCH=arm GOOS=linux go build -echo '# linux arm5' -GOARM=5 GOARCH=arm GOOS=linux go build -echo '# windows 386' -GOARCH=386 GOOS=windows go build -echo '# windows amd64' -GOARCH=amd64 GOOS=windows go build -echo '# darwin' -GOARCH=amd64 GOOS=darwin go build -echo '# freebsd' -GOARCH=amd64 GOOS=freebsd go build -echo '# aix ppc64' -GOARCH=ppc64 GOOS=aix go build -echo '# solaris amd64' -GOARCH=amd64 GOOS=solaris go build diff --git a/vendor/github.com/jessevdk/go-flags/closest.go b/vendor/github.com/jessevdk/go-flags/closest.go deleted file mode 100644 index 3b518757c..000000000 --- a/vendor/github.com/jessevdk/go-flags/closest.go +++ /dev/null @@ -1,59 +0,0 @@ -package flags - -func levenshtein(s string, t string) int { - if len(s) == 0 { - return len(t) - } - - if len(t) == 0 { - return len(s) - } - - dists := make([][]int, len(s)+1) - for i := range dists { - dists[i] = make([]int, len(t)+1) - dists[i][0] = i - } - - for j := range t { - dists[0][j] = j - } - - for i, sc := range s { - for j, tc := range t { - if sc == tc { - dists[i+1][j+1] = dists[i][j] - } else { - dists[i+1][j+1] = dists[i][j] + 1 - if dists[i+1][j] < dists[i+1][j+1] { - dists[i+1][j+1] = dists[i+1][j] + 1 - } - if dists[i][j+1] < dists[i+1][j+1] { - dists[i+1][j+1] = dists[i][j+1] + 1 - } - } - } - } - - return dists[len(s)][len(t)] -} - -func closestChoice(cmd string, choices []string) (string, int) { - if len(choices) == 0 { - return "", 0 - } - - mincmd := -1 - mindist := -1 - - for i, c := range choices { - l := levenshtein(cmd, c) - - if mincmd < 0 || l < mindist { - mindist = l - mincmd = i - } - } - - return choices[mincmd], mindist -} diff --git a/vendor/github.com/jessevdk/go-flags/command.go b/vendor/github.com/jessevdk/go-flags/command.go deleted file mode 100644 index 879465d7a..000000000 --- a/vendor/github.com/jessevdk/go-flags/command.go +++ /dev/null @@ -1,465 +0,0 @@ -package flags - -import ( - "reflect" - "sort" - "strconv" - "strings" -) - -// Command represents an application command. Commands can be added to the -// parser (which itself is a command) and are selected/executed when its name -// is specified on the command line. The Command type embeds a Group and -// therefore also carries a set of command specific options. 
-type Command struct { - // Embedded, see Group for more information - *Group - - // The name by which the command can be invoked - Name string - - // The active sub command (set by parsing) or nil - Active *Command - - // Whether subcommands are optional - SubcommandsOptional bool - - // Aliases for the command - Aliases []string - - // Whether positional arguments are required - ArgsRequired bool - - commands []*Command - hasBuiltinHelpGroup bool - args []*Arg -} - -// Commander is an interface which can be implemented by any command added in -// the options. When implemented, the Execute method will be called for the last -// specified (sub)command providing the remaining command line arguments. -type Commander interface { - // Execute will be called for the last active (sub)command. The - // args argument contains the remaining command line arguments. The - // error that Execute returns will be eventually passed out of the - // Parse method of the Parser. - Execute(args []string) error -} - -// Usage is an interface which can be implemented to show a custom usage string -// in the help message shown for a command. -type Usage interface { - // Usage is called for commands to allow customized printing of command - // usage in the generated help message. - Usage() string -} - -type lookup struct { - shortNames map[string]*Option - longNames map[string]*Option - - commands map[string]*Command -} - -// AddCommand adds a new command to the parser with the given name and data. The -// data needs to be a pointer to a struct from which the fields indicate which -// options are in the command. The provided data can implement the Command and -// Usage interfaces. -func (c *Command) AddCommand(command string, shortDescription string, longDescription string, data interface{}) (*Command, error) { - cmd := newCommand(command, shortDescription, longDescription, data) - - cmd.parent = c - - if err := cmd.scan(); err != nil { - return nil, err - } - - c.commands = append(c.commands, cmd) - return cmd, nil -} - -// AddGroup adds a new group to the command with the given name and data. The -// data needs to be a pointer to a struct from which the fields indicate which -// options are in the group. -func (c *Command) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) { - group := newGroup(shortDescription, longDescription, data) - - group.parent = c - - if err := group.scanType(c.scanSubcommandHandler(group)); err != nil { - return nil, err - } - - c.groups = append(c.groups, group) - return group, nil -} - -// Commands returns a list of subcommands of this command. -func (c *Command) Commands() []*Command { - return c.commands -} - -// Find locates the subcommand with the given name and returns it. If no such -// command can be found Find will return nil. -func (c *Command) Find(name string) *Command { - for _, cc := range c.commands { - if cc.match(name) { - return cc - } - } - - return nil -} - -// FindOptionByLongName finds an option that is part of the command, or any of -// its parent commands, by matching its long name (including the option -// namespace). -func (c *Command) FindOptionByLongName(longName string) (option *Option) { - for option == nil && c != nil { - option = c.Group.FindOptionByLongName(longName) - - c, _ = c.parent.(*Command) - } - - return option -} - -// FindOptionByShortName finds an option that is part of the command, or any of -// its parent commands, by matching its long name (including the option -// namespace). 
-func (c *Command) FindOptionByShortName(shortName rune) (option *Option) { - for option == nil && c != nil { - option = c.Group.FindOptionByShortName(shortName) - - c, _ = c.parent.(*Command) - } - - return option -} - -// Args returns a list of positional arguments associated with this command. -func (c *Command) Args() []*Arg { - ret := make([]*Arg, len(c.args)) - copy(ret, c.args) - - return ret -} - -func newCommand(name string, shortDescription string, longDescription string, data interface{}) *Command { - return &Command{ - Group: newGroup(shortDescription, longDescription, data), - Name: name, - } -} - -func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler { - f := func(realval reflect.Value, sfield *reflect.StructField) (bool, error) { - mtag := newMultiTag(string(sfield.Tag)) - - if err := mtag.Parse(); err != nil { - return true, err - } - - positional := mtag.Get("positional-args") - - if len(positional) != 0 { - stype := realval.Type() - - for i := 0; i < stype.NumField(); i++ { - field := stype.Field(i) - - m := newMultiTag((string(field.Tag))) - - if err := m.Parse(); err != nil { - return true, err - } - - name := m.Get("positional-arg-name") - - if len(name) == 0 { - name = field.Name - } - - required := -1 - requiredMaximum := -1 - - sreq := m.Get("required") - - if sreq != "" { - required = 1 - - rng := strings.SplitN(sreq, "-", 2) - - if len(rng) > 1 { - if preq, err := strconv.ParseInt(rng[0], 10, 32); err == nil { - required = int(preq) - } - - if preq, err := strconv.ParseInt(rng[1], 10, 32); err == nil { - requiredMaximum = int(preq) - } - } else { - if preq, err := strconv.ParseInt(sreq, 10, 32); err == nil { - required = int(preq) - } - } - } - - arg := &Arg{ - Name: name, - Description: m.Get("description"), - Required: required, - RequiredMaximum: requiredMaximum, - - value: realval.Field(i), - tag: m, - } - - c.args = append(c.args, arg) - - if len(mtag.Get("required")) != 0 { - c.ArgsRequired = true - } - } - - return true, nil - } - - subcommand := mtag.Get("command") - - if len(subcommand) != 0 { - var ptrval reflect.Value - - if realval.Kind() == reflect.Ptr { - ptrval = realval - - if ptrval.IsNil() { - ptrval.Set(reflect.New(ptrval.Type().Elem())) - } - } else { - ptrval = realval.Addr() - } - - shortDescription := mtag.Get("description") - longDescription := mtag.Get("long-description") - subcommandsOptional := mtag.Get("subcommands-optional") - aliases := mtag.GetMany("alias") - - subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface()) - - if err != nil { - return true, err - } - - subc.Hidden = mtag.Get("hidden") != "" - - if len(subcommandsOptional) > 0 { - subc.SubcommandsOptional = true - } - - if len(aliases) > 0 { - subc.Aliases = aliases - } - - return true, nil - } - - return parentg.scanSubGroupHandler(realval, sfield) - } - - return f -} - -func (c *Command) scan() error { - return c.scanType(c.scanSubcommandHandler(c.Group)) -} - -func (c *Command) eachOption(f func(*Command, *Group, *Option)) { - c.eachCommand(func(c *Command) { - c.eachGroup(func(g *Group) { - for _, option := range g.options { - f(c, g, option) - } - }) - }, true) -} - -func (c *Command) eachCommand(f func(*Command), recurse bool) { - f(c) - - for _, cc := range c.commands { - if recurse { - cc.eachCommand(f, true) - } else { - f(cc) - } - } -} - -func (c *Command) eachActiveGroup(f func(cc *Command, g *Group)) { - c.eachGroup(func(g *Group) { - f(c, g) - }) - - if c.Active != nil { - c.Active.eachActiveGroup(f) - } -} - 
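The Command and Commander doc comments above describe the subcommand model: a struct field tagged `command:"..."` becomes a subcommand, and if its type implements Commander, Execute is called for the last active (sub)command with the remaining arguments. A hedged sketch of that wiring from a consumer's point of view (the AddCmd type and the "add" command are invented for illustration; flags.Parse is the convenience entry point mentioned in the README above):

```go
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

// AddCmd is an illustrative subcommand; the `command` tag on the field in
// options below is what the subcommand scan handler above picks up.
type AddCmd struct {
	All bool `short:"a" long:"all" description:"Add all changes"`
}

// Execute satisfies the Commander interface, so the parser invokes it for
// the last active (sub)command with the leftover command line arguments.
func (c *AddCmd) Execute(args []string) error {
	fmt.Printf("add: all=%v args=%v\n", c.All, args)
	return nil
}

type options struct {
	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
	Add     AddCmd `command:"add" description:"Add things" alias:"a"`
}

func main() {
	var opts options
	if _, err := flags.Parse(&opts); err != nil {
		os.Exit(1)
	}
}
```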
-func (c *Command) addHelpGroups(showHelp func() error) { - if !c.hasBuiltinHelpGroup { - c.addHelpGroup(showHelp) - c.hasBuiltinHelpGroup = true - } - - for _, cc := range c.commands { - cc.addHelpGroups(showHelp) - } -} - -func (c *Command) makeLookup() lookup { - ret := lookup{ - shortNames: make(map[string]*Option), - longNames: make(map[string]*Option), - commands: make(map[string]*Command), - } - - parent := c.parent - - var parents []*Command - - for parent != nil { - if cmd, ok := parent.(*Command); ok { - parents = append(parents, cmd) - parent = cmd.parent - } else { - parent = nil - } - } - - for i := len(parents) - 1; i >= 0; i-- { - parents[i].fillLookup(&ret, true) - } - - c.fillLookup(&ret, false) - return ret -} - -func (c *Command) fillLookup(ret *lookup, onlyOptions bool) { - c.eachGroup(func(g *Group) { - for _, option := range g.options { - if option.ShortName != 0 { - ret.shortNames[string(option.ShortName)] = option - } - - if len(option.LongName) > 0 { - ret.longNames[option.LongNameWithNamespace()] = option - } - } - }) - - if onlyOptions { - return - } - - for _, subcommand := range c.commands { - ret.commands[subcommand.Name] = subcommand - - for _, a := range subcommand.Aliases { - ret.commands[a] = subcommand - } - } -} - -func (c *Command) groupByName(name string) *Group { - if grp := c.Group.groupByName(name); grp != nil { - return grp - } - - for _, subc := range c.commands { - prefix := subc.Name + "." - - if strings.HasPrefix(name, prefix) { - if grp := subc.groupByName(name[len(prefix):]); grp != nil { - return grp - } - } else if name == subc.Name { - return subc.Group - } - } - - return nil -} - -type commandList []*Command - -func (c commandList) Less(i, j int) bool { - return c[i].Name < c[j].Name -} - -func (c commandList) Len() int { - return len(c) -} - -func (c commandList) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -func (c *Command) sortedVisibleCommands() []*Command { - ret := commandList(c.visibleCommands()) - sort.Sort(ret) - - return []*Command(ret) -} - -func (c *Command) visibleCommands() []*Command { - ret := make([]*Command, 0, len(c.commands)) - - for _, cmd := range c.commands { - if !cmd.Hidden { - ret = append(ret, cmd) - } - } - - return ret -} - -func (c *Command) match(name string) bool { - if c.Name == name { - return true - } - - for _, v := range c.Aliases { - if v == name { - return true - } - } - - return false -} - -func (c *Command) hasHelpOptions() bool { - ret := false - - c.eachGroup(func(g *Group) { - if g.isBuiltinHelp { - return - } - - for _, opt := range g.options { - if opt.showInHelp() { - ret = true - } - } - }) - - return ret -} - -func (c *Command) fillParseState(s *parseState) { - s.positional = make([]*Arg, len(c.args)) - copy(s.positional, c.args) - - s.lookup = c.makeLookup() - s.command = c -} diff --git a/vendor/github.com/jessevdk/go-flags/completion.go b/vendor/github.com/jessevdk/go-flags/completion.go deleted file mode 100644 index 8ed61f1db..000000000 --- a/vendor/github.com/jessevdk/go-flags/completion.go +++ /dev/null @@ -1,315 +0,0 @@ -package flags - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "sort" - "strings" - "unicode/utf8" -) - -// Completion is a type containing information of a completion. 
-type Completion struct { - // The completed item - Item string - - // A description of the completed item (optional) - Description string -} - -type completions []Completion - -func (c completions) Len() int { - return len(c) -} - -func (c completions) Less(i, j int) bool { - return c[i].Item < c[j].Item -} - -func (c completions) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -// Completer is an interface which can be implemented by types -// to provide custom command line argument completion. -type Completer interface { - // Complete receives a prefix representing a (partial) value - // for its type and should provide a list of possible valid - // completions. - Complete(match string) []Completion -} - -type completion struct { - parser *Parser -} - -// Filename is a string alias which provides filename completion. -type Filename string - -func completionsWithoutDescriptions(items []string) []Completion { - ret := make([]Completion, len(items)) - - for i, v := range items { - ret[i].Item = v - } - - return ret -} - -// Complete returns a list of existing files with the given -// prefix. -func (f *Filename) Complete(match string) []Completion { - ret, _ := filepath.Glob(match + "*") - if len(ret) == 1 { - if info, err := os.Stat(ret[0]); err == nil && info.IsDir() { - ret[0] = ret[0] + "/" - } - } - return completionsWithoutDescriptions(ret) -} - -func (c *completion) skipPositional(s *parseState, n int) { - if n >= len(s.positional) { - s.positional = nil - } else { - s.positional = s.positional[n:] - } -} - -func (c *completion) completeOptionNames(s *parseState, prefix string, match string, short bool) []Completion { - if short && len(match) != 0 { - return []Completion{ - { - Item: prefix + match, - }, - } - } - - var results []Completion - repeats := map[string]bool{} - - for name, opt := range s.lookup.longNames { - if strings.HasPrefix(name, match) && !opt.Hidden { - results = append(results, Completion{ - Item: defaultLongOptDelimiter + name, - Description: opt.Description, - }) - - if short { - repeats[string(opt.ShortName)] = true - } - } - } - - if short { - for name, opt := range s.lookup.shortNames { - if _, exist := repeats[name]; !exist && strings.HasPrefix(name, match) && !opt.Hidden { - results = append(results, Completion{ - Item: string(defaultShortOptDelimiter) + name, - Description: opt.Description, - }) - } - } - } - - return results -} - -func (c *completion) completeNamesForLongPrefix(s *parseState, prefix string, match string) []Completion { - return c.completeOptionNames(s, prefix, match, false) -} - -func (c *completion) completeNamesForShortPrefix(s *parseState, prefix string, match string) []Completion { - return c.completeOptionNames(s, prefix, match, true) -} - -func (c *completion) completeCommands(s *parseState, match string) []Completion { - n := make([]Completion, 0, len(s.command.commands)) - - for _, cmd := range s.command.commands { - if cmd.data != c && !cmd.Hidden && strings.HasPrefix(cmd.Name, match) { - n = append(n, Completion{ - Item: cmd.Name, - Description: cmd.ShortDescription, - }) - } - } - - return n -} - -func (c *completion) completeValue(value reflect.Value, prefix string, match string) []Completion { - if value.Kind() == reflect.Slice { - value = reflect.New(value.Type().Elem()) - } - i := value.Interface() - - var ret []Completion - - if cmp, ok := i.(Completer); ok { - ret = cmp.Complete(match) - } else if value.CanAddr() { - if cmp, ok = value.Addr().Interface().(Completer); ok { - ret = cmp.Complete(match) - } - } - - for i, v := 
range ret { - ret[i].Item = prefix + v.Item - } - - return ret -} - -func (c *completion) complete(args []string) []Completion { - if len(args) == 0 { - args = []string{""} - } - - s := &parseState{ - args: args, - } - - c.parser.fillParseState(s) - - var opt *Option - - for len(s.args) > 1 { - arg := s.pop() - - if (c.parser.Options&PassDoubleDash) != None && arg == "--" { - opt = nil - c.skipPositional(s, len(s.args)-1) - - break - } - - if argumentIsOption(arg) { - prefix, optname, islong := stripOptionPrefix(arg) - optname, _, argument := splitOption(prefix, optname, islong) - - if argument == nil { - var o *Option - canarg := true - - if islong { - o = s.lookup.longNames[optname] - } else { - for i, r := range optname { - sname := string(r) - o = s.lookup.shortNames[sname] - - if o == nil { - break - } - - if i == 0 && o.canArgument() && len(optname) != len(sname) { - canarg = false - break - } - } - } - - if o == nil && (c.parser.Options&PassAfterNonOption) != None { - opt = nil - c.skipPositional(s, len(s.args)-1) - - break - } else if o != nil && o.canArgument() && !o.OptionalArgument && canarg { - if len(s.args) > 1 { - s.pop() - } else { - opt = o - } - } - } - } else { - if len(s.positional) > 0 { - if !s.positional[0].isRemaining() { - // Don't advance beyond a remaining positional arg (because - // it consumes all subsequent args). - s.positional = s.positional[1:] - } - } else if cmd, ok := s.lookup.commands[arg]; ok { - cmd.fillParseState(s) - } - - opt = nil - } - } - - lastarg := s.args[len(s.args)-1] - var ret []Completion - - if opt != nil { - // Completion for the argument of 'opt' - ret = c.completeValue(opt.value, "", lastarg) - } else if argumentStartsOption(lastarg) { - // Complete the option - prefix, optname, islong := stripOptionPrefix(lastarg) - optname, split, argument := splitOption(prefix, optname, islong) - - if argument == nil && !islong { - rname, n := utf8.DecodeRuneInString(optname) - sname := string(rname) - - if opt := s.lookup.shortNames[sname]; opt != nil && opt.canArgument() { - ret = c.completeValue(opt.value, prefix+sname, optname[n:]) - } else { - ret = c.completeNamesForShortPrefix(s, prefix, optname) - } - } else if argument != nil { - if islong { - opt = s.lookup.longNames[optname] - } else { - opt = s.lookup.shortNames[optname] - } - - if opt != nil { - ret = c.completeValue(opt.value, prefix+optname+split, *argument) - } - } else if islong { - ret = c.completeNamesForLongPrefix(s, prefix, optname) - } else { - ret = c.completeNamesForShortPrefix(s, prefix, optname) - } - } else if len(s.positional) > 0 { - // Complete for positional argument - ret = c.completeValue(s.positional[0].value, "", lastarg) - } else if len(s.command.commands) > 0 { - // Complete for command - ret = c.completeCommands(s, lastarg) - } - - sort.Sort(completions(ret)) - return ret -} - -func (c *completion) print(items []Completion, showDescriptions bool) { - if showDescriptions && len(items) > 1 { - maxl := 0 - - for _, v := range items { - if len(v.Item) > maxl { - maxl = len(v.Item) - } - } - - for _, v := range items { - fmt.Printf("%s", v.Item) - - if len(v.Description) > 0 { - fmt.Printf("%s # %s", strings.Repeat(" ", maxl-len(v.Item)), v.Description) - } - - fmt.Printf("\n") - } - } else { - for _, v := range items { - fmt.Println(v.Item) - } - } -} diff --git a/vendor/github.com/jessevdk/go-flags/convert.go b/vendor/github.com/jessevdk/go-flags/convert.go deleted file mode 100644 index cda29b2f0..000000000 --- a/vendor/github.com/jessevdk/go-flags/convert.go 
+++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flags - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// Marshaler is the interface implemented by types that can marshal themselves -// to a string representation of the flag. -type Marshaler interface { - // MarshalFlag marshals a flag value to its string representation. - MarshalFlag() (string, error) -} - -// Unmarshaler is the interface implemented by types that can unmarshal a flag -// argument to themselves. The provided value is directly passed from the -// command line. -type Unmarshaler interface { - // UnmarshalFlag unmarshals a string value representation to the flag - // value (which therefore needs to be a pointer receiver). - UnmarshalFlag(value string) error -} - -// ValueValidator is the interface implemented by types that can validate a -// flag argument themselves. The provided value is directly passed from the -// command line. -type ValueValidator interface { - // IsValidValue returns an error if the provided string value is valid for - // the flag. - IsValidValue(value string) error -} - -func getBase(options multiTag, base int) (int, error) { - sbase := options.Get("base") - - var err error - var ivbase int64 - - if sbase != "" { - ivbase, err = strconv.ParseInt(sbase, 10, 32) - base = int(ivbase) - } - - return base, err -} - -func convertMarshal(val reflect.Value) (bool, string, error) { - // Check first for the Marshaler interface - if val.Type().NumMethod() > 0 && val.CanInterface() { - if marshaler, ok := val.Interface().(Marshaler); ok { - ret, err := marshaler.MarshalFlag() - return true, ret, err - } - } - - return false, "", nil -} - -func convertToString(val reflect.Value, options multiTag) (string, error) { - if ok, ret, err := convertMarshal(val); ok { - return ret, err - } - - tp := val.Type() - - // Support for time.Duration - if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() { - stringer := val.Interface().(fmt.Stringer) - return stringer.String(), nil - } - - switch tp.Kind() { - case reflect.String: - return val.String(), nil - case reflect.Bool: - if val.Bool() { - return "true", nil - } - - return "false", nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - base, err := getBase(options, 10) - - if err != nil { - return "", err - } - - return strconv.FormatInt(val.Int(), base), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - base, err := getBase(options, 10) - - if err != nil { - return "", err - } - - return strconv.FormatUint(val.Uint(), base), nil - case reflect.Float32, reflect.Float64: - return strconv.FormatFloat(val.Float(), 'g', -1, tp.Bits()), nil - case reflect.Slice: - if val.Len() == 0 { - return "", nil - } - - ret := "[" - - for i := 0; i < val.Len(); i++ { - if i != 0 { - ret += ", " - } - - item, err := convertToString(val.Index(i), options) - - if err != nil { - return "", err - } - - ret += item - } - - return ret + "]", nil - case reflect.Map: - ret := "{" - - for i, key := range val.MapKeys() { - if i != 0 { - ret += ", " - } - - keyitem, err := convertToString(key, options) - - if err != nil { - return "", err - } - - item, err := convertToString(val.MapIndex(key), options) - - if err != nil { - return "", err - } - - ret += keyitem + ":" + item - } - - return ret + "}", nil - case reflect.Ptr: - return 
convertToString(reflect.Indirect(val), options) - case reflect.Interface: - if !val.IsNil() { - return convertToString(val.Elem(), options) - } - } - - return "", nil -} - -func convertUnmarshal(val string, retval reflect.Value) (bool, error) { - if retval.Type().NumMethod() > 0 && retval.CanInterface() { - if unmarshaler, ok := retval.Interface().(Unmarshaler); ok { - if retval.IsNil() { - retval.Set(reflect.New(retval.Type().Elem())) - - // Re-assign from the new value - unmarshaler = retval.Interface().(Unmarshaler) - } - - return true, unmarshaler.UnmarshalFlag(val) - } - } - - if retval.Type().Kind() != reflect.Ptr && retval.CanAddr() { - return convertUnmarshal(val, retval.Addr()) - } - - if retval.Type().Kind() == reflect.Interface && !retval.IsNil() { - return convertUnmarshal(val, retval.Elem()) - } - - return false, nil -} - -func convert(val string, retval reflect.Value, options multiTag) error { - if ok, err := convertUnmarshal(val, retval); ok { - return err - } - - tp := retval.Type() - - // Support for time.Duration - if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() { - parsed, err := time.ParseDuration(val) - - if err != nil { - return err - } - - retval.SetInt(int64(parsed)) - return nil - } - - switch tp.Kind() { - case reflect.String: - retval.SetString(val) - case reflect.Bool: - if val == "" { - retval.SetBool(true) - } else { - b, err := strconv.ParseBool(val) - - if err != nil { - return err - } - - retval.SetBool(b) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - base, err := getBase(options, 10) - - if err != nil { - return err - } - - parsed, err := strconv.ParseInt(val, base, tp.Bits()) - - if err != nil { - return err - } - - retval.SetInt(parsed) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - base, err := getBase(options, 10) - - if err != nil { - return err - } - - parsed, err := strconv.ParseUint(val, base, tp.Bits()) - - if err != nil { - return err - } - - retval.SetUint(parsed) - case reflect.Float32, reflect.Float64: - parsed, err := strconv.ParseFloat(val, tp.Bits()) - - if err != nil { - return err - } - - retval.SetFloat(parsed) - case reflect.Slice: - elemtp := tp.Elem() - - elemvalptr := reflect.New(elemtp) - elemval := reflect.Indirect(elemvalptr) - - if err := convert(val, elemval, options); err != nil { - return err - } - - retval.Set(reflect.Append(retval, elemval)) - case reflect.Map: - parts := strings.SplitN(val, ":", 2) - - key := parts[0] - var value string - - if len(parts) == 2 { - value = parts[1] - } - - keytp := tp.Key() - keyval := reflect.New(keytp) - - if err := convert(key, keyval, options); err != nil { - return err - } - - valuetp := tp.Elem() - valueval := reflect.New(valuetp) - - if err := convert(value, valueval, options); err != nil { - return err - } - - if retval.IsNil() { - retval.Set(reflect.MakeMap(tp)) - } - - retval.SetMapIndex(reflect.Indirect(keyval), reflect.Indirect(valueval)) - case reflect.Ptr: - if retval.IsNil() { - retval.Set(reflect.New(retval.Type().Elem())) - } - - return convert(val, reflect.Indirect(retval), options) - case reflect.Interface: - if !retval.IsNil() { - return convert(val, retval.Elem(), options) - } - } - - return nil -} - -func isPrint(s string) bool { - for _, c := range s { - if !strconv.IsPrint(c) { - return false - } - } - - return true -} - -func quoteIfNeeded(s string) string { - if !isPrint(s) { - return strconv.Quote(s) - } - - return s -} - -func quoteIfNeededV(s []string) []string { - ret := 
make([]string, len(s)) - - for i, v := range s { - ret[i] = quoteIfNeeded(v) - } - - return ret -} - -func quoteV(s []string) []string { - ret := make([]string, len(s)) - - for i, v := range s { - ret[i] = strconv.Quote(v) - } - - return ret -} - -func unquoteIfPossible(s string) (string, error) { - if len(s) == 0 || s[0] != '"' { - return s, nil - } - - return strconv.Unquote(s) -} diff --git a/vendor/github.com/jessevdk/go-flags/error.go b/vendor/github.com/jessevdk/go-flags/error.go deleted file mode 100644 index 73e07cfc2..000000000 --- a/vendor/github.com/jessevdk/go-flags/error.go +++ /dev/null @@ -1,138 +0,0 @@ -package flags - -import ( - "fmt" -) - -// ErrorType represents the type of error. -type ErrorType uint - -const ( - // ErrUnknown indicates a generic error. - ErrUnknown ErrorType = iota - - // ErrExpectedArgument indicates that an argument was expected. - ErrExpectedArgument - - // ErrUnknownFlag indicates an unknown flag. - ErrUnknownFlag - - // ErrUnknownGroup indicates an unknown group. - ErrUnknownGroup - - // ErrMarshal indicates a marshalling error while converting values. - ErrMarshal - - // ErrHelp indicates that the built-in help was shown (the error - // contains the help message). - ErrHelp - - // ErrNoArgumentForBool indicates that an argument was given for a - // boolean flag (which don't not take any arguments). - ErrNoArgumentForBool - - // ErrRequired indicates that a required flag was not provided. - ErrRequired - - // ErrShortNameTooLong indicates that a short flag name was specified, - // longer than one character. - ErrShortNameTooLong - - // ErrDuplicatedFlag indicates that a short or long flag has been - // defined more than once - ErrDuplicatedFlag - - // ErrTag indicates an error while parsing flag tags. - ErrTag - - // ErrCommandRequired indicates that a command was required but not - // specified - ErrCommandRequired - - // ErrUnknownCommand indicates that an unknown command was specified. - ErrUnknownCommand - - // ErrInvalidChoice indicates an invalid option value which only allows - // a certain number of choices. - ErrInvalidChoice - - // ErrInvalidTag indicates an invalid tag or invalid use of an existing tag - ErrInvalidTag -) - -func (e ErrorType) String() string { - switch e { - case ErrUnknown: - return "unknown" - case ErrExpectedArgument: - return "expected argument" - case ErrUnknownFlag: - return "unknown flag" - case ErrUnknownGroup: - return "unknown group" - case ErrMarshal: - return "marshal" - case ErrHelp: - return "help" - case ErrNoArgumentForBool: - return "no argument for bool" - case ErrRequired: - return "required" - case ErrShortNameTooLong: - return "short name too long" - case ErrDuplicatedFlag: - return "duplicated flag" - case ErrTag: - return "tag" - case ErrCommandRequired: - return "command required" - case ErrUnknownCommand: - return "unknown command" - case ErrInvalidChoice: - return "invalid choice" - case ErrInvalidTag: - return "invalid tag" - } - - return "unrecognized error type" -} - -func (e ErrorType) Error() string { - return e.String() -} - -// Error represents a parser error. The error returned from Parse is of this -// type. The error contains both a Type and Message. 
-type Error struct { - // The type of error - Type ErrorType - - // The error message - Message string -} - -// Error returns the error's message -func (e *Error) Error() string { - return e.Message -} - -func newError(tp ErrorType, message string) *Error { - return &Error{ - Type: tp, - Message: message, - } -} - -func newErrorf(tp ErrorType, format string, args ...interface{}) *Error { - return newError(tp, fmt.Sprintf(format, args...)) -} - -func wrapError(err error) *Error { - ret, ok := err.(*Error) - - if !ok { - return newError(ErrUnknown, err.Error()) - } - - return ret -} diff --git a/vendor/github.com/jessevdk/go-flags/flags.go b/vendor/github.com/jessevdk/go-flags/flags.go deleted file mode 100644 index ac2157dd6..000000000 --- a/vendor/github.com/jessevdk/go-flags/flags.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package flags provides an extensive command line option parser. -The flags package is similar in functionality to the go built-in flag package -but provides more options and uses reflection to provide a convenient and -succinct way of specifying command line options. - - -Supported features - -The following features are supported in go-flags: - - Options with short names (-v) - Options with long names (--verbose) - Options with and without arguments (bool v.s. other type) - Options with optional arguments and default values - Option default values from ENVIRONMENT_VARIABLES, including slice and map values - Multiple option groups each containing a set of options - Generate and print well-formatted help message - Passing remaining command line arguments after -- (optional) - Ignoring unknown command line options (optional) - Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification - Supports multiple short options -aux - Supports all primitive go types (string, int{8..64}, uint{8..64}, float) - Supports same option multiple times (can store in slice or last option counts) - Supports maps - Supports function callbacks - Supports namespaces for (nested) option groups - -Additional features specific to Windows: - Options with short names (/v) - Options with long names (/verbose) - Windows-style options with arguments use a colon as the delimiter - Modify generated help message with Windows-style / options - Windows style options can be disabled at build time using the "forceposix" - build tag - - -Basic usage - -The flags package uses structs, reflection and struct field tags -to allow users to specify command line options. This results in very simple -and concise specification of your application options. For example: - - type Options struct { - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` - } - -This specifies one option with a short name -v and a long name --verbose. -When either -v or --verbose is found on the command line, a 'true' value -will be appended to the Verbose field. e.g. when specifying -vvv, the -resulting value of Verbose will be {[true, true, true]}. - -Slice options work exactly the same as primitive type options, except that -whenever the option is encountered, a value is appended to the slice. - -Map options from string to primitive type are also supported. On the command -line, you specify the value for such an option as key:value. 
For example - - type Options struct { - AuthorInfo string[string] `short:"a"` - } - -Then, the AuthorInfo map can be filled with something like --a name:Jesse -a "surname:van den Kieboom". - -Finally, for full control over the conversion between command line argument -values and options, user defined types can choose to implement the Marshaler -and Unmarshaler interfaces. - - -Available field tags - -The following is a list of tags for struct fields supported by go-flags: - - short: the short name of the option (single character) - long: the long name of the option - required: if non empty, makes the option required to appear on the command - line. If a required option is not present, the parser will - return ErrRequired (optional) - description: the description of the option (optional) - long-description: the long description of the option. Currently only - displayed in generated man pages (optional) - no-flag: if non-empty, this field is ignored as an option (optional) - - optional: if non-empty, makes the argument of the option optional. When an - argument is optional it can only be specified using - --option=argument (optional) - optional-value: the value of an optional option when the option occurs - without an argument. This tag can be specified multiple - times in the case of maps or slices (optional) - default: the default value of an option. This tag can be specified - multiple times in the case of slices or maps (optional) - default-mask: when specified, this value will be displayed in the help - instead of the actual default value. This is useful - mostly for hiding otherwise sensitive information from - showing up in the help. If default-mask takes the special - value "-", then no default value will be shown at all - (optional) - env: the default value of the option is overridden from the - specified environment variable, if one has been defined. - (optional) - env-delim: the 'env' default value from environment is split into - multiple values with the given delimiter string, use with - slices and maps (optional) - value-name: the name of the argument value (to be shown in the help) - (optional) - choice: limits the values for an option to a set of values. - Repeat this tag once for each allowable value. - e.g. `long:"animal" choice:"cat" choice:"dog"` - hidden: if non-empty, the option is not visible in the help or man page. - - base: a base (radix) used to convert strings to integer values, the - default base is 10 (i.e. decimal) (optional) - - ini-name: the explicit ini option name (optional) - no-ini: if non-empty this field is ignored as an ini option - (optional) - - group: when specified on a struct field, makes the struct - field a separate group with the given name (optional) - namespace: when specified on a group struct field, the namespace - gets prepended to every option's long name and - subgroup's namespace of this group, separated by - the parser's namespace delimiter (optional) - env-namespace: when specified on a group struct field, the env-namespace - gets prepended to every option's env key and - subgroup's env-namespace of this group, separated by - the parser's env-namespace delimiter (optional) - command: when specified on a struct field, makes the struct - field a (sub)command with the given name (optional) - subcommands-optional: when specified on a command struct field, makes - any subcommands of that command optional (optional) - alias: when specified on a command struct field, adds the - specified name as an alias for the command. 
Can be - be specified multiple times to add more than one - alias (optional) - positional-args: when specified on a field with a struct type, - uses the fields of that struct to parse remaining - positional command line arguments into (in order - of the fields). If a field has a slice type, - then all remaining arguments will be added to it. - Positional arguments are optional by default, - unless the "required" tag is specified together - with the "positional-args" tag. The "required" tag - can also be set on the individual rest argument - fields, to require only the first N positional - arguments. If the "required" tag is set on the - rest arguments slice, then its value determines - the minimum amount of rest arguments that needs to - be provided (e.g. `required:"2"`) (optional) - positional-arg-name: used on a field in a positional argument struct; name - of the positional argument placeholder to be shown in - the help (optional) - -Either the `short:` tag or the `long:` must be specified to make the field eligible as an -option. - - -Option groups - -Option groups are a simple way to semantically separate your options. All -options in a particular group are shown together in the help under the name -of the group. Namespaces can be used to specify option long names more -precisely and emphasize the options affiliation to their group. - -There are currently three ways to specify option groups. - - 1. Use NewNamedParser specifying the various option groups. - 2. Use AddGroup to add a group to an existing parser. - 3. Add a struct field to the top-level options annotated with the - group:"group-name" tag. - - - -Commands - -The flags package also has basic support for commands. Commands are often -used in monolithic applications that support various commands or actions. -Take git for example, all of the add, commit, checkout, etc. are called -commands. Using commands you can easily separate multiple functions of your -application. - -There are currently two ways to specify a command. - - 1. Use AddCommand on an existing parser. - 2. Add a struct field to your options struct annotated with the - command:"command-name" tag. - -The most common, idiomatic way to implement commands is to define a global -parser instance and implement each command in a separate file. These -command files should define a go init function which calls AddCommand on -the global parser. - -When parsing ends and there is an active command and that command implements -the Commander interface, then its Execute method will be run with the -remaining command line arguments. - -Command structs can have options which become valid to parse after the -command has been specified on the command line, in addition to the options -of all the parent commands. I.e. considering a -v flag on the parser and an -add command, the following are equivalent: - - ./app -v add - ./app add -v - -However, if the -v flag is defined on the add command, then the first of -the two examples above would fail since the -v flag is not defined before -the add command. - - -Completion - -go-flags has builtin support to provide bash completion of flags, commands -and argument values. To use completion, the binary which uses go-flags -can be invoked in a special environment to list completion of the current -command line argument. It should be noted that this `executes` your application, -and it is up to the user to make sure there are no negative side effects (for -example from init functions). 
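The field tag reference above (the positional-args description in particular) is easier to read next to a concrete struct. A hedged sketch combining several of the documented tags; the opts struct, the APP_WORKDIR variable, and the flag names are invented for illustration:

```go
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

type opts struct {
	// default/env: value comes from --workdir, then $APP_WORKDIR, then "."
	WorkDir string `long:"workdir" default:"." env:"APP_WORKDIR" description:"Working directory"`

	// choice: only the listed values are accepted
	Animal string `long:"animal" choice:"cat" choice:"dog" default:"cat"`

	// required: parsing fails with ErrRequired when the flag is missing
	Name string `short:"n" long:"name" required:"true" description:"A name"`

	// positional-args with a required range: between 1 and 2 FILE arguments
	Positional struct {
		Files []string `positional-arg-name:"FILE" required:"1-2"`
	} `positional-args:"yes" required:"yes"`
}

func main() {
	var o opts
	if _, err := flags.Parse(&o); err != nil {
		os.Exit(1)
	}
	fmt.Printf("workdir=%s animal=%s name=%s files=%v\n",
		o.WorkDir, o.Animal, o.Name, o.Positional.Files)
}
```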
- -Setting the environment variable `GO_FLAGS_COMPLETION=1` enables completion -by replacing the argument parsing routine with the completion routine which -outputs completions for the passed arguments. The basic invocation to -complete a set of arguments is therefore: - - GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3 - -where `completion-example` is the binary, `arg1` and `arg2` are -the current arguments, and `arg3` (the last argument) is the argument -to be completed. If the GO_FLAGS_COMPLETION is set to "verbose", then -descriptions of possible completion items will also be shown, if there -are more than 1 completion items. - -To use this with bash completion, a simple file can be written which -calls the binary which supports go-flags completion: - - _completion_example() { - # All arguments except the first one - args=("${COMP_WORDS[@]:1:$COMP_CWORD}") - - # Only split on newlines - local IFS=$'\n' - - # Call completion (note that the first element of COMP_WORDS is - # the executable itself) - COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) - return 0 - } - - complete -F _completion_example completion-example - -Completion requires the parser option PassDoubleDash and is therefore enforced if the environment variable GO_FLAGS_COMPLETION is set. - -Customized completion for argument values is supported by implementing -the flags.Completer interface for the argument value type. An example -of a type which does so is the flags.Filename type, an alias of string -allowing simple filename completion. A slice or array argument value -whose element type implements flags.Completer will also be completed. -*/ -package flags diff --git a/vendor/github.com/jessevdk/go-flags/group.go b/vendor/github.com/jessevdk/go-flags/group.go deleted file mode 100644 index 181caabb2..000000000 --- a/vendor/github.com/jessevdk/go-flags/group.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flags - -import ( - "errors" - "reflect" - "strings" - "unicode/utf8" -) - -// ErrNotPointerToStruct indicates that a provided data container is not -// a pointer to a struct. Only pointers to structs are valid data containers -// for options. -var ErrNotPointerToStruct = errors.New("provided data is not a pointer to struct") - -// Group represents an option group. Option groups can be used to logically -// group options together under a description. Groups are only used to provide -// more structure to options both for the user (as displayed in the help message) -// and for you, since groups can be nested. -type Group struct { - // A short description of the group. The - // short description is primarily used in the built-in generated help - // message - ShortDescription string - - // A long description of the group. The long - // description is primarily used to present information on commands - // (Command embeds Group) in the built-in generated help and man pages. 
- LongDescription string - - // The namespace of the group - Namespace string - - // The environment namespace of the group - EnvNamespace string - - // If true, the group is not displayed in the help or man page - Hidden bool - - // The parent of the group or nil if it has no parent - parent interface{} - - // All the options in the group - options []*Option - - // All the subgroups - groups []*Group - - // Whether the group represents the built-in help group - isBuiltinHelp bool - - data interface{} -} - -type scanHandler func(reflect.Value, *reflect.StructField) (bool, error) - -// AddGroup adds a new group to the command with the given name and data. The -// data needs to be a pointer to a struct from which the fields indicate which -// options are in the group. -func (g *Group) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) { - group := newGroup(shortDescription, longDescription, data) - - group.parent = g - - if err := group.scan(); err != nil { - return nil, err - } - - g.groups = append(g.groups, group) - return group, nil -} - -// AddOption adds a new option to this group. -func (g *Group) AddOption(option *Option, data interface{}) { - option.value = reflect.ValueOf(data) - option.group = g - g.options = append(g.options, option) -} - -// Groups returns the list of groups embedded in this group. -func (g *Group) Groups() []*Group { - return g.groups -} - -// Options returns the list of options in this group. -func (g *Group) Options() []*Option { - return g.options -} - -// Find locates the subgroup with the given short description and returns it. -// If no such group can be found Find will return nil. Note that the description -// is matched case insensitively. -func (g *Group) Find(shortDescription string) *Group { - lshortDescription := strings.ToLower(shortDescription) - - var ret *Group - - g.eachGroup(func(gg *Group) { - if gg != g && strings.ToLower(gg.ShortDescription) == lshortDescription { - ret = gg - } - }) - - return ret -} - -func (g *Group) findOption(matcher func(*Option) bool) (option *Option) { - g.eachGroup(func(g *Group) { - for _, opt := range g.options { - if option == nil && matcher(opt) { - option = opt - } - } - }) - - return option -} - -// FindOptionByLongName finds an option that is part of the group, or any of its -// subgroups, by matching its long name (including the option namespace). -func (g *Group) FindOptionByLongName(longName string) *Option { - return g.findOption(func(option *Option) bool { - return option.LongNameWithNamespace() == longName - }) -} - -// FindOptionByShortName finds an option that is part of the group, or any of -// its subgroups, by matching its short name. 
-func (g *Group) FindOptionByShortName(shortName rune) *Option { - return g.findOption(func(option *Option) bool { - return option.ShortName == shortName - }) -} - -func newGroup(shortDescription string, longDescription string, data interface{}) *Group { - return &Group{ - ShortDescription: shortDescription, - LongDescription: longDescription, - - data: data, - } -} - -func (g *Group) optionByName(name string, namematch func(*Option, string) bool) *Option { - prio := 0 - var retopt *Option - - g.eachGroup(func(g *Group) { - for _, opt := range g.options { - if namematch != nil && namematch(opt, name) && prio < 4 { - retopt = opt - prio = 4 - } - - if name == opt.field.Name && prio < 3 { - retopt = opt - prio = 3 - } - - if name == opt.LongNameWithNamespace() && prio < 2 { - retopt = opt - prio = 2 - } - - if opt.ShortName != 0 && name == string(opt.ShortName) && prio < 1 { - retopt = opt - prio = 1 - } - } - }) - - return retopt -} - -func (g *Group) showInHelp() bool { - if g.Hidden { - return false - } - for _, opt := range g.options { - if opt.showInHelp() { - return true - } - } - return false -} - -func (g *Group) eachGroup(f func(*Group)) { - f(g) - - for _, gg := range g.groups { - gg.eachGroup(f) - } -} - -func isStringFalsy(s string) bool { - return s == "" || s == "false" || s == "no" || s == "0" -} - -func (g *Group) scanStruct(realval reflect.Value, sfield *reflect.StructField, handler scanHandler) error { - stype := realval.Type() - - if sfield != nil { - if ok, err := handler(realval, sfield); err != nil { - return err - } else if ok { - return nil - } - } - - for i := 0; i < stype.NumField(); i++ { - field := stype.Field(i) - - // PkgName is set only for non-exported fields, which we ignore - if field.PkgPath != "" && !field.Anonymous { - continue - } - - mtag := newMultiTag(string(field.Tag)) - - if err := mtag.Parse(); err != nil { - return err - } - - // Skip fields with the no-flag tag - if mtag.Get("no-flag") != "" { - continue - } - - // Dive deep into structs or pointers to structs - kind := field.Type.Kind() - fld := realval.Field(i) - - if kind == reflect.Struct { - if err := g.scanStruct(fld, &field, handler); err != nil { - return err - } - } else if kind == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - flagCountBefore := len(g.options) + len(g.groups) - - if fld.IsNil() { - fld = reflect.New(fld.Type().Elem()) - } - - if err := g.scanStruct(reflect.Indirect(fld), &field, handler); err != nil { - return err - } - - if len(g.options)+len(g.groups) != flagCountBefore { - realval.Field(i).Set(fld) - } - } - - longname := mtag.Get("long") - shortname := mtag.Get("short") - - // Need at least either a short or long name - if longname == "" && shortname == "" && mtag.Get("ini-name") == "" { - continue - } - - short := rune(0) - rc := utf8.RuneCountInString(shortname) - - if rc > 1 { - return newErrorf(ErrShortNameTooLong, - "short names can only be 1 character long, not `%s'", - shortname) - - } else if rc == 1 { - short, _ = utf8.DecodeRuneInString(shortname) - } - - description := mtag.Get("description") - def := mtag.GetMany("default") - - optionalValue := mtag.GetMany("optional-value") - valueName := mtag.Get("value-name") - defaultMask := mtag.Get("default-mask") - - optional := !isStringFalsy(mtag.Get("optional")) - required := !isStringFalsy(mtag.Get("required")) - choices := mtag.GetMany("choice") - hidden := !isStringFalsy(mtag.Get("hidden")) - - option := &Option{ - Description: description, - ShortName: short, - LongName: longname, - Default: 
def, - EnvDefaultKey: mtag.Get("env"), - EnvDefaultDelim: mtag.Get("env-delim"), - OptionalArgument: optional, - OptionalValue: optionalValue, - Required: required, - ValueName: valueName, - DefaultMask: defaultMask, - Choices: choices, - Hidden: hidden, - - group: g, - - field: field, - value: realval.Field(i), - tag: mtag, - } - - if option.isBool() && option.Default != nil { - return newErrorf(ErrInvalidTag, - "boolean flag `%s' may not have default values, they always default to `false' and can only be turned on", - option.shortAndLongName()) - } - - g.options = append(g.options, option) - } - - return nil -} - -func (g *Group) checkForDuplicateFlags() *Error { - shortNames := make(map[rune]*Option) - longNames := make(map[string]*Option) - - var duplicateError *Error - - g.eachGroup(func(g *Group) { - for _, option := range g.options { - if option.LongName != "" { - longName := option.LongNameWithNamespace() - - if otherOption, ok := longNames[longName]; ok { - duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same long name as option `%s'", option, otherOption) - return - } - longNames[longName] = option - } - if option.ShortName != 0 { - if otherOption, ok := shortNames[option.ShortName]; ok { - duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same short name as option `%s'", option, otherOption) - return - } - shortNames[option.ShortName] = option - } - } - }) - - return duplicateError -} - -func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.StructField) (bool, error) { - mtag := newMultiTag(string(sfield.Tag)) - - if err := mtag.Parse(); err != nil { - return true, err - } - - subgroup := mtag.Get("group") - - if len(subgroup) != 0 { - var ptrval reflect.Value - - if realval.Kind() == reflect.Ptr { - ptrval = realval - - if ptrval.IsNil() { - ptrval.Set(reflect.New(ptrval.Type())) - } - } else { - ptrval = realval.Addr() - } - - description := mtag.Get("description") - - group, err := g.AddGroup(subgroup, description, ptrval.Interface()) - - if err != nil { - return true, err - } - - group.Namespace = mtag.Get("namespace") - group.EnvNamespace = mtag.Get("env-namespace") - group.Hidden = mtag.Get("hidden") != "" - - return true, nil - } - - return false, nil -} - -func (g *Group) scanType(handler scanHandler) error { - // Get all the public fields in the data struct - ptrval := reflect.ValueOf(g.data) - - if ptrval.Type().Kind() != reflect.Ptr { - panic(ErrNotPointerToStruct) - } - - stype := ptrval.Type().Elem() - - if stype.Kind() != reflect.Struct { - panic(ErrNotPointerToStruct) - } - - realval := reflect.Indirect(ptrval) - - if err := g.scanStruct(realval, nil, handler); err != nil { - return err - } - - if err := g.checkForDuplicateFlags(); err != nil { - return err - } - - return nil -} - -func (g *Group) scan() error { - return g.scanType(g.scanSubGroupHandler) -} - -func (g *Group) groupByName(name string) *Group { - if len(name) == 0 { - return g - } - - return g.Find(name) -} diff --git a/vendor/github.com/jessevdk/go-flags/help.go b/vendor/github.com/jessevdk/go-flags/help.go deleted file mode 100644 index 068fce152..000000000 --- a/vendor/github.com/jessevdk/go-flags/help.go +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
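The scanStruct logic above is what turns struct tags (long, short, description, default, env, required, choice, hidden, and so on) into Option values. Below is a rough sketch of how an application consuming the now-unvendored library declares options through those tags; the specific flags and their defaults are invented for illustration:

    package main

    import (
        "fmt"

        flags "github.com/jessevdk/go-flags"
    )

    type options struct {
        URI     string `short:"u" long:"uri" env:"EXAMPLE_URI" default:"mongodb://localhost:27017" description:"Connection URI"`
        Verbose bool   `short:"v" long:"verbose" description:"Verbose output"`
        Mode    string `long:"mode" choice:"logical" choice:"physical" default:"logical" description:"Backup mode"`
        Secret  string `long:"secret" default:"hunter2" default-mask:"********" description:"Secret (default masked in help)"`
    }

    func main() {
        var opts options
        // Note: boolean flags may not carry a default tag; scanStruct rejects that with ErrInvalidTag.
        rest, err := flags.Parse(&opts)
        if err != nil {
            return // help/usage already printed by the Default parser options
        }
        fmt.Println(opts.URI, opts.Mode, opts.Verbose, rest)
    }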
- -package flags - -import ( - "bufio" - "bytes" - "fmt" - "io" - "runtime" - "strings" - "unicode/utf8" -) - -type alignmentInfo struct { - maxLongLen int - hasShort bool - hasValueName bool - terminalColumns int - indent bool -} - -const ( - paddingBeforeOption = 2 - distanceBetweenOptionAndDescription = 2 -) - -func (a *alignmentInfo) descriptionStart() int { - ret := a.maxLongLen + distanceBetweenOptionAndDescription - - if a.hasShort { - ret += 2 - } - - if a.maxLongLen > 0 { - ret += 4 - } - - if a.hasValueName { - ret += 3 - } - - return ret -} - -func (a *alignmentInfo) updateLen(name string, indent bool) { - l := utf8.RuneCountInString(name) - - if indent { - l = l + 4 - } - - if l > a.maxLongLen { - a.maxLongLen = l - } -} - -func (p *Parser) getAlignmentInfo() alignmentInfo { - ret := alignmentInfo{ - maxLongLen: 0, - hasShort: false, - hasValueName: false, - terminalColumns: getTerminalColumns(), - } - - if ret.terminalColumns <= 0 { - ret.terminalColumns = 80 - } - - var prevcmd *Command - - p.eachActiveGroup(func(c *Command, grp *Group) { - if !grp.showInHelp() { - return - } - if c != prevcmd { - for _, arg := range c.args { - ret.updateLen(arg.Name, c != p.Command) - } - } - - for _, info := range grp.options { - if !info.showInHelp() { - continue - } - - if info.ShortName != 0 { - ret.hasShort = true - } - - if len(info.ValueName) > 0 { - ret.hasValueName = true - } - - l := info.LongNameWithNamespace() + info.ValueName - - if len(info.Choices) != 0 { - l += "[" + strings.Join(info.Choices, "|") + "]" - } - - ret.updateLen(l, c != p.Command) - } - }) - - return ret -} - -func wrapText(s string, l int, prefix string) string { - var ret string - - if l < 10 { - l = 10 - } - - // Basic text wrapping of s at spaces to fit in l - lines := strings.Split(s, "\n") - - for _, line := range lines { - var retline string - - line = strings.TrimSpace(line) - - for len(line) > l { - // Try to split on space - suffix := "" - - pos := strings.LastIndex(line[:l], " ") - - if pos < 0 { - pos = l - 1 - suffix = "-\n" - } - - if len(retline) != 0 { - retline += "\n" + prefix - } - - retline += strings.TrimSpace(line[:pos]) + suffix - line = strings.TrimSpace(line[pos:]) - } - - if len(line) > 0 { - if len(retline) != 0 { - retline += "\n" + prefix - } - - retline += line - } - - if len(ret) > 0 { - ret += "\n" - - if len(retline) > 0 { - ret += prefix - } - } - - ret += retline - } - - return ret -} - -func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) { - line := &bytes.Buffer{} - - prefix := paddingBeforeOption - - if info.indent { - prefix += 4 - } - - if option.Hidden { - return - } - - line.WriteString(strings.Repeat(" ", prefix)) - - if option.ShortName != 0 { - line.WriteRune(defaultShortOptDelimiter) - line.WriteRune(option.ShortName) - } else if info.hasShort { - line.WriteString(" ") - } - - descstart := info.descriptionStart() + paddingBeforeOption - - if len(option.LongName) > 0 { - if option.ShortName != 0 { - line.WriteString(", ") - } else if info.hasShort { - line.WriteString(" ") - } - - line.WriteString(defaultLongOptDelimiter) - line.WriteString(option.LongNameWithNamespace()) - } - - if option.canArgument() { - line.WriteRune(defaultNameArgDelimiter) - - if len(option.ValueName) > 0 { - line.WriteString(option.ValueName) - } - - if len(option.Choices) > 0 { - line.WriteString("[" + strings.Join(option.Choices, "|") + "]") - } - } - - written := line.Len() - line.WriteTo(writer) - - if option.Description != "" { - dw := descstart - 
written - writer.WriteString(strings.Repeat(" ", dw)) - - var def string - - if len(option.DefaultMask) != 0 { - if option.DefaultMask != "-" { - def = option.DefaultMask - } - } else { - def = option.defaultLiteral - } - - var envDef string - if option.EnvKeyWithNamespace() != "" { - var envPrintable string - if runtime.GOOS == "windows" { - envPrintable = "%" + option.EnvKeyWithNamespace() + "%" - } else { - envPrintable = "$" + option.EnvKeyWithNamespace() - } - envDef = fmt.Sprintf(" [%s]", envPrintable) - } - - var desc string - - if def != "" { - desc = fmt.Sprintf("%s (default: %v)%s", option.Description, def, envDef) - } else { - desc = option.Description + envDef - } - - writer.WriteString(wrapText(desc, - info.terminalColumns-descstart, - strings.Repeat(" ", descstart))) - } - - writer.WriteString("\n") -} - -func maxCommandLength(s []*Command) int { - if len(s) == 0 { - return 0 - } - - ret := len(s[0].Name) - - for _, v := range s[1:] { - l := len(v.Name) - - if l > ret { - ret = l - } - } - - return ret -} - -// WriteHelp writes a help message containing all the possible options and -// their descriptions to the provided writer. Note that the HelpFlag parser -// option provides a convenient way to add a -h/--help option group to the -// command line parser which will automatically show the help messages using -// this method. -func (p *Parser) WriteHelp(writer io.Writer) { - if writer == nil { - return - } - - wr := bufio.NewWriter(writer) - aligninfo := p.getAlignmentInfo() - - cmd := p.Command - - for cmd.Active != nil { - cmd = cmd.Active - } - - if p.Name != "" { - wr.WriteString("Usage:\n") - wr.WriteString(" ") - - allcmd := p.Command - - for allcmd != nil { - var usage string - - if allcmd == p.Command { - if len(p.Usage) != 0 { - usage = p.Usage - } else if p.Options&HelpFlag != 0 { - usage = "[OPTIONS]" - } - } else if us, ok := allcmd.data.(Usage); ok { - usage = us.Usage() - } else if allcmd.hasHelpOptions() { - usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name) - } - - if len(usage) != 0 { - fmt.Fprintf(wr, " %s %s", allcmd.Name, usage) - } else { - fmt.Fprintf(wr, " %s", allcmd.Name) - } - - if len(allcmd.args) > 0 { - fmt.Fprintf(wr, " ") - } - - for i, arg := range allcmd.args { - if i != 0 { - fmt.Fprintf(wr, " ") - } - - name := arg.Name - - if arg.isRemaining() { - name = name + "..." 
- } - - if !allcmd.ArgsRequired { - fmt.Fprintf(wr, "[%s]", name) - } else { - fmt.Fprintf(wr, "%s", name) - } - } - - if allcmd.Active == nil && len(allcmd.commands) > 0 { - var co, cc string - - if allcmd.SubcommandsOptional { - co, cc = "[", "]" - } else { - co, cc = "<", ">" - } - - visibleCommands := allcmd.visibleCommands() - - if len(visibleCommands) > 3 { - fmt.Fprintf(wr, " %scommand%s", co, cc) - } else { - subcommands := allcmd.sortedVisibleCommands() - names := make([]string, len(subcommands)) - - for i, subc := range subcommands { - names[i] = subc.Name - } - - fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc) - } - } - - allcmd = allcmd.Active - } - - fmt.Fprintln(wr) - - if len(cmd.LongDescription) != 0 { - fmt.Fprintln(wr) - - t := wrapText(cmd.LongDescription, - aligninfo.terminalColumns, - "") - - fmt.Fprintln(wr, t) - } - } - - c := p.Command - - for c != nil { - printcmd := c != p.Command - - c.eachGroup(func(grp *Group) { - first := true - - // Skip built-in help group for all commands except the top-level - // parser - if grp.Hidden || (grp.isBuiltinHelp && c != p.Command) { - return - } - - for _, info := range grp.options { - if !info.showInHelp() { - continue - } - - if printcmd { - fmt.Fprintf(wr, "\n[%s command options]\n", c.Name) - aligninfo.indent = true - printcmd = false - } - - if first && cmd.Group != grp { - fmt.Fprintln(wr) - - if aligninfo.indent { - wr.WriteString(" ") - } - - fmt.Fprintf(wr, "%s:\n", grp.ShortDescription) - first = false - } - - p.writeHelpOption(wr, info, aligninfo) - } - }) - - var args []*Arg - for _, arg := range c.args { - if arg.Description != "" { - args = append(args, arg) - } - } - - if len(args) > 0 { - if c == p.Command { - fmt.Fprintf(wr, "\nArguments:\n") - } else { - fmt.Fprintf(wr, "\n[%s command arguments]\n", c.Name) - } - - descStart := aligninfo.descriptionStart() + paddingBeforeOption - - for _, arg := range args { - argPrefix := strings.Repeat(" ", paddingBeforeOption) - argPrefix += arg.Name - - if len(arg.Description) > 0 { - argPrefix += ":" - wr.WriteString(argPrefix) - - // Space between "arg:" and the description start - descPadding := strings.Repeat(" ", descStart-len(argPrefix)) - // How much space the description gets before wrapping - descWidth := aligninfo.terminalColumns - 1 - descStart - // Whitespace to which we can indent new description lines - descPrefix := strings.Repeat(" ", descStart) - - wr.WriteString(descPadding) - wr.WriteString(wrapText(arg.Description, descWidth, descPrefix)) - } else { - wr.WriteString(argPrefix) - } - - fmt.Fprintln(wr) - } - } - - c = c.Active - } - - scommands := cmd.sortedVisibleCommands() - - if len(scommands) > 0 { - maxnamelen := maxCommandLength(scommands) - - fmt.Fprintln(wr) - fmt.Fprintln(wr, "Available commands:") - - for _, c := range scommands { - fmt.Fprintf(wr, " %s", c.Name) - - if len(c.ShortDescription) > 0 { - pad := strings.Repeat(" ", maxnamelen-len(c.Name)) - fmt.Fprintf(wr, "%s %s", pad, c.ShortDescription) - - if len(c.Aliases) > 0 { - fmt.Fprintf(wr, " (aliases: %s)", strings.Join(c.Aliases, ", ")) - } - - } - - fmt.Fprintln(wr) - } - } - - wr.Flush() -} - -// WroteHelp is a helper to test the error from ParseArgs() to -// determine if the help message was written. It is safe to -// call without first checking that error is nil. 
-func WroteHelp(err error) bool { - if err == nil { // No error - return false - } - - flagError, ok := err.(*Error) - if !ok { // Not a go-flag error - return false - } - - if flagError.Type != ErrHelp { // Did not print the help message - return false - } - - return true -} diff --git a/vendor/github.com/jessevdk/go-flags/ini.go b/vendor/github.com/jessevdk/go-flags/ini.go deleted file mode 100644 index 60b36c79c..000000000 --- a/vendor/github.com/jessevdk/go-flags/ini.go +++ /dev/null @@ -1,615 +0,0 @@ -package flags - -import ( - "bufio" - "fmt" - "io" - "os" - "reflect" - "sort" - "strconv" - "strings" -) - -// IniError contains location information on where an error occurred. -type IniError struct { - // The error message. - Message string - - // The filename of the file in which the error occurred. - File string - - // The line number at which the error occurred. - LineNumber uint -} - -// Error provides a "file:line: message" formatted message of the ini error. -func (x *IniError) Error() string { - return fmt.Sprintf( - "%s:%d: %s", - x.File, - x.LineNumber, - x.Message, - ) -} - -// IniOptions for writing -type IniOptions uint - -const ( - // IniNone indicates no options. - IniNone IniOptions = 0 - - // IniIncludeDefaults indicates that default values should be written. - IniIncludeDefaults = 1 << iota - - // IniCommentDefaults indicates that if IniIncludeDefaults is used - // options with default values are written but commented out. - IniCommentDefaults - - // IniIncludeComments indicates that comments containing the description - // of an option should be written. - IniIncludeComments - - // IniDefault provides a default set of options. - IniDefault = IniIncludeComments -) - -// IniParser is a utility to read and write flags options from and to ini -// formatted strings. -type IniParser struct { - ParseAsDefaults bool // override default flags - - parser *Parser -} - -type iniValue struct { - Name string - Value string - Quoted bool - LineNumber uint -} - -type iniSection []iniValue - -type ini struct { - File string - Sections map[string]iniSection -} - -// NewIniParser creates a new ini parser for a given Parser. -func NewIniParser(p *Parser) *IniParser { - return &IniParser{ - parser: p, - } -} - -// IniParse is a convenience function to parse command line options with default -// settings from an ini formatted file. The provided data is a pointer to a struct -// representing the default option group (named "Application Options"). For -// more control, use flags.NewParser. -func IniParse(filename string, data interface{}) error { - p := NewParser(data, Default) - - return NewIniParser(p).ParseFile(filename) -} - -// ParseFile parses flags from an ini formatted file. See Parse for more -// information on the ini file format. The returned errors can be of the type -// flags.Error or flags.IniError. -func (i *IniParser) ParseFile(filename string) error { - ini, err := readIniFromFile(filename) - - if err != nil { - return err - } - - return i.parse(ini) -} - -// Parse parses flags from an ini format. You can use ParseFile as a -// convenience function to parse from a filename instead of a general -// io.Reader. -// -// The format of the ini file is as follows: -// -// [Option group name] -// option = value -// -// Each section in the ini file represents an option group or command in the -// flags parser. The default flags parser option group (i.e. when using -// flags.Parse) is named 'Application Options'. The ini option name is matched -// in the following order: -// -// 1. 
Compared to the ini-name tag on the option struct field (if present) -// 2. Compared to the struct field name -// 3. Compared to the option long name (if present) -// 4. Compared to the option short name (if present) -// -// Sections for nested groups and commands can be addressed using a dot `.' -// namespacing notation (i.e [subcommand.Options]). Group section names are -// matched case insensitive. -// -// The returned errors can be of the type flags.Error or flags.IniError. -func (i *IniParser) Parse(reader io.Reader) error { - ini, err := readIni(reader, "") - - if err != nil { - return err - } - - return i.parse(ini) -} - -// WriteFile writes the flags as ini format into a file. See Write -// for more information. The returned error occurs when the specified file -// could not be opened for writing. -func (i *IniParser) WriteFile(filename string, options IniOptions) error { - return writeIniToFile(i, filename, options) -} - -// Write writes the current values of all the flags to an ini format. -// See Parse for more information on the ini file format. You typically -// call this only after settings have been parsed since the default values of each -// option are stored just before parsing the flags (this is only relevant when -// IniIncludeDefaults is _not_ set in options). -func (i *IniParser) Write(writer io.Writer, options IniOptions) { - writeIni(i, writer, options) -} - -func readFullLine(reader *bufio.Reader) (string, error) { - var line []byte - - for { - l, more, err := reader.ReadLine() - - if err != nil { - return "", err - } - - if line == nil && !more { - return string(l), nil - } - - line = append(line, l...) - - if !more { - break - } - } - - return string(line), nil -} - -func optionIniName(option *Option) string { - name := option.tag.Get("_read-ini-name") - - if len(name) != 0 { - return name - } - - name = option.tag.Get("ini-name") - - if len(name) != 0 { - return name - } - - return option.field.Name -} - -func writeGroupIni(cmd *Command, group *Group, namespace string, writer io.Writer, options IniOptions) { - var sname string - - if len(namespace) != 0 { - sname = namespace - } - - if cmd.Group != group && len(group.ShortDescription) != 0 { - if len(sname) != 0 { - sname += "." 
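The ini format and name-matching order documented above can be exercised directly against IniParser. A small sketch with hypothetical option names and ini content; ParseAsDefaults makes the ini values behave as defaults that command-line flags still override:

    package main

    import (
        "fmt"
        "strings"

        flags "github.com/jessevdk/go-flags"
    )

    type options struct {
        URI     string `long:"uri" ini-name:"uri" description:"Connection URI"`
        Verbose bool   `long:"verbose" description:"Verbose output"`
    }

    const iniData = `
    [Application Options]
    ; matched by ini-name first, then field name, then long/short name
    uri = mongodb://localhost:27017
    verbose = true
    `

    func main() {
        var opts options
        parser := flags.NewParser(&opts, flags.Default)

        ini := flags.NewIniParser(parser)
        ini.ParseAsDefaults = true
        if err := ini.Parse(strings.NewReader(iniData)); err != nil {
            panic(err)
        }

        if _, err := parser.Parse(); err != nil {
            return
        }
        fmt.Println(opts.URI, opts.Verbose)
    }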
- } - - sname += group.ShortDescription - } - - sectionwritten := false - comments := (options & IniIncludeComments) != IniNone - - for _, option := range group.options { - if option.isFunc() || option.Hidden { - continue - } - - if len(option.tag.Get("no-ini")) != 0 { - continue - } - - val := option.value - - if (options&IniIncludeDefaults) == IniNone && option.valueIsDefault() { - continue - } - - if !sectionwritten { - fmt.Fprintf(writer, "[%s]\n", sname) - sectionwritten = true - } - - if comments && len(option.Description) != 0 { - fmt.Fprintf(writer, "; %s\n", option.Description) - } - - oname := optionIniName(option) - - commentOption := (options&(IniIncludeDefaults|IniCommentDefaults)) == IniIncludeDefaults|IniCommentDefaults && option.valueIsDefault() - - kind := val.Type().Kind() - switch kind { - case reflect.Slice: - kind = val.Type().Elem().Kind() - - if val.Len() == 0 { - writeOption(writer, oname, kind, "", "", true, option.iniQuote) - } else { - for idx := 0; idx < val.Len(); idx++ { - v, _ := convertToString(val.Index(idx), option.tag) - - writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote) - } - } - case reflect.Map: - kind = val.Type().Elem().Kind() - - if val.Len() == 0 { - writeOption(writer, oname, kind, "", "", true, option.iniQuote) - } else { - mkeys := val.MapKeys() - keys := make([]string, len(val.MapKeys())) - kkmap := make(map[string]reflect.Value) - - for i, k := range mkeys { - keys[i], _ = convertToString(k, option.tag) - kkmap[keys[i]] = k - } - - sort.Strings(keys) - - for _, k := range keys { - v, _ := convertToString(val.MapIndex(kkmap[k]), option.tag) - - writeOption(writer, oname, kind, k, v, commentOption, option.iniQuote) - } - } - default: - v, _ := convertToString(val, option.tag) - - writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote) - } - - if comments { - fmt.Fprintln(writer) - } - } - - if sectionwritten && !comments { - fmt.Fprintln(writer) - } -} - -func writeOption(writer io.Writer, optionName string, optionType reflect.Kind, optionKey string, optionValue string, commentOption bool, forceQuote bool) { - if forceQuote || (optionType == reflect.String && !isPrint(optionValue)) { - optionValue = strconv.Quote(optionValue) - } - - comment := "" - if commentOption { - comment = "; " - } - - fmt.Fprintf(writer, "%s%s =", comment, optionName) - - if optionKey != "" { - fmt.Fprintf(writer, " %s:%s", optionKey, optionValue) - } else if optionValue != "" { - fmt.Fprintf(writer, " %s", optionValue) - } - - fmt.Fprintln(writer) -} - -func writeCommandIni(command *Command, namespace string, writer io.Writer, options IniOptions) { - command.eachGroup(func(group *Group) { - if !group.Hidden { - writeGroupIni(command, group, namespace, writer, options) - } - }) - - for _, c := range command.commands { - var fqn string - - if c.Hidden { - continue - } - - if len(namespace) != 0 { - fqn = namespace + "." 
+ c.Name - } else { - fqn = c.Name - } - - writeCommandIni(c, fqn, writer, options) - } -} - -func writeIni(parser *IniParser, writer io.Writer, options IniOptions) { - writeCommandIni(parser.parser.Command, "", writer, options) -} - -func writeIniToFile(parser *IniParser, filename string, options IniOptions) error { - file, err := os.Create(filename) - - if err != nil { - return err - } - - defer file.Close() - - writeIni(parser, file, options) - - return nil -} - -func readIniFromFile(filename string) (*ini, error) { - file, err := os.Open(filename) - - if err != nil { - return nil, err - } - - defer file.Close() - - return readIni(file, filename) -} - -func readIni(contents io.Reader, filename string) (*ini, error) { - ret := &ini{ - File: filename, - Sections: make(map[string]iniSection), - } - - reader := bufio.NewReader(contents) - - // Empty global section - section := make(iniSection, 0, 10) - sectionname := "" - - ret.Sections[sectionname] = section - - var lineno uint - - for { - line, err := readFullLine(reader) - - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - lineno++ - line = strings.TrimSpace(line) - - // Skip empty lines and lines starting with ; (comments) - if len(line) == 0 || line[0] == ';' || line[0] == '#' { - continue - } - - if line[0] == '[' { - if line[0] != '[' || line[len(line)-1] != ']' { - return nil, &IniError{ - Message: "malformed section header", - File: filename, - LineNumber: lineno, - } - } - - name := strings.TrimSpace(line[1 : len(line)-1]) - - if len(name) == 0 { - return nil, &IniError{ - Message: "empty section name", - File: filename, - LineNumber: lineno, - } - } - - sectionname = name - section = ret.Sections[name] - - if section == nil { - section = make(iniSection, 0, 10) - ret.Sections[name] = section - } - - continue - } - - // Parse option here - keyval := strings.SplitN(line, "=", 2) - - if len(keyval) != 2 { - return nil, &IniError{ - Message: fmt.Sprintf("malformed key=value (%s)", line), - File: filename, - LineNumber: lineno, - } - } - - name := strings.TrimSpace(keyval[0]) - value := strings.TrimSpace(keyval[1]) - quoted := false - - if len(value) != 0 && value[0] == '"' { - if v, err := strconv.Unquote(value); err == nil { - value = v - - quoted = true - } else { - return nil, &IniError{ - Message: err.Error(), - File: filename, - LineNumber: lineno, - } - } - } - - section = append(section, iniValue{ - Name: name, - Value: value, - Quoted: quoted, - LineNumber: lineno, - }) - - ret.Sections[sectionname] = section - } - - return ret, nil -} - -func (i *IniParser) matchingGroups(name string) []*Group { - if len(name) == 0 { - var ret []*Group - - i.parser.eachGroup(func(g *Group) { - ret = append(ret, g) - }) - - return ret - } - - g := i.parser.groupByName(name) - - if g != nil { - return []*Group{g} - } - - return nil -} - -func (i *IniParser) parse(ini *ini) error { - p := i.parser - - p.eachOption(func(cmd *Command, group *Group, option *Option) { - option.clearReferenceBeforeSet = true - }) - - var quotesLookup = make(map[*Option]bool) - - for name, section := range ini.Sections { - groups := i.matchingGroups(name) - - if len(groups) == 0 { - if (p.Options & IgnoreUnknown) == None { - return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name) - } - - continue - } - - for _, inival := range section { - var opt *Option - - for _, group := range groups { - opt = group.optionByName(inival.Name, func(o *Option, n string) bool { - return strings.ToLower(o.tag.Get("ini-name")) == 
strings.ToLower(n) - }) - - if opt != nil && len(opt.tag.Get("no-ini")) != 0 { - opt = nil - } - - if opt != nil { - break - } - } - - if opt == nil { - if (p.Options & IgnoreUnknown) == None { - return &IniError{ - Message: fmt.Sprintf("unknown option: %s", inival.Name), - File: ini.File, - LineNumber: inival.LineNumber, - } - } - - continue - } - - // ini value is ignored if parsed as default but defaults are prevented - if i.ParseAsDefaults && opt.preventDefault { - continue - } - - pval := &inival.Value - - if !opt.canArgument() && len(inival.Value) == 0 { - pval = nil - } else { - if opt.value.Type().Kind() == reflect.Map { - parts := strings.SplitN(inival.Value, ":", 2) - - // only handle unquoting - if len(parts) == 2 && parts[1][0] == '"' { - if v, err := strconv.Unquote(parts[1]); err == nil { - parts[1] = v - - inival.Quoted = true - } else { - return &IniError{ - Message: err.Error(), - File: ini.File, - LineNumber: inival.LineNumber, - } - } - - s := parts[0] + ":" + parts[1] - - pval = &s - } - } - } - - var err error - - if i.ParseAsDefaults { - err = opt.setDefault(pval) - } else { - err = opt.set(pval) - } - - if err != nil { - return &IniError{ - Message: err.Error(), - File: ini.File, - LineNumber: inival.LineNumber, - } - } - - // Defaults from ini files take precendence over defaults from parser - opt.preventDefault = true - - // either all INI values are quoted or only values who need quoting - if _, ok := quotesLookup[opt]; !inival.Quoted || !ok { - quotesLookup[opt] = inival.Quoted - } - - opt.tag.Set("_read-ini-name", inival.Name) - } - } - - for opt, quoted := range quotesLookup { - opt.iniQuote = quoted - } - - return nil -} diff --git a/vendor/github.com/jessevdk/go-flags/man.go b/vendor/github.com/jessevdk/go-flags/man.go deleted file mode 100644 index 82572f9a7..000000000 --- a/vendor/github.com/jessevdk/go-flags/man.go +++ /dev/null @@ -1,223 +0,0 @@ -package flags - -import ( - "fmt" - "io" - "os" - "runtime" - "strconv" - "strings" - "time" -) - -func manQuoteLines(s string) string { - lines := strings.Split(s, "\n") - parts := []string{} - - for _, line := range lines { - parts = append(parts, manQuote(line)) - } - - return strings.Join(parts, "\n") -} - -func manQuote(s string) string { - return strings.Replace(s, "\\", "\\\\", -1) -} - -func formatForMan(wr io.Writer, s string, quoter func(s string) string) { - for { - idx := strings.IndexRune(s, '`') - - if idx < 0 { - fmt.Fprintf(wr, "%s", quoter(s)) - break - } - - fmt.Fprintf(wr, "%s", quoter(s[:idx])) - - s = s[idx+1:] - idx = strings.IndexRune(s, '\'') - - if idx < 0 { - fmt.Fprintf(wr, "%s", quoter(s)) - break - } - - fmt.Fprintf(wr, "\\fB%s\\fP", quoter(s[:idx])) - s = s[idx+1:] - } -} - -func writeManPageOptions(wr io.Writer, grp *Group) { - grp.eachGroup(func(group *Group) { - if !group.showInHelp() { - return - } - - // If the parent (grp) has any subgroups, display their descriptions as - // subsection headers similar to the output of --help. 
- if group.ShortDescription != "" && len(grp.groups) > 0 { - fmt.Fprintf(wr, ".SS %s\n", group.ShortDescription) - - if group.LongDescription != "" { - formatForMan(wr, group.LongDescription, manQuoteLines) - fmt.Fprintln(wr, "") - } - } - - for _, opt := range group.options { - if !opt.showInHelp() { - continue - } - - fmt.Fprintln(wr, ".TP") - fmt.Fprintf(wr, "\\fB") - - if opt.ShortName != 0 { - fmt.Fprintf(wr, "\\fB\\-%c\\fR", opt.ShortName) - } - - if len(opt.LongName) != 0 { - if opt.ShortName != 0 { - fmt.Fprintf(wr, ", ") - } - - fmt.Fprintf(wr, "\\fB\\-\\-%s\\fR", manQuote(opt.LongNameWithNamespace())) - } - - if len(opt.ValueName) != 0 || opt.OptionalArgument { - if opt.OptionalArgument { - fmt.Fprintf(wr, " [\\fI%s=%s\\fR]", manQuote(opt.ValueName), manQuote(strings.Join(quoteV(opt.OptionalValue), ", "))) - } else { - fmt.Fprintf(wr, " \\fI%s\\fR", manQuote(opt.ValueName)) - } - } - - if len(opt.Default) != 0 { - fmt.Fprintf(wr, " ", manQuote(strings.Join(quoteV(opt.Default), ", "))) - } else if len(opt.EnvKeyWithNamespace()) != 0 { - if runtime.GOOS == "windows" { - fmt.Fprintf(wr, " ", manQuote(opt.EnvKeyWithNamespace())) - } else { - fmt.Fprintf(wr, " ", manQuote(opt.EnvKeyWithNamespace())) - } - } - - if opt.Required { - fmt.Fprintf(wr, " (\\fIrequired\\fR)") - } - - fmt.Fprintln(wr, "\\fP") - - if len(opt.Description) != 0 { - formatForMan(wr, opt.Description, manQuoteLines) - fmt.Fprintln(wr, "") - } - } - }) -} - -func writeManPageSubcommands(wr io.Writer, name string, usagePrefix string, root *Command) { - commands := root.sortedVisibleCommands() - - for _, c := range commands { - var nn string - - if c.Hidden { - continue - } - - if len(name) != 0 { - nn = name + " " + c.Name - } else { - nn = c.Name - } - - writeManPageCommand(wr, nn, usagePrefix, c) - } -} - -func writeManPageCommand(wr io.Writer, name string, usagePrefix string, command *Command) { - fmt.Fprintf(wr, ".SS %s\n", name) - fmt.Fprintln(wr, command.ShortDescription) - - if len(command.LongDescription) > 0 { - fmt.Fprintln(wr, "") - - cmdstart := fmt.Sprintf("The %s command", manQuote(command.Name)) - - if strings.HasPrefix(command.LongDescription, cmdstart) { - fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name)) - - formatForMan(wr, command.LongDescription[len(cmdstart):], manQuoteLines) - fmt.Fprintln(wr, "") - } else { - formatForMan(wr, command.LongDescription, manQuoteLines) - fmt.Fprintln(wr, "") - } - } - - var pre = usagePrefix + " " + command.Name - - var usage string - if us, ok := command.data.(Usage); ok { - usage = us.Usage() - } else if command.hasHelpOptions() { - usage = fmt.Sprintf("[%s-OPTIONS]", command.Name) - } - - var nextPrefix = pre - if len(usage) > 0 { - fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage)) - nextPrefix = pre + " " + usage - } - - if len(command.Aliases) > 0 { - fmt.Fprintf(wr, "\n\\fBAliases\\fP: %s\n\n", manQuote(strings.Join(command.Aliases, ", "))) - } - - writeManPageOptions(wr, command.Group) - writeManPageSubcommands(wr, name, nextPrefix, command) -} - -// WriteManPage writes a basic man page in groff format to the specified -// writer. 
-func (p *Parser) WriteManPage(wr io.Writer) { - t := time.Now() - source_date_epoch := os.Getenv("SOURCE_DATE_EPOCH") - if source_date_epoch != "" { - sde, err := strconv.ParseInt(source_date_epoch, 10, 64) - if err != nil { - panic(fmt.Sprintf("Invalid SOURCE_DATE_EPOCH: %s", err)) - } - t = time.Unix(sde, 0) - } - - fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006")) - fmt.Fprintln(wr, ".SH NAME") - fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuoteLines(p.ShortDescription)) - fmt.Fprintln(wr, ".SH SYNOPSIS") - - usage := p.Usage - - if len(usage) == 0 { - usage = "[OPTIONS]" - } - - fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage)) - fmt.Fprintln(wr, ".SH DESCRIPTION") - - formatForMan(wr, p.LongDescription, manQuoteLines) - fmt.Fprintln(wr, "") - - fmt.Fprintln(wr, ".SH OPTIONS") - - writeManPageOptions(wr, p.Command.Group) - - if len(p.visibleCommands()) > 0 { - fmt.Fprintln(wr, ".SH COMMANDS") - - writeManPageSubcommands(wr, "", p.Name+" "+usage, p.Command) - } -} diff --git a/vendor/github.com/jessevdk/go-flags/multitag.go b/vendor/github.com/jessevdk/go-flags/multitag.go deleted file mode 100644 index 96bb1a31d..000000000 --- a/vendor/github.com/jessevdk/go-flags/multitag.go +++ /dev/null @@ -1,140 +0,0 @@ -package flags - -import ( - "strconv" -) - -type multiTag struct { - value string - cache map[string][]string -} - -func newMultiTag(v string) multiTag { - return multiTag{ - value: v, - } -} - -func (x *multiTag) scan() (map[string][]string, error) { - v := x.value - - ret := make(map[string][]string) - - // This is mostly copied from reflect.StructTag.Get - for v != "" { - i := 0 - - // Skip whitespace - for i < len(v) && v[i] == ' ' { - i++ - } - - v = v[i:] - - if v == "" { - break - } - - // Scan to colon to find key - i = 0 - - for i < len(v) && v[i] != ' ' && v[i] != ':' && v[i] != '"' { - i++ - } - - if i >= len(v) { - return nil, newErrorf(ErrTag, "expected `:' after key name, but got end of tag (in `%v`)", x.value) - } - - if v[i] != ':' { - return nil, newErrorf(ErrTag, "expected `:' after key name, but got `%v' (in `%v`)", v[i], x.value) - } - - if i+1 >= len(v) { - return nil, newErrorf(ErrTag, "expected `\"' to start tag value at end of tag (in `%v`)", x.value) - } - - if v[i+1] != '"' { - return nil, newErrorf(ErrTag, "expected `\"' to start tag value, but got `%v' (in `%v`)", v[i+1], x.value) - } - - name := v[:i] - v = v[i+1:] - - // Scan quoted string to find value - i = 1 - - for i < len(v) && v[i] != '"' { - if v[i] == '\n' { - return nil, newErrorf(ErrTag, "unexpected newline in tag value `%v' (in `%v`)", name, x.value) - } - - if v[i] == '\\' { - i++ - } - i++ - } - - if i >= len(v) { - return nil, newErrorf(ErrTag, "expected end of tag value `\"' at end of tag (in `%v`)", x.value) - } - - val, err := strconv.Unquote(v[:i+1]) - - if err != nil { - return nil, newErrorf(ErrTag, "Malformed value of tag `%v:%v` => %v (in `%v`)", name, v[:i+1], err, x.value) - } - - v = v[i+1:] - - ret[name] = append(ret[name], val) - } - - return ret, nil -} - -func (x *multiTag) Parse() error { - vals, err := x.scan() - x.cache = vals - - return err -} - -func (x *multiTag) cached() map[string][]string { - if x.cache == nil { - cache, _ := x.scan() - - if cache == nil { - cache = make(map[string][]string) - } - - x.cache = cache - } - - return x.cache -} - -func (x *multiTag) Get(key string) string { - c := x.cached() - - if v, ok := c[key]; ok { - return v[len(v)-1] - } - - return "" -} - -func (x *multiTag) 
GetMany(key string) []string { - c := x.cached() - return c[key] -} - -func (x *multiTag) Set(key string, value string) { - c := x.cached() - c[key] = []string{value} -} - -func (x *multiTag) SetMany(key string, value []string) { - c := x.cached() - c[key] = value -} diff --git a/vendor/github.com/jessevdk/go-flags/option.go b/vendor/github.com/jessevdk/go-flags/option.go deleted file mode 100644 index f6d694181..000000000 --- a/vendor/github.com/jessevdk/go-flags/option.go +++ /dev/null @@ -1,569 +0,0 @@ -package flags - -import ( - "bytes" - "fmt" - "os" - "reflect" - "strings" - "unicode/utf8" -) - -// Option flag information. Contains a description of the option, short and -// long name as well as a default value and whether an argument for this -// flag is optional. -type Option struct { - // The description of the option flag. This description is shown - // automatically in the built-in help. - Description string - - // The short name of the option (a single character). If not 0, the - // option flag can be 'activated' using -. Either ShortName - // or LongName needs to be non-empty. - ShortName rune - - // The long name of the option. If not "", the option flag can be - // activated using --. Either ShortName or LongName needs - // to be non-empty. - LongName string - - // The default value of the option. - Default []string - - // The optional environment default value key name. - EnvDefaultKey string - - // The optional delimiter string for EnvDefaultKey values. - EnvDefaultDelim string - - // If true, specifies that the argument to an option flag is optional. - // When no argument to the flag is specified on the command line, the - // value of OptionalValue will be set in the field this option represents. - // This is only valid for non-boolean options. - OptionalArgument bool - - // The optional value of the option. The optional value is used when - // the option flag is marked as having an OptionalArgument. This means - // that when the flag is specified, but no option argument is given, - // the value of the field this option represents will be set to - // OptionalValue. This is only valid for non-boolean options. - OptionalValue []string - - // If true, the option _must_ be specified on the command line. If the - // option is not specified, the parser will generate an ErrRequired type - // error. - Required bool - - // A name for the value of an option shown in the Help as --flag [ValueName] - ValueName string - - // A mask value to show in the help instead of the default value. This - // is useful for hiding sensitive information in the help, such as - // passwords. - DefaultMask string - - // If non empty, only a certain set of values is allowed for an option. - Choices []string - - // If true, the option is not displayed in the help or man page - Hidden bool - - // The group which the option belongs to - group *Group - - // The struct field which the option represents. - field reflect.StructField - - // The struct field value which the option represents. - value reflect.Value - - // Determines if the option will be always quoted in the INI output - iniQuote bool - - tag multiTag - isSet bool - isSetDefault bool - preventDefault bool - clearReferenceBeforeSet bool - - defaultLiteral string -} - -// LongNameWithNamespace returns the option's long name with the group namespaces -// prepended by walking up the option's group tree. Namespaces and the long name -// itself are separated by the parser's namespace delimiter. If the long name is -// empty an empty string is returned. 
-func (option *Option) LongNameWithNamespace() string { - if len(option.LongName) == 0 { - return "" - } - - // fetch the namespace delimiter from the parser which is always at the - // end of the group hierarchy - namespaceDelimiter := "" - g := option.group - - for { - if p, ok := g.parent.(*Parser); ok { - namespaceDelimiter = p.NamespaceDelimiter - - break - } - - switch i := g.parent.(type) { - case *Command: - g = i.Group - case *Group: - g = i - } - } - - // concatenate long name with namespace - longName := option.LongName - g = option.group - - for g != nil { - if g.Namespace != "" { - longName = g.Namespace + namespaceDelimiter + longName - } - - switch i := g.parent.(type) { - case *Command: - g = i.Group - case *Group: - g = i - case *Parser: - g = nil - } - } - - return longName -} - -// EnvKeyWithNamespace returns the option's env key with the group namespaces -// prepended by walking up the option's group tree. Namespaces and the env key -// itself are separated by the parser's namespace delimiter. If the env key is -// empty an empty string is returned. -func (option *Option) EnvKeyWithNamespace() string { - if len(option.EnvDefaultKey) == 0 { - return "" - } - - // fetch the namespace delimiter from the parser which is always at the - // end of the group hierarchy - namespaceDelimiter := "" - g := option.group - - for { - if p, ok := g.parent.(*Parser); ok { - namespaceDelimiter = p.EnvNamespaceDelimiter - - break - } - - switch i := g.parent.(type) { - case *Command: - g = i.Group - case *Group: - g = i - } - } - - // concatenate long name with namespace - key := option.EnvDefaultKey - g = option.group - - for g != nil { - if g.EnvNamespace != "" { - key = g.EnvNamespace + namespaceDelimiter + key - } - - switch i := g.parent.(type) { - case *Command: - g = i.Group - case *Group: - g = i - case *Parser: - g = nil - } - } - - return key -} - -// String converts an option to a human friendly readable string describing the -// option. -func (option *Option) String() string { - var s string - var short string - - if option.ShortName != 0 { - data := make([]byte, utf8.RuneLen(option.ShortName)) - utf8.EncodeRune(data, option.ShortName) - short = string(data) - - if len(option.LongName) != 0 { - s = fmt.Sprintf("%s%s, %s%s", - string(defaultShortOptDelimiter), short, - defaultLongOptDelimiter, option.LongNameWithNamespace()) - } else { - s = fmt.Sprintf("%s%s", string(defaultShortOptDelimiter), short) - } - } else if len(option.LongName) != 0 { - s = fmt.Sprintf("%s%s", defaultLongOptDelimiter, option.LongNameWithNamespace()) - } - - return s -} - -// Value returns the option value as an interface{}. -func (option *Option) Value() interface{} { - return option.value.Interface() -} - -// Field returns the reflect struct field of the option. -func (option *Option) Field() reflect.StructField { - return option.field -} - -// IsSet returns true if option has been set -func (option *Option) IsSet() bool { - return option.isSet -} - -// IsSetDefault returns true if option has been set via the default option tag -func (option *Option) IsSetDefault() bool { - return option.isSetDefault -} - -// Set the value of an option to the specified value. An error will be returned -// if the specified value could not be converted to the corresponding option -// value type. 
-func (option *Option) set(value *string) error { - kind := option.value.Type().Kind() - - if (kind == reflect.Map || kind == reflect.Slice) && option.clearReferenceBeforeSet { - option.empty() - } - - option.isSet = true - option.preventDefault = true - option.clearReferenceBeforeSet = false - - if len(option.Choices) != 0 { - found := false - - for _, choice := range option.Choices { - if choice == *value { - found = true - break - } - } - - if !found { - allowed := strings.Join(option.Choices[0:len(option.Choices)-1], ", ") - - if len(option.Choices) > 1 { - allowed += " or " + option.Choices[len(option.Choices)-1] - } - - return newErrorf(ErrInvalidChoice, - "Invalid value `%s' for option `%s'. Allowed values are: %s", - *value, option, allowed) - } - } - - if option.isFunc() { - return option.call(value) - } else if value != nil { - return convert(*value, option.value, option.tag) - } - - return convert("", option.value, option.tag) -} - -func (option *Option) setDefault(value *string) error { - if option.preventDefault { - return nil - } - - if err := option.set(value); err != nil { - return err - } - - option.isSetDefault = true - option.preventDefault = false - - return nil -} - -func (option *Option) showInHelp() bool { - return !option.Hidden && (option.ShortName != 0 || len(option.LongName) != 0) -} - -func (option *Option) canArgument() bool { - if u := option.isUnmarshaler(); u != nil { - return true - } - - return !option.isBool() -} - -func (option *Option) emptyValue() reflect.Value { - tp := option.value.Type() - - if tp.Kind() == reflect.Map { - return reflect.MakeMap(tp) - } - - return reflect.Zero(tp) -} - -func (option *Option) empty() { - if !option.isFunc() { - option.value.Set(option.emptyValue()) - } -} - -func (option *Option) clearDefault() error { - if option.preventDefault { - return nil - } - - usedDefault := option.Default - - if envKey := option.EnvKeyWithNamespace(); envKey != "" { - if value, ok := os.LookupEnv(envKey); ok { - if option.EnvDefaultDelim != "" { - usedDefault = strings.Split(value, option.EnvDefaultDelim) - } else { - usedDefault = []string{value} - } - } - } - - option.isSetDefault = true - - if len(usedDefault) > 0 { - option.empty() - - for _, d := range usedDefault { - err := option.setDefault(&d) - - if err != nil { - return err - } - } - } else { - tp := option.value.Type() - - switch tp.Kind() { - case reflect.Map: - if option.value.IsNil() { - option.empty() - } - case reflect.Slice: - if option.value.IsNil() { - option.empty() - } - } - } - - return nil -} - -func (option *Option) valueIsDefault() bool { - // Check if the value of the option corresponds to its - // default value - emptyval := option.emptyValue() - - checkvalptr := reflect.New(emptyval.Type()) - checkval := reflect.Indirect(checkvalptr) - - checkval.Set(emptyval) - - if len(option.Default) != 0 { - for _, v := range option.Default { - convert(v, checkval, option.tag) - } - } - - return reflect.DeepEqual(option.value.Interface(), checkval.Interface()) -} - -func (option *Option) isUnmarshaler() Unmarshaler { - v := option.value - - for { - if !v.CanInterface() { - break - } - - i := v.Interface() - - if u, ok := i.(Unmarshaler); ok { - return u - } - - if !v.CanAddr() { - break - } - - v = v.Addr() - } - - return nil -} - -func (option *Option) isValueValidator() ValueValidator { - v := option.value - - for { - if !v.CanInterface() { - break - } - - i := v.Interface() - - if u, ok := i.(ValueValidator); ok { - return u - } - - if !v.CanAddr() { - break - } - - v = 
v.Addr() - } - - return nil -} - -func (option *Option) isBool() bool { - tp := option.value.Type() - - for { - switch tp.Kind() { - case reflect.Slice, reflect.Ptr: - tp = tp.Elem() - case reflect.Bool: - return true - case reflect.Func: - return tp.NumIn() == 0 - default: - return false - } - } -} - -func (option *Option) isSignedNumber() bool { - tp := option.value.Type() - - for { - switch tp.Kind() { - case reflect.Slice, reflect.Ptr: - tp = tp.Elem() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64: - return true - default: - return false - } - } -} - -func (option *Option) isFunc() bool { - return option.value.Type().Kind() == reflect.Func -} - -func (option *Option) call(value *string) error { - var retval []reflect.Value - - if value == nil { - retval = option.value.Call(nil) - } else { - tp := option.value.Type().In(0) - - val := reflect.New(tp) - val = reflect.Indirect(val) - - if err := convert(*value, val, option.tag); err != nil { - return err - } - - retval = option.value.Call([]reflect.Value{val}) - } - - if len(retval) == 1 && retval[0].Type() == reflect.TypeOf((*error)(nil)).Elem() { - if retval[0].Interface() == nil { - return nil - } - - return retval[0].Interface().(error) - } - - return nil -} - -func (option *Option) updateDefaultLiteral() { - defs := option.Default - def := "" - - if len(defs) == 0 && option.canArgument() { - var showdef bool - - switch option.field.Type.Kind() { - case reflect.Func, reflect.Ptr: - showdef = !option.value.IsNil() - case reflect.Slice, reflect.String, reflect.Array: - showdef = option.value.Len() > 0 - case reflect.Map: - showdef = !option.value.IsNil() && option.value.Len() > 0 - default: - zeroval := reflect.Zero(option.field.Type) - showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface()) - } - - if showdef { - def, _ = convertToString(option.value, option.tag) - } - } else if len(defs) != 0 { - l := len(defs) - 1 - - for i := 0; i < l; i++ { - def += quoteIfNeeded(defs[i]) + ", " - } - - def += quoteIfNeeded(defs[l]) - } - - option.defaultLiteral = def -} - -func (option *Option) shortAndLongName() string { - ret := &bytes.Buffer{} - - if option.ShortName != 0 { - ret.WriteRune(defaultShortOptDelimiter) - ret.WriteRune(option.ShortName) - } - - if len(option.LongName) != 0 { - if option.ShortName != 0 { - ret.WriteRune('/') - } - - ret.WriteString(option.LongName) - } - - return ret.String() -} - -func (option *Option) isValidValue(arg string) error { - if validator := option.isValueValidator(); validator != nil { - return validator.IsValidValue(arg) - } - if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') { - return fmt.Errorf("expected argument for flag `%s', but got option `%s'", option, arg) - } - return nil -} diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_other.go b/vendor/github.com/jessevdk/go-flags/optstyle_other.go deleted file mode 100644 index 56dfdae12..000000000 --- a/vendor/github.com/jessevdk/go-flags/optstyle_other.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !windows forceposix - -package flags - -import ( - "strings" -) - -const ( - defaultShortOptDelimiter = '-' - defaultLongOptDelimiter = "--" - defaultNameArgDelimiter = '=' -) - -func argumentStartsOption(arg string) bool { - return len(arg) > 0 && arg[0] == '-' -} - -func argumentIsOption(arg string) bool { - if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' { - return true - } - - if len(arg) > 
2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' { - return true - } - - return false -} - -// stripOptionPrefix returns the option without the prefix and whether or -// not the option is a long option or not. -func stripOptionPrefix(optname string) (prefix string, name string, islong bool) { - if strings.HasPrefix(optname, "--") { - return "--", optname[2:], true - } else if strings.HasPrefix(optname, "-") { - return "-", optname[1:], false - } - - return "", optname, false -} - -// splitOption attempts to split the passed option into a name and an argument. -// When there is no argument specified, nil will be returned for it. -func splitOption(prefix string, option string, islong bool) (string, string, *string) { - pos := strings.Index(option, "=") - - if (islong && pos >= 0) || (!islong && pos == 1) { - rest := option[pos+1:] - return option[:pos], "=", &rest - } - - return option, "", nil -} - -// addHelpGroup adds a new group that contains default help parameters. -func (c *Command) addHelpGroup(showHelp func() error) *Group { - var help struct { - ShowHelp func() error `short:"h" long:"help" description:"Show this help message"` - } - - help.ShowHelp = showHelp - ret, _ := c.AddGroup("Help Options", "", &help) - ret.isBuiltinHelp = true - - return ret -} diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_windows.go b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go deleted file mode 100644 index f3f28aeef..000000000 --- a/vendor/github.com/jessevdk/go-flags/optstyle_windows.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build !forceposix - -package flags - -import ( - "strings" -) - -// Windows uses a front slash for both short and long options. Also it uses -// a colon for name/argument delimter. -const ( - defaultShortOptDelimiter = '/' - defaultLongOptDelimiter = "/" - defaultNameArgDelimiter = ':' -) - -func argumentStartsOption(arg string) bool { - return len(arg) > 0 && (arg[0] == '-' || arg[0] == '/') -} - -func argumentIsOption(arg string) bool { - // Windows-style options allow front slash for the option - // delimiter. - if len(arg) > 1 && arg[0] == '/' { - return true - } - - if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' { - return true - } - - if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' { - return true - } - - return false -} - -// stripOptionPrefix returns the option without the prefix and whether or -// not the option is a long option or not. -func stripOptionPrefix(optname string) (prefix string, name string, islong bool) { - // Determine if the argument is a long option or not. Windows - // typically supports both long and short options with a single - // front slash as the option delimiter, so handle this situation - // nicely. - possplit := 0 - - if strings.HasPrefix(optname, "--") { - possplit = 2 - islong = true - } else if strings.HasPrefix(optname, "-") { - possplit = 1 - islong = false - } else if strings.HasPrefix(optname, "/") { - possplit = 1 - islong = len(optname) > 2 - } - - return optname[:possplit], optname[possplit:], islong -} - -// splitOption attempts to split the passed option into a name and an argument. -// When there is no argument specified, nil will be returned for it. -func splitOption(prefix string, option string, islong bool) (string, string, *string) { - if len(option) == 0 { - return option, "", nil - } - - // Windows typically uses a colon for the option name and argument - // delimiter while POSIX typically uses an equals. Support both styles, - // but don't allow the two to be mixed. 
That is to say /foo:bar and - // --foo=bar are acceptable, but /foo=bar and --foo:bar are not. - var pos int - var sp string - - if prefix == "/" { - sp = ":" - pos = strings.Index(option, sp) - } else if len(prefix) > 0 { - sp = "=" - pos = strings.Index(option, sp) - } - - if (islong && pos >= 0) || (!islong && pos == 1) { - rest := option[pos+1:] - return option[:pos], sp, &rest - } - - return option, "", nil -} - -// addHelpGroup adds a new group that contains default help parameters. -func (c *Command) addHelpGroup(showHelp func() error) *Group { - // Windows CLI applications typically use /? for help, so make both - // that available as well as the POSIX style h and help. - var help struct { - ShowHelpWindows func() error `short:"?" description:"Show this help message"` - ShowHelpPosix func() error `short:"h" long:"help" description:"Show this help message"` - } - - help.ShowHelpWindows = showHelp - help.ShowHelpPosix = showHelp - - ret, _ := c.AddGroup("Help Options", "", &help) - ret.isBuiltinHelp = true - - return ret -} diff --git a/vendor/github.com/jessevdk/go-flags/parser.go b/vendor/github.com/jessevdk/go-flags/parser.go deleted file mode 100644 index 3fc3f7ba1..000000000 --- a/vendor/github.com/jessevdk/go-flags/parser.go +++ /dev/null @@ -1,714 +0,0 @@ -// Copyright 2012 Jesse van den Kieboom. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flags - -import ( - "bytes" - "fmt" - "os" - "path" - "reflect" - "sort" - "strings" - "unicode/utf8" -) - -// A Parser provides command line option parsing. It can contain several -// option groups each with their own set of options. -type Parser struct { - // Embedded, see Command for more information - *Command - - // A usage string to be displayed in the help message. - Usage string - - // Option flags changing the behavior of the parser. - Options Options - - // NamespaceDelimiter separates group namespaces and option long names - NamespaceDelimiter string - - // EnvNamespaceDelimiter separates group env namespaces and env keys - EnvNamespaceDelimiter string - - // UnknownOptionsHandler is a function which gets called when the parser - // encounters an unknown option. The function receives the unknown option - // name, a SplitArgument which specifies its value if set with an argument - // separator, and the remaining command line arguments. - // It should return a new list of remaining arguments to continue parsing, - // or an error to indicate a parse failure. - UnknownOptionHandler func(option string, arg SplitArgument, args []string) ([]string, error) - - // CompletionHandler is a function gets called to handle the completion of - // items. By default, the items are printed and the application is exited. - // You can override this default behavior by specifying a custom CompletionHandler. - CompletionHandler func(items []Completion) - - // CommandHandler is a function that gets called to handle execution of a - // command. By default, the command will simply be executed. This can be - // overridden to perform certain actions (such as applying global flags) - // just before the command is executed. Note that if you override the - // handler it is your responsibility to call the command.Execute function. - // - // The command passed into CommandHandler may be nil in case there is no - // command to be executed when parsing has finished. 
- CommandHandler func(command Commander, args []string) error - - internalError error -} - -// SplitArgument represents the argument value of an option that was passed using -// an argument separator. -type SplitArgument interface { - // String returns the option's value as a string, and a boolean indicating - // if the option was present. - Value() (string, bool) -} - -type strArgument struct { - value *string -} - -func (s strArgument) Value() (string, bool) { - if s.value == nil { - return "", false - } - - return *s.value, true -} - -// Options provides parser options that change the behavior of the option -// parser. -type Options uint - -const ( - // None indicates no options. - None Options = 0 - - // HelpFlag adds a default Help Options group to the parser containing - // -h and --help options. When either -h or --help is specified on the - // command line, the parser will return the special error of type - // ErrHelp. When PrintErrors is also specified, then the help message - // will also be automatically printed to os.Stdout. - HelpFlag = 1 << iota - - // PassDoubleDash passes all arguments after a double dash, --, as - // remaining command line arguments (i.e. they will not be parsed for - // flags). - PassDoubleDash - - // IgnoreUnknown ignores any unknown options and passes them as - // remaining command line arguments instead of generating an error. - IgnoreUnknown - - // PrintErrors prints any errors which occurred during parsing to - // os.Stderr. In the special case of ErrHelp, the message will be printed - // to os.Stdout. - PrintErrors - - // PassAfterNonOption passes all arguments after the first non option - // as remaining command line arguments. This is equivalent to strict - // POSIX processing. - PassAfterNonOption - - // Default is a convenient default set of options which should cover - // most of the uses of the flags package. - Default = HelpFlag | PrintErrors | PassDoubleDash -) - -type parseState struct { - arg string - args []string - retargs []string - positional []*Arg - err error - - command *Command - lookup lookup -} - -// Parse is a convenience function to parse command line options with default -// settings. The provided data is a pointer to a struct representing the -// default option group (named "Application Options"). For more control, use -// flags.NewParser. -func Parse(data interface{}) ([]string, error) { - return NewParser(data, Default).Parse() -} - -// ParseArgs is a convenience function to parse command line options with default -// settings. The provided data is a pointer to a struct representing the -// default option group (named "Application Options"). The args argument is -// the list of command line arguments to parse. If you just want to parse the -// default program command line arguments (i.e. os.Args), then use flags.Parse -// instead. For more control, use flags.NewParser. -func ParseArgs(data interface{}, args []string) ([]string, error) { - return NewParser(data, Default).ParseArgs(args) -} - -// NewParser creates a new parser. It uses os.Args[0] as the application -// name and then calls Parser.NewNamedParser (see Parser.NewNamedParser for -// more details). The provided data is a pointer to a struct representing the -// default option group (named "Application Options"), or nil if the default -// group should not be added. The options parameter specifies a set of options -// for the parser. 
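-// [Editorial aside - illustrative sketch only, not part of the vendored file.]
-// The Parse/NewParser API documented above is normally consumed by declaring
-// options as struct tags and calling flags.Parse; the option and field names
-// below ("opts", "Verbose", "Config") are invented for the example.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"os"
-//
-//		flags "github.com/jessevdk/go-flags"
-//	)
-//
-//	func main() {
-//		var opts struct {
-//			Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
-//			Config  string `long:"config" description:"Path to a config file"`
-//		}
-//
-//		// flags.Parse uses the Default option set (HelpFlag | PrintErrors |
-//		// PassDoubleDash) and returns the remaining positional arguments.
-//		rest, err := flags.Parse(&opts)
-//		if err != nil {
-//			os.Exit(1)
-//		}
-//		fmt.Println("positional arguments:", rest)
-//	}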
-func NewParser(data interface{}, options Options) *Parser { - p := NewNamedParser(path.Base(os.Args[0]), options) - - if data != nil { - g, err := p.AddGroup("Application Options", "", data) - - if err == nil { - g.parent = p - } - - p.internalError = err - } - - return p -} - -// NewNamedParser creates a new parser. The appname is used to display the -// executable name in the built-in help message. Option groups and commands can -// be added to this parser by using AddGroup and AddCommand. -func NewNamedParser(appname string, options Options) *Parser { - p := &Parser{ - Command: newCommand(appname, "", "", nil), - Options: options, - NamespaceDelimiter: ".", - EnvNamespaceDelimiter: "_", - } - - p.Command.parent = p - - return p -} - -// Parse parses the command line arguments from os.Args using Parser.ParseArgs. -// For more detailed information see ParseArgs. -func (p *Parser) Parse() ([]string, error) { - return p.ParseArgs(os.Args[1:]) -} - -// ParseArgs parses the command line arguments according to the option groups that -// were added to the parser. On successful parsing of the arguments, the -// remaining, non-option, arguments (if any) are returned. The returned error -// indicates a parsing error and can be used with PrintError to display -// contextual information on where the error occurred exactly. -// -// When the common help group has been added (AddHelp) and either -h or --help -// was specified in the command line arguments, a help message will be -// automatically printed if the PrintErrors option is enabled. -// Furthermore, the special error type ErrHelp is returned. -// It is up to the caller to exit the program if so desired. -func (p *Parser) ParseArgs(args []string) ([]string, error) { - if p.internalError != nil { - return nil, p.internalError - } - - p.eachOption(func(c *Command, g *Group, option *Option) { - option.clearReferenceBeforeSet = true - option.updateDefaultLiteral() - }) - - // Add built-in help group to all commands if necessary - if (p.Options & HelpFlag) != None { - p.addHelpGroups(p.showBuiltinHelp) - } - - compval := os.Getenv("GO_FLAGS_COMPLETION") - - if len(compval) != 0 { - comp := &completion{parser: p} - items := comp.complete(args) - - if p.CompletionHandler != nil { - p.CompletionHandler(items) - } else { - comp.print(items, compval == "verbose") - os.Exit(0) - } - - return nil, nil - } - - s := &parseState{ - args: args, - retargs: make([]string, 0, len(args)), - } - - p.fillParseState(s) - - for !s.eof() { - var err error - arg := s.pop() - - // When PassDoubleDash is set and we encounter a --, then - // simply append all the rest as arguments and break out - if (p.Options&PassDoubleDash) != None && arg == "--" { - s.addArgs(s.args...) 
- break - } - - if !argumentIsOption(arg) { - if (p.Options&PassAfterNonOption) != None && s.lookup.commands[arg] == nil { - // If PassAfterNonOption is set then all remaining arguments - // are considered positional - if err = s.addArgs(s.arg); err != nil { - break - } - - if err = s.addArgs(s.args...); err != nil { - break - } - - break - } - - // Note: this also sets s.err, so we can just check for - // nil here and use s.err later - if p.parseNonOption(s) != nil { - break - } - - continue - } - - prefix, optname, islong := stripOptionPrefix(arg) - optname, _, argument := splitOption(prefix, optname, islong) - - if islong { - err = p.parseLong(s, optname, argument) - } else { - err = p.parseShort(s, optname, argument) - } - - if err != nil { - ignoreUnknown := (p.Options & IgnoreUnknown) != None - parseErr := wrapError(err) - - if parseErr.Type != ErrUnknownFlag || (!ignoreUnknown && p.UnknownOptionHandler == nil) { - s.err = parseErr - break - } - - if ignoreUnknown { - s.addArgs(arg) - } else if p.UnknownOptionHandler != nil { - modifiedArgs, err := p.UnknownOptionHandler(optname, strArgument{argument}, s.args) - - if err != nil { - s.err = err - break - } - - s.args = modifiedArgs - } - } - } - - if s.err == nil { - p.eachOption(func(c *Command, g *Group, option *Option) { - err := option.clearDefault() - if err != nil { - if _, ok := err.(*Error); !ok { - err = p.marshalError(option, err) - } - s.err = err - } - }) - - s.checkRequired(p) - } - - var reterr error - - if s.err != nil { - reterr = s.err - } else if len(s.command.commands) != 0 && !s.command.SubcommandsOptional { - reterr = s.estimateCommand() - } else if cmd, ok := s.command.data.(Commander); ok { - if p.CommandHandler != nil { - reterr = p.CommandHandler(cmd, s.retargs) - } else { - reterr = cmd.Execute(s.retargs) - } - } else if p.CommandHandler != nil { - reterr = p.CommandHandler(nil, s.retargs) - } - - if reterr != nil { - var retargs []string - - if ourErr, ok := reterr.(*Error); !ok || ourErr.Type != ErrHelp { - retargs = append([]string{s.arg}, s.args...) 
- } else { - retargs = s.args - } - - return retargs, p.printError(reterr) - } - - return s.retargs, nil -} - -func (p *parseState) eof() bool { - return len(p.args) == 0 -} - -func (p *parseState) pop() string { - if p.eof() { - return "" - } - - p.arg = p.args[0] - p.args = p.args[1:] - - return p.arg -} - -func (p *parseState) peek() string { - if p.eof() { - return "" - } - - return p.args[0] -} - -func (p *parseState) checkRequired(parser *Parser) error { - c := parser.Command - - var required []*Option - - for c != nil { - c.eachGroup(func(g *Group) { - for _, option := range g.options { - if !option.isSet && option.Required { - required = append(required, option) - } - } - }) - - c = c.Active - } - - if len(required) == 0 { - if len(p.positional) > 0 { - var reqnames []string - - for _, arg := range p.positional { - argRequired := (!arg.isRemaining() && p.command.ArgsRequired) || arg.Required != -1 || arg.RequiredMaximum != -1 - - if !argRequired { - continue - } - - if arg.isRemaining() { - if arg.value.Len() < arg.Required { - var arguments string - - if arg.Required > 1 { - arguments = "arguments, but got only " + fmt.Sprintf("%d", arg.value.Len()) - } else { - arguments = "argument" - } - - reqnames = append(reqnames, "`"+arg.Name+" (at least "+fmt.Sprintf("%d", arg.Required)+" "+arguments+")`") - } else if arg.RequiredMaximum != -1 && arg.value.Len() > arg.RequiredMaximum { - if arg.RequiredMaximum == 0 { - reqnames = append(reqnames, "`"+arg.Name+" (zero arguments)`") - } else { - var arguments string - - if arg.RequiredMaximum > 1 { - arguments = "arguments, but got " + fmt.Sprintf("%d", arg.value.Len()) - } else { - arguments = "argument" - } - - reqnames = append(reqnames, "`"+arg.Name+" (at most "+fmt.Sprintf("%d", arg.RequiredMaximum)+" "+arguments+")`") - } - } - } else { - reqnames = append(reqnames, "`"+arg.Name+"`") - } - } - - if len(reqnames) == 0 { - return nil - } - - var msg string - - if len(reqnames) == 1 { - msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0]) - } else { - msg = fmt.Sprintf("the required arguments %s and %s were not provided", - strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1]) - } - - p.err = newError(ErrRequired, msg) - return p.err - } - - return nil - } - - names := make([]string, 0, len(required)) - - for _, k := range required { - names = append(names, "`"+k.String()+"'") - } - - sort.Strings(names) - - var msg string - - if len(names) == 1 { - msg = fmt.Sprintf("the required flag %s was not specified", names[0]) - } else { - msg = fmt.Sprintf("the required flags %s and %s were not specified", - strings.Join(names[:len(names)-1], ", "), names[len(names)-1]) - } - - p.err = newError(ErrRequired, msg) - return p.err -} - -func (p *parseState) estimateCommand() error { - commands := p.command.sortedVisibleCommands() - cmdnames := make([]string, len(commands)) - - for i, v := range commands { - cmdnames[i] = v.Name - } - - var msg string - var errtype ErrorType - - if len(p.retargs) != 0 { - c, l := closestChoice(p.retargs[0], cmdnames) - msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0]) - errtype = ErrUnknownCommand - - if float32(l)/float32(len(c)) < 0.5 { - msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c) - } else if len(cmdnames) == 1 { - msg = fmt.Sprintf("%s. You should use the %s command", - msg, - cmdnames[0]) - } else if len(cmdnames) > 1 { - msg = fmt.Sprintf("%s. 
Please specify one command of: %s or %s", - msg, - strings.Join(cmdnames[:len(cmdnames)-1], ", "), - cmdnames[len(cmdnames)-1]) - } - } else { - errtype = ErrCommandRequired - - if len(cmdnames) == 1 { - msg = fmt.Sprintf("Please specify the %s command", cmdnames[0]) - } else if len(cmdnames) > 1 { - msg = fmt.Sprintf("Please specify one command of: %s or %s", - strings.Join(cmdnames[:len(cmdnames)-1], ", "), - cmdnames[len(cmdnames)-1]) - } - } - - return newError(errtype, msg) -} - -func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) { - if !option.canArgument() { - if argument != nil { - return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option) - } - - err = option.set(nil) - } else if argument != nil || (canarg && !s.eof()) { - var arg string - - if argument != nil { - arg = *argument - } else { - arg = s.pop() - - if validationErr := option.isValidValue(arg); validationErr != nil { - return newErrorf(ErrExpectedArgument, validationErr.Error()) - } else if p.Options&PassDoubleDash != 0 && arg == "--" { - return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option) - } - } - - if option.tag.Get("unquote") != "false" { - arg, err = unquoteIfPossible(arg) - } - - if err == nil { - err = option.set(&arg) - } - } else if option.OptionalArgument { - option.empty() - - for _, v := range option.OptionalValue { - err = option.set(&v) - - if err != nil { - break - } - } - } else { - err = newErrorf(ErrExpectedArgument, "expected argument for flag `%s'", option) - } - - if err != nil { - if _, ok := err.(*Error); !ok { - err = p.marshalError(option, err) - } - } - - return err -} - -func (p *Parser) marshalError(option *Option, err error) *Error { - s := "invalid argument for flag `%s'" - - expected := p.expectedType(option) - - if expected != "" { - s = s + " (expected " + expected + ")" - } - - return newErrorf(ErrMarshal, s+": %s", - option, - err.Error()) -} - -func (p *Parser) expectedType(option *Option) string { - valueType := option.value.Type() - - if valueType.Kind() == reflect.Func { - return "" - } - - return valueType.String() -} - -func (p *Parser) parseLong(s *parseState, name string, argument *string) error { - if option := s.lookup.longNames[name]; option != nil { - // Only long options that are required can consume an argument - // from the argument list - canarg := !option.OptionalArgument - - return p.parseOption(s, name, option, canarg, argument) - } - - return newErrorf(ErrUnknownFlag, "unknown flag `%s'", name) -} - -func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) { - c, n := utf8.DecodeRuneInString(optname) - - if n == len(optname) { - return optname, nil - } - - first := string(c) - - if option := s.lookup.shortNames[first]; option != nil && option.canArgument() { - arg := optname[n:] - return first, &arg - } - - return optname, nil -} - -func (p *Parser) parseShort(s *parseState, optname string, argument *string) error { - if argument == nil { - optname, argument = p.splitShortConcatArg(s, optname) - } - - for i, c := range optname { - shortname := string(c) - - if option := s.lookup.shortNames[shortname]; option != nil { - // Only the last short argument can consume an argument from - // the arguments list, and only if it's non optional - canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument - - if err := p.parseOption(s, shortname, option, canarg, argument); err != nil { - 
return err - } - } else { - return newErrorf(ErrUnknownFlag, "unknown flag `%s'", shortname) - } - - // Only the first option can have a concatted argument, so just - // clear argument here - argument = nil - } - - return nil -} - -func (p *parseState) addArgs(args ...string) error { - for len(p.positional) > 0 && len(args) > 0 { - arg := p.positional[0] - - if err := convert(args[0], arg.value, arg.tag); err != nil { - p.err = err - return err - } - - if !arg.isRemaining() { - p.positional = p.positional[1:] - } - - args = args[1:] - } - - p.retargs = append(p.retargs, args...) - return nil -} - -func (p *Parser) parseNonOption(s *parseState) error { - if len(s.positional) > 0 { - return s.addArgs(s.arg) - } - - if len(s.command.commands) > 0 && len(s.retargs) == 0 { - if cmd := s.lookup.commands[s.arg]; cmd != nil { - s.command.Active = cmd - cmd.fillParseState(s) - - return nil - } else if !s.command.SubcommandsOptional { - s.addArgs(s.arg) - return newErrorf(ErrUnknownCommand, "Unknown command `%s'", s.arg) - } - } - - return s.addArgs(s.arg) -} - -func (p *Parser) showBuiltinHelp() error { - var b bytes.Buffer - - p.WriteHelp(&b) - return newError(ErrHelp, b.String()) -} - -func (p *Parser) printError(err error) error { - if err != nil && (p.Options&PrintErrors) != None { - flagsErr, ok := err.(*Error) - - if ok && flagsErr.Type == ErrHelp { - fmt.Fprintln(os.Stdout, err) - } else { - fmt.Fprintln(os.Stderr, err) - } - } - - return err -} diff --git a/vendor/github.com/jessevdk/go-flags/termsize.go b/vendor/github.com/jessevdk/go-flags/termsize.go deleted file mode 100644 index 829e477ad..000000000 --- a/vendor/github.com/jessevdk/go-flags/termsize.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows,!plan9,!appengine,!wasm - -package flags - -import ( - "golang.org/x/sys/unix" -) - -func getTerminalColumns() int { - ws, err := unix.IoctlGetWinsize(0, unix.TIOCGWINSZ) - if err != nil { - return 80 - } - return int(ws.Col) -} diff --git a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go deleted file mode 100644 index c1ff18673..000000000 --- a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build plan9 appengine wasm - -package flags - -func getTerminalColumns() int { - return 80 -} diff --git a/vendor/github.com/jessevdk/go-flags/termsize_windows.go b/vendor/github.com/jessevdk/go-flags/termsize_windows.go deleted file mode 100644 index 5c0fa6ba2..000000000 --- a/vendor/github.com/jessevdk/go-flags/termsize_windows.go +++ /dev/null @@ -1,85 +0,0 @@ -// +build windows - -package flags - -import ( - "syscall" - "unsafe" -) - -type ( - SHORT int16 - WORD uint16 - - SMALL_RECT struct { - Left SHORT - Top SHORT - Right SHORT - Bottom SHORT - } - - COORD struct { - X SHORT - Y SHORT - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes WORD - Window SMALL_RECT - MaximumWindowSize COORD - } -) - -var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") -var getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - -func getError(r1, r2 uintptr, lastErr error) error { - // If the function fails, the return value is zero. 
- if r1 == 0 { - if lastErr != nil { - return lastErr - } - return syscall.EINVAL - } - return nil -} - -func getStdHandle(stdhandle int) (uintptr, error) { - handle, err := syscall.GetStdHandle(stdhandle) - if err != nil { - return 0, err - } - return uintptr(handle), nil -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - var info CONSOLE_SCREEN_BUFFER_INFO - if err := getError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)); err != nil { - return nil, err - } - return &info, nil -} - -func getTerminalColumns() int { - defaultWidth := 80 - - stdoutHandle, err := getStdHandle(syscall.STD_OUTPUT_HANDLE) - if err != nil { - return defaultWidth - } - - info, err := GetConsoleScreenBufferInfo(stdoutHandle) - if err != nil { - return defaultWidth - } - - if info.MaximumWindowSize.X > 0 { - return int(info.MaximumWindowSize.X) - } - - return defaultWidth -} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes deleted file mode 100644 index 402433593..000000000 --- a/vendor/github.com/klauspost/compress/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -* -text -*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore deleted file mode 100644 index d31b37815..000000000 --- a/vendor/github.com/klauspost/compress/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -/s2/cmd/_s2sx/sfx-exe - -# Linux perf files -perf.data -perf.data.old - -# gdb history -.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml deleted file mode 100644 index 4528059ca..000000000 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ /dev/null @@ -1,123 +0,0 @@ -version: 2 - -before: - hooks: - - ./gen.sh - -builds: - - - id: "s2c" - binary: s2c - main: ./s2/cmd/s2c/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - - - id: "s2d" - binary: s2d - main: ./s2/cmd/s2d/main.go - flags: - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - - - id: "s2sx" - binary: s2sx - main: ./s2/cmd/_s2sx/main.go - flags: - - -modfile=s2sx.mod - - -trimpath - env: - - CGO_ENABLED=0 - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - -archives: - - - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - format_overrides: - - goos: windows - format: zip - files: - - unpack/* - - s2/LICENSE - - s2/README.md -checksum: - name_template: 'checksums.txt' -snapshot: - version_template: "{{ .Tag 
}}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/compress - maintainer: Klaus Post - description: S2 Compression Tool - license: BSD 3-Clause - formats: - - deb - - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE deleted file mode 100644 index 87d557477..000000000 --- a/vendor/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,304 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md deleted file mode 100644 index de264c85a..000000000 --- a/vendor/github.com/klauspost/compress/README.md +++ /dev/null @@ -1,721 +0,0 @@ -# compress - -This package provides various compression algorithms. - -* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. -* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. 
-* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). -* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. -* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. -* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. -* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. - -[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) -[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) - -# changelog - -* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) - * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 - * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 - * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 - * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 - * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 - -* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) - * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 - * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 - * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 - * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 - -* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) - * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 - * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 - -* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) - * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 - * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 - -* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) - * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 - * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 - -* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) - * flate: Fix reset with dictionary on custom window 
encodes https://github.com/klauspost/compress/pull/912 - * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 - * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 - * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 - * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 -https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 - -* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) - * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 - * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 - * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 - * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 - * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 - -* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) - * fse: Fix max header size https://github.com/klauspost/compress/pull/881 - * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 - * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 - -* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) - * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 - -* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 - * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 - -* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) - * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 - * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 - * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 - * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 - * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 - * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - -
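-
-*(Editorial aside, not part of the vendored README: the package list above mentions the pure-Go zstd implementation; the sketch below shows a minimal in-memory round-trip, assuming the upstream `zstd.NewWriter`/`EncodeAll` and `zstd.NewReader`/`DecodeAll` helpers.)*
-
-```go
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-
-	"github.com/klauspost/compress/zstd"
-)
-
-func main() {
-	payload := bytes.Repeat([]byte("example data "), 64)
-
-	// Passing nil as the writer/reader is fine when only the stateless
-	// EncodeAll/DecodeAll helpers are used on byte slices.
-	enc, err := zstd.NewWriter(nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer enc.Close()
-
-	dec, err := zstd.NewReader(nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer dec.Close()
-
-	compressed := enc.EncodeAll(payload, nil)
-	restored, err := dec.DecodeAll(compressed, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	fmt.Printf("in=%d out=%d roundtrip ok=%v\n",
-		len(payload), len(compressed), bytes.Equal(payload, restored))
-}
-```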
- See changes to v1.16.x - - -* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) - * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 - * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 - -* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) - * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 - * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 - * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 - * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 - -* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) - * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 - * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 - -* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) - * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 - * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 - * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 - * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 - * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 - * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 - -* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) - * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 - * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 - * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 - * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 - * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 - -* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) - * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 - * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 - * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 - * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 - * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 - * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 -
- -
- See changes to v1.15.x - -* Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 - * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 - * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 - * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 - -* Jan 3rd, 2023 (v1.15.14) - - * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 - * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 - * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 - * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 - -* Dec 11, 2022 (v1.15.13) - * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 - * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 - -* Oct 26, 2022 (v1.15.12) - - * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 - * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 - -* Sept 26, 2022 (v1.15.11) - - * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 - * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 - * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 - * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 - -* Sept 16, 2022 (v1.15.10) - - * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 - * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 - * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 - * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 - * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 - * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 - * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 - -* July 21, 2022 (v1.15.9) - - * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 - * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. - -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) - -Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. - -Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. - -While the release has been extensively tested, it is recommended to testing when upgrading. - -
- -
- See changes to v1.14.x - -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) -
- -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- - -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/358) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- -
- See changes to v1.11.x - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- -
- See changes prior to v1.10.0 - -* Jan 20,2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features -* Dec 29, 2019: zstd: Single segment flag condintions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above at 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. -* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate. -* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Less allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) ztsd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) - -
- -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: fix 32 bit builds. -* June 17, 2019: Easier use in modules (less dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). -* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Level 2-4 have been rewritten and now offers significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy to compress material run faster. Typical speedup is around 25%. -* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1,2,3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate. 
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file -* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. - -
- -# deflate usage - -The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: - -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) - -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and in the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages expose the same API as the standard library, so you can use the standard godoc for reference: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only a minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer. The stdlib is in the same range. -If you expect to have a lot of concurrently allocated Writers, consider using -the stateless compression described below. - -For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). - -To disable all assembly add `-tags=noasm`. This works across all packages. - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It will do compression but without maintaining any state between Write calls. - -This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter) and the sketch after this section. - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -```go - // replace 'ioutil.Discard' with your output. - gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle.
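As a companion to the gzip example above, here is a minimal sketch of the direct deflate route, assuming this module's `flate.NewStatelessWriter`; the `compressStateless` helper is only for illustration:

```go
package main

import (
	"bufio"
	"io"
	"log"

	"github.com/klauspost/compress/flate"
)

// compressStateless writes DEFLATE-compressed data to dst without keeping
// any encoder state between Write calls.
func compressStateless(dst io.Writer, payload []byte) error {
	// NewStatelessWriter returns an io.WriteCloser; every Write is compressed
	// independently, so buffering writes improves the output size.
	w := flate.NewStatelessWriter(dst)

	buf := bufio.NewWriterSize(w, 4096)
	if _, err := buf.Write(payload); err != nil {
		return err
	}
	if err := buf.Flush(); err != nil {
		return err
	}
	return w.Close()
}

func main() {
	// Replace io.Discard with your output.
	if err := compressStateless(io.Discard, []byte("example payload")); err != nil {
		log.Fatal(err)
	}
}
```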
- -Stateless compression is almost always worse than the fastest regular compression level, -and each write will allocate (a little) memory. - -# Performance Update 2018 - -It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
- -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression. - -The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and lower speed than levels 6 & 7 of this package, respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression. - -So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character. - -This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content.
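To make enabling the mode concrete, a minimal sketch using the gzip wrapper follows, assuming this module's `gzip.HuffmanOnly` level constant (mirroring `flate.HuffmanOnly`):

```go
package main

import (
	"io"
	"log"

	"github.com/klauspost/compress/gzip"
)

func main() {
	// HuffmanOnly disables match searching entirely; only entropy coding of
	// individual bytes is performed, giving near linear time compression.
	gw, err := gzip.NewWriterLevel(io.Discard, gzip.HuffmanOnly) // replace io.Discard with your output
	if err != nil {
		log.Fatal(err)
	}
	if _, err := gw.Write([]byte("text with skewed byte frequencies compresses best here")); err != nil {
		log.Fatal(err)
	}
	if err := gw.Close(); err != nil { // Close flushes the final block
		log.Fatal(err)
	}
}
```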
For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a roughly 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. - -# Other packages - -Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): - -* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. -* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. -* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. -* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. -* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. -* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. -* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md deleted file mode 100644 index ca6685e2b..000000000 --- a/vendor/github.com/klauspost/compress/SECURITY.md +++ /dev/null @@ -1,25 +0,0 @@ -# Security Policy - -## Supported Versions - -Security updates are applied only to the latest release. - -## Vulnerability Definition - -A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability. - -Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. - -Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. - -It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. - -Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround. - -## Reporting a Vulnerability - -If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. - -Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible, please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. - -This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go deleted file mode 100644 index ea5a692d5..000000000 --- a/vendor/github.com/klauspost/compress/compressible.go +++ /dev/null @@ -1,85 +0,0 @@ -package compress - -import "math" - -// Estimate returns a normalized compressibility estimate of block b. -// Values close to zero are likely uncompressible. -// Values above 0.1 are likely to be compressible. -// Values above 0.5 are very compressible. -// Very small lengths will return 0. -func Estimate(b []byte) float64 { - if len(b) < 16 { - return 0 - } - - // Correctly predicted order 1 - hits := 0 - lastMatch := false - var o1 [256]byte - var hist [256]int - c1 := byte(0) - for _, c := range b { - if c == o1[c1] { - // We only count a hit if there was two correct predictions in a row. - if lastMatch { - hits++ - } - lastMatch = true - } else { - lastMatch = false - } - o1[c1] = c - c1 = c - hist[c]++ - } - - // Use x^0.6 to give better spread - prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) - - // Calculate histogram distribution - variance := float64(0) - avg := float64(len(b)) / 256 - - for _, v := range hist { - Δ := float64(v) - avg - variance += Δ * Δ - } - - stddev := math.Sqrt(float64(variance)) / float64(len(b)) - exp := math.Sqrt(1 / float64(len(b))) - - // Subtract expected stddev - stddev -= exp - if stddev < 0 { - stddev = 0 - } - stddev *= 1 + exp - - // Use x^0.4 to give better spread - entropy := math.Pow(stddev, 0.4) - - // 50/50 weight between prediction and histogram distribution - return math.Pow((prediction+entropy)/2, 0.9) -} - -// ShannonEntropyBits returns the number of bits minimum required to represent -// an entropy encoding of the input bytes. -// https://en.wiktionary.org/wiki/Shannon_entropy -func ShannonEntropyBits(b []byte) int { - if len(b) == 0 { - return 0 - } - var hist [256]int - for _, c := range b { - hist[c]++ - } - shannon := float64(0) - invTotal := 1.0 / float64(len(b)) - for _, v := range hist[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } - } - return int(math.Ceil(shannon)) -} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go deleted file mode 100644 index af53fb860..000000000 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ /dev/null @@ -1,1017 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Copyright (c) 2015 Klaus Post -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -const ( - NoCompression = 0 - BestSpeed = 1 - BestCompression = 9 - DefaultCompression = -1 - - // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman - // entropy encoding. This mode is useful in compressing data that has - // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) - // that lacks an entropy encoder. Compression gains are achieved when - // certain bytes in the input stream occur more frequently than others. - // - // Note that HuffmanOnly produces a compressed output that is - // RFC 1951 compliant. That is, any valid DEFLATE decompressor will - // continue to be able to decompress this output. - HuffmanOnly = -2 - ConstantCompression = HuffmanOnly // compatibility alias. 
- - logWindowSize = 15 - windowSize = 1 << logWindowSize - windowMask = windowSize - 1 - logMaxOffsetSize = 15 // Standard DEFLATE - minMatchLength = 4 // The smallest match that the compressor looks for - maxMatchLength = 258 // The longest match for the compressor - minOffsetSize = 1 // The shortest offset that makes any sense - - // The maximum number of tokens we will encode at the time. - // Smaller sizes usually creates less optimal blocks. - // Bigger can make context switching slow. - // We use this for levels 7-9, so we make it big. - maxFlateBlockTokens = 1 << 15 - maxStoreBlockSize = 65535 - hashBits = 17 // After 17 performance degrades - hashSize = 1 << hashBits - hashMask = (1 << hashBits) - 1 - hashShift = (hashBits + minMatchLength - 1) / minMatchLength - maxHashOffset = 1 << 28 - - skipNever = math.MaxInt32 - - debugDeflate = false -) - -type compressionLevel struct { - good, lazy, nice, chain, fastSkipHashing, level int -} - -// Compression levels have been rebalanced from zlib deflate defaults -// to give a bigger spread in speed and compression. -// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ -var levels = []compressionLevel{ - {}, // 0 - // Level 1-6 uses specialized algorithm - values not used - {0, 0, 0, 0, 0, 1}, - {0, 0, 0, 0, 0, 2}, - {0, 0, 0, 0, 0, 3}, - {0, 0, 0, 0, 0, 4}, - {0, 0, 0, 0, 0, 5}, - {0, 0, 0, 0, 0, 6}, - // Levels 7-9 use increasingly more lazy matching - // and increasingly stringent conditions for "good enough". - {8, 12, 16, 24, skipNever, 7}, - {16, 30, 40, 64, skipNever, 8}, - {32, 258, 258, 1024, skipNever, 9}, -} - -// advancedState contains state for the advanced levels, with bigger hash tables, etc. -type advancedState struct { - // deflate state - length int - offset int - maxInsertIndex int - chainHead int - hashOffset int - - ii uint16 // position of last match, intended to overflow to reset. - - // input window: unprocessed data is window[index:windowEnd] - index int - hashMatch [maxMatchLength + minMatchLength]uint32 - - // Input hash chains - // hashHead[hashValue] contains the largest inputIndex with the specified hash value - // If hashHead[hashValue] is within the current window, then - // hashPrev[hashHead[hashValue] & windowMask] contains the previous index - // with the same hash value. - hashHead [hashSize]uint32 - hashPrev [windowSize]uint32 -} - -type compressor struct { - compressionLevel - - h *huffmanEncoder - w *huffmanBitWriter - - // compression algorithm - fill func(*compressor, []byte) int // copy data to window - step func(*compressor) // process window - - window []byte - windowEnd int - blockStart int // window index where current tokens start - err error - - // queued output tokens - tokens tokens - fast fastEnc - state *advancedState - - sync bool // requesting flush - byteAvailable bool // if true, still need to process window[index-1]. 
-} - -func (d *compressor) fillDeflate(b []byte) int { - s := d.state - if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { - // shift the window by windowSize - //copy(d.window[:], d.window[windowSize:2*windowSize]) - *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) - s.index -= windowSize - d.windowEnd -= windowSize - if d.blockStart >= windowSize { - d.blockStart -= windowSize - } else { - d.blockStart = math.MaxInt32 - } - s.hashOffset += windowSize - if s.hashOffset > maxHashOffset { - delta := s.hashOffset - 1 - s.hashOffset -= delta - s.chainHead -= delta - // Iterate over slices instead of arrays to avoid copying - // the entire table onto the stack (Issue #18625). - for i, v := range s.hashPrev[:] { - if int(v) > delta { - s.hashPrev[i] = uint32(int(v) - delta) - } else { - s.hashPrev[i] = 0 - } - } - for i, v := range s.hashHead[:] { - if int(v) > delta { - s.hashHead[i] = uint32(int(v) - delta) - } else { - s.hashHead[i] = 0 - } - } - } - } - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - var window []byte - if d.blockStart <= index { - window = d.window[d.blockStart:index] - } - d.blockStart = index - //d.w.writeBlock(tok, eof, window) - d.w.writeBlockDynamic(tok, eof, window, d.sync) - return d.w.err - } - return nil -} - -// writeBlockSkip writes the current block and uses the number of tokens -// to determine if the block should be stored on no matches, or -// only huffman encoded. -func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - if d.blockStart <= index { - window := d.window[d.blockStart:index] - // If we removed less than a 64th of all literals - // we huffman compress the block. - if int(tok.n) > len(window)-int(tok.n>>6) { - d.w.writeBlockHuff(eof, window, d.sync) - } else { - // Write a dynamic huffman block. - d.w.writeBlockDynamic(tok, eof, window, d.sync) - } - } else { - d.w.writeBlock(tok, eof, nil) - } - d.blockStart = index - return d.w.err - } - return nil -} - -// fillWindow will fill the current window with the supplied -// dictionary and calculate all hashes. -// This is much faster than doing a full encode. -// Should only be used after a start/reset. -func (d *compressor) fillWindow(b []byte) { - // Do not fill window if we are in store-only or huffman mode. - if d.level <= 0 && d.level > -MinCustomWindowSize { - return - } - if d.fast != nil { - // encode the last data, but discard the result - if len(b) > maxMatchOffset { - b = b[len(b)-maxMatchOffset:] - } - d.fast.Encode(&d.tokens, b) - d.tokens.Reset() - return - } - s := d.state - // If we are given too much, cut it. - if len(b) > windowSize { - b = b[len(b)-windowSize:] - } - // Add all to window. - n := copy(d.window[d.windowEnd:], b) - - // Calculate 256 hashes at the time (more L1 cache hits) - loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { - startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - - if dstSize <= 0 { - continue - } - - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. 
- s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - // Update window information. - d.windowEnd += n - s.index = n -} - -// Try to find a match starting at index whose length is greater than prevSize. -// We only look at chainCount possibilities before giving up. -// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } - - win := d.window[0 : pos+minMatchLook] - - // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } - - // If we've got a match that's good enough, only look in 1/4 the chain. - tries := d.chain - length = minMatchLength - 1 - - wEnd := win[pos+length] - wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } - offset = 0 - - if d.chain < 100 { - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return - } - - // Minimum gain to accept a match. - cGain := 4 - - // Some like it higher (CSV), some like it lower (JSON) - const baseCost = 3 - // Base is 4 bytes at with an additional cost. - // Matches must be better than this. - - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - // Calculate gain. Estimate - newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) - - //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) - if newGain > cGain { - length = n - offset = pos - i - cGain = newGain - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return -} - -func (d *compressor) writeStoredBlock(buf []byte) error { - if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { - return d.w.err - } - d.w.writeBytes(buf) - return d.w.err -} - -// hash4 returns a hash representation of the first 4 bytes -// of the supplied slice. -// The caller must ensure that len(b) >= 4. -func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. 
-func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> (32 - h) -} - -// bulkHash4 will compute hashes using the same -// algorithm as hash4 -func bulkHash4(b []byte, dst []uint32) { - if len(b) < 4 { - return - } - hb := binary.LittleEndian.Uint32(b) - - dst[0] = hash4u(hb, hashBits) - end := len(b) - 4 + 1 - for i := 1; i < end; i++ { - hb = (hb >> 8) | uint32(b[i+3])<<24 - dst[i] = hash4u(hb, hashBits) - } -} - -func (d *compressor) initDeflate() { - d.window = make([]byte, 2*windowSize) - d.byteAvailable = false - d.err = nil - if d.state == nil { - return - } - s := d.state - s.index = 0 - s.hashOffset = 1 - s.length = minMatchLength - 1 - s.offset = 0 - s.chainHead = -1 -} - -// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, -// meaning it always has lazy matching on. -func (d *compressor) deflateLazy() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = debugDeflate - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - if d.windowEnd != s.index && d.chain > 100 { - // Get literal huffman coder. - if d.h == nil { - d.h = newHuffmanEncoder(maxFlateBlockTokens) - } - var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { - tmp[v]++ - } - d.h.generate(tmp[:], 15) - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - // Flush current output block if any. - if d.byteAvailable { - // There is still one pending token that needs to be flushed - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - } - if d.tokens.n > 0 { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - hash := hash4(d.window[s.index:]) - ch := s.hashHead[hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[hash] = uint32(s.index + s.hashOffset) - } - prevLength := s.length - prevOffset := s.offset - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - - if prevLength >= minMatchLength && s.length <= prevLength { - // No better match, but check for better match at end... - // - // Skip forward a number of bytes. - // Offset of 2 seems to yield best results. 3 is sometimes better. - const checkOff = 2 - - // Check all, except full length - if prevLength < maxMatchLength-checkOff { - prevIndex := s.index - 1 - if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } - end += prevIndex - - // Hash at match end. 
- h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength { - prevLength = length - prevOffset = prevIndex - ch2 - - // Extend back... - for i := checkOff - 1; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } else if false { - // Check one further ahead. - // Only rarely better, disabled for now. - prevIndex++ - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength+checkOff { - prevLength = length - prevOffset = prevIndex - ch2 - prevIndex-- - - // Extend back... - for i := checkOff; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } - } - } - } - } - } - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - newIndex := s.index + prevLength - 1 - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. 
- s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - - s.index = newIndex - d.byteAvailable = false - s.length = minMatchLength - 1 - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.ii = 0 - } else { - // Reset, if we got a match this run. - if s.length >= minMatchLength { - s.ii = 0 - } - // We have a byte waiting. Emit it. - if d.byteAvailable { - s.ii++ - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - - // If we have a long run of no matches, skip additional bytes - // Resets when s.ii overflows after 64KB. - if n := int(s.ii) - d.chain; n > 0 { - n = 1 + int(n>>6) - for j := 0; j < n; j++ { - if s.index >= d.windowEnd-1 { - break - } - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - // Index... - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - s.index++ - } - // Flush last byte - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - } - } else { - s.index++ - d.byteAvailable = true - } - } - } -} - -func (d *compressor) store() { - if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - d.windowEnd = 0 - } -} - -// fillWindow will fill the buffer with data for huffman-only compression. -// The number of bytes copied is returned. -func (d *compressor) fillBlock(b []byte) int { - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -// storeHuff will compress and store the currently added data, -// if enough has been accumulated or we at the end of the stream. -// Any error that occurred will be in d.err -func (d *compressor) storeHuff() { - if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { - return - } - d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - d.windowEnd = 0 -} - -// storeFast will compress and store the currently added data, -// if enough has been accumulated or we at the end of the stream. -// Any error that occurred will be in d.err -func (d *compressor) storeFast() { - // We only compress if we have maxStoreBlockSize. - if d.windowEnd < len(d.window) { - if !d.sync { - return - } - // Handle extremely small sizes. - if d.windowEnd < 128 { - if d.windowEnd == 0 { - return - } - if d.windowEnd <= 32 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - } else { - d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) - d.err = d.w.err - } - d.tokens.Reset() - d.windowEnd = 0 - d.fast.Reset() - return - } - } - - d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) - // If we made zero matches, store the block as is. - if d.tokens.n == 0 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - // If we removed less than 1/16th, huffman compress the block. 
- } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { - d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } else { - d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } - d.tokens.Reset() - d.windowEnd = 0 -} - -// write will add input byte to the stream. -// Unless an error occurs all bytes will be consumed. -func (d *compressor) write(b []byte) (n int, err error) { - if d.err != nil { - return 0, d.err - } - n = len(b) - for len(b) > 0 { - if d.windowEnd == len(d.window) || d.sync { - d.step(d) - } - b = b[d.fill(d, b):] - if d.err != nil { - return 0, d.err - } - } - return n, d.err -} - -func (d *compressor) syncFlush() error { - d.sync = true - if d.err != nil { - return d.err - } - d.step(d) - if d.err == nil { - d.w.writeStoredHeader(0, false) - d.w.flush() - d.err = d.w.err - } - d.sync = false - return d.err -} - -func (d *compressor) init(w io.Writer, level int) (err error) { - d.w = newHuffmanBitWriter(w) - - switch { - case level == NoCompression: - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).store - case level == ConstantCompression: - d.w.logNewTablePenalty = 10 - d.window = make([]byte, 32<<10) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeHuff - case level == DefaultCompression: - level = 5 - fallthrough - case level >= 1 && level <= 6: - d.w.logNewTablePenalty = 7 - d.fast = newFastEnc(level) - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeFast - case 7 <= level && level <= 9: - d.w.logNewTablePenalty = 8 - d.state = &advancedState{} - d.compressionLevel = levels[level] - d.initDeflate() - d.fill = (*compressor).fillDeflate - d.step = (*compressor).deflateLazy - case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize: - d.w.logNewTablePenalty = 7 - d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize} - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeFast - default: - return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) - } - d.level = level - return nil -} - -// reset the state of the compressor. -func (d *compressor) reset(w io.Writer) { - d.w.reset(w) - d.sync = false - d.err = nil - // We only need to reset a few things for Snappy. - if d.fast != nil { - d.fast.Reset() - d.windowEnd = 0 - d.tokens.Reset() - return - } - switch d.compressionLevel.chain { - case 0: - // level was NoCompression or ConstantCompression. - d.windowEnd = 0 - default: - s := d.state - s.chainHead = -1 - for i := range s.hashHead { - s.hashHead[i] = 0 - } - for i := range s.hashPrev { - s.hashPrev[i] = 0 - } - s.hashOffset = 1 - s.index, d.windowEnd = 0, 0 - d.blockStart, d.byteAvailable = 0, false - d.tokens.Reset() - s.length = minMatchLength - 1 - s.offset = 0 - s.ii = 0 - s.maxInsertIndex = 0 - } -} - -func (d *compressor) close() error { - if d.err != nil { - return d.err - } - d.sync = true - d.step(d) - if d.err != nil { - return d.err - } - if d.w.writeStoredHeader(0, true); d.w.err != nil { - return d.w.err - } - d.w.flush() - d.w.reset(nil) - return d.w.err -} - -// NewWriter returns a new Writer compressing data at the given level. -// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); -// higher levels typically run slower but compress more. 
-// Level 0 (NoCompression) does not attempt any compression; it only adds the -// necessary DEFLATE framing. -// Level -1 (DefaultCompression) uses the default compression level. -// Level -2 (ConstantCompression) will use Huffman compression only, giving -// a very fast compression for all types of input, but sacrificing considerable -// compression efficiency. -// -// If level is in the range [-2, 9] then the error returned will be nil. -// Otherwise the error returned will be non-nil. -func NewWriter(w io.Writer, level int) (*Writer, error) { - var dw Writer - if err := dw.d.init(w, level); err != nil { - return nil, err - } - return &dw, nil -} - -// NewWriterDict is like NewWriter but initializes the new -// Writer with a preset dictionary. The returned Writer behaves -// as if the dictionary had been written to it without producing -// any compressed output. The compressed data written to w -// can only be decompressed by a Reader initialized with the -// same dictionary. -func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { - zw, err := NewWriter(w, level) - if err != nil { - return nil, err - } - zw.d.fillWindow(dict) - zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. - return zw, err -} - -// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. -const MinCustomWindowSize = 32 - -// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. -const MaxCustomWindowSize = windowSize - -// NewWriterWindow returns a new Writer compressing data with a custom window size. -// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. -func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { - if windowSize < MinCustomWindowSize { - return nil, errors.New("flate: requested window size less than MinWindowSize") - } - if windowSize > MaxCustomWindowSize { - return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize") - } - var dw Writer - if err := dw.d.init(w, -windowSize); err != nil { - return nil, err - } - return &dw, nil -} - -// A Writer takes data written to it and writes the compressed -// form of that data to an underlying writer (see NewWriter). -type Writer struct { - d compressor - dict []byte -} - -// Write writes data to w, which will eventually write the -// compressed form of data to its underlying writer. -func (w *Writer) Write(data []byte) (n int, err error) { - return w.d.write(data) -} - -// Flush flushes any pending data to the underlying writer. -// It is useful mainly in compressed network protocols, to ensure that -// a remote reader has enough data to reconstruct a packet. -// Flush does not return until the data has been written. -// Calling Flush when there is no pending data still causes the Writer -// to emit a sync marker of at least 4 bytes. -// If the underlying writer returns an error, Flush returns that error. -// -// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. -func (w *Writer) Flush() error { - // For more about flushing: - // http://www.bolet.org/~pornin/deflate-flush.html - return w.d.syncFlush() -} - -// Close flushes and closes the writer. -func (w *Writer) Close() error { - return w.d.close() -} - -// Reset discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level and dictionary. 
-func (w *Writer) Reset(dst io.Writer) { - if len(w.dict) > 0 { - // w was created with NewWriterDict - w.d.reset(dst) - if dst != nil { - w.d.fillWindow(w.dict) - } - } else { - // w was created with NewWriter - w.d.reset(dst) - } -} - -// ResetDict discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level, but sets a specific dictionary. -func (w *Writer) ResetDict(dst io.Writer, dict []byte) { - w.dict = dict - w.d.reset(dst) - w.d.fillWindow(w.dict) -} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go deleted file mode 100644 index bb36351a5..000000000 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// dictDecoder implements the LZ77 sliding dictionary as used in decompression. -// LZ77 decompresses data through sequences of two forms of commands: -// -// - Literal insertions: Runs of one or more symbols are inserted into the data -// stream as is. This is accomplished through the writeByte method for a -// single symbol, or combinations of writeSlice/writeMark for multiple symbols. -// Any valid stream must start with a literal insertion if no preset dictionary -// is used. -// -// - Backward copies: Runs of one or more symbols are copied from previously -// emitted data. Backward copies come as the tuple (dist, length) where dist -// determines how far back in the stream to copy from and length determines how -// many bytes to copy. Note that it is valid for the length to be greater than -// the distance. Since LZ77 uses forward copies, that situation is used to -// perform a form of run-length encoding on repeated runs of symbols. -// The writeCopy and tryWriteCopy are used to implement this command. -// -// For performance reasons, this implementation performs little to no sanity -// checks about the arguments. As such, the invariants documented for each -// method call must be respected. -type dictDecoder struct { - hist []byte // Sliding window history - - // Invariant: 0 <= rdPos <= wrPos <= len(hist) - wrPos int // Current output position in buffer - rdPos int // Have emitted hist[:rdPos] already - full bool // Has a full window length been written yet? -} - -// init initializes dictDecoder to have a sliding window dictionary of the given -// size. If a preset dict is provided, it will initialize the dictionary with -// the contents of dict. -func (dd *dictDecoder) init(size int, dict []byte) { - *dd = dictDecoder{hist: dd.hist} - - if cap(dd.hist) < size { - dd.hist = make([]byte, size) - } - dd.hist = dd.hist[:size] - - if len(dict) > len(dd.hist) { - dict = dict[len(dict)-len(dd.hist):] - } - dd.wrPos = copy(dd.hist, dict) - if dd.wrPos == len(dd.hist) { - dd.wrPos = 0 - dd.full = true - } - dd.rdPos = dd.wrPos -} - -// histSize reports the total amount of historical data in the dictionary. -func (dd *dictDecoder) histSize() int { - if dd.full { - return len(dd.hist) - } - return dd.wrPos -} - -// availRead reports the number of bytes that can be flushed by readFlush. -func (dd *dictDecoder) availRead() int { - return dd.wrPos - dd.rdPos -} - -// availWrite reports the available amount of output buffer space. 
-func (dd *dictDecoder) availWrite() int { - return len(dd.hist) - dd.wrPos -} - -// writeSlice returns a slice of the available buffer to write data to. -// -// This invariant will be kept: len(s) <= availWrite() -func (dd *dictDecoder) writeSlice() []byte { - return dd.hist[dd.wrPos:] -} - -// writeMark advances the writer pointer by cnt. -// -// This invariant must be kept: 0 <= cnt <= availWrite() -func (dd *dictDecoder) writeMark(cnt int) { - dd.wrPos += cnt -} - -// writeByte writes a single byte to the dictionary. -// -// This invariant must be kept: 0 < availWrite() -func (dd *dictDecoder) writeByte(c byte) { - dd.hist[dd.wrPos] = c - dd.wrPos++ -} - -// writeCopy copies a string at a given (dist, length) to the output. -// This returns the number of bytes copied and may be less than the requested -// length if the available space in the output buffer is too small. -// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) writeCopy(dist, length int) int { - dstBase := dd.wrPos - dstPos := dstBase - srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } - - // Copy non-overlapping section after destination position. - // - // This section is non-overlapping in that the copy length for this section - // is always less than or equal to the backwards distance. This can occur - // if a distance refers to data that wraps-around in the buffer. - // Thus, a backwards copy is performed here; that is, the exact bytes in - // the source prior to the copy is placed in the destination. - if srcPos < 0 { - srcPos += len(dd.hist) - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) - srcPos = 0 - } - - // Copy possibly overlapping section before destination position. - // - // This section can overlap if the copy length for this section is larger - // than the backwards distance. This is allowed by LZ77 so that repeated - // strings can be succinctly represented using (dist, length) pairs. - // Thus, a forwards copy is performed here; that is, the bytes copied is - // possibly dependent on the resulting bytes in the destination as the copy - // progresses along. This is functionally equivalent to the following: - // - // for i := 0; i < endPos-dstPos; i++ { - // dd.hist[dstPos+i] = dd.hist[srcPos+i] - // } - // dstPos = endPos - // - for dstPos < endPos { - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// tryWriteCopy tries to copy a string at a given (distance, length) to the -// output. This specialized version is optimized for short distances. -// -// This method is designed to be inlined for performance reasons. -// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) tryWriteCopy(dist, length int) int { - dstPos := dd.wrPos - endPos := dstPos + length - if dstPos < dist || endPos > len(dd.hist) { - return 0 - } - dstBase := dstPos - srcPos := dstPos - dist - - // Copy possibly overlapping section before destination position. -loop: - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - if dstPos < endPos { - goto loop // Avoid for-loop so that this function can be inlined - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// readFlush returns a slice of the historical buffer that is ready to be -// emitted to the user. The data returned by readFlush must be fully consumed -// before calling any other dictDecoder methods. 
-func (dd *dictDecoder) readFlush() []byte { - toRead := dd.hist[dd.rdPos:dd.wrPos] - dd.rdPos = dd.wrPos - if dd.wrPos == len(dd.hist) { - dd.wrPos, dd.rdPos = 0, 0 - dd.full = true - } - return toRead -} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go deleted file mode 100644 index c8124b5c4..000000000 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Modified for deflate by Klaus Post (c) 2015. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" -) - -type fastEnc interface { - Encode(dst *tokens, src []byte) - Reset() -} - -func newFastEnc(level int) fastEnc { - switch level { - case 1: - return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} - case 2: - return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} - case 3: - return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} - case 4: - return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} - case 5: - return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} - case 6: - return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} - default: - panic("invalid level specified") - } -} - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. - baseMatchOffset = 1 // The smallest match offset - baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 - maxMatchOffset = 1 << 15 // The largest match offset - - bTableBits = 17 // Bits used in the big tables - bTableSize = 1 << bTableBits // Size of the table - allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. - bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. -) - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -type tableEntry struct { - offset int32 -} - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type fastGen struct { - hist []byte - cur int32 -} - -func (e *fastGen) addBlock(src []byte) int32 { - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.hist = make([]byte, 0, allocHistory) - } else { - if cap(e.hist) < maxMatchOffset*2 { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - maxMatchOffset - // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) - *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:maxMatchOffset] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -type tableEntryPrev struct { - Cur tableEntry - Prev tableEntry -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. 
-func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) -} - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} - -// matchlen will return the match length between offsets and t in src. -// The maximum length returned is maxMatchLength - 4. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlen(s, t int32, src []byte) int32 { - if debugDecode { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) -} - -// matchlenLong will return the match length between offsets and t in src. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDeflate { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastGen) Reset() { - if cap(e.hist) < allocHistory { - e.hist = make([]byte, 0, allocHistory) - } - // We offset current position so everything will be out of reach. - // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. - if e.cur <= bufferReset { - e.cur += maxMatchOffset + int32(len(e.hist)) - } - e.hist = e.hist[:0] -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go deleted file mode 100644 index f70594c34..000000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ /dev/null @@ -1,1182 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // The largest offset code. - offsetCodeCount = 30 - - // The special code used to mark the end of a block. - endBlockMarker = 256 - - // The first length code. - lengthCodesStart = 257 - - // The number of codegen codes. 
- codegenCodeCount = 19 - badCode = 255 - - // maxPredefinedTokens is the maximum number of tokens - // where we check if fixed size is smaller. - maxPredefinedTokens = 250 - - // bufferFlushSize indicates the buffer size - // after which bytes are flushed to the writer. - // Should preferably be a multiple of 6, since - // we accumulate 6 bytes between writes to the buffer. - bufferFlushSize = 246 -) - -// Minimum length code that emits bits. -const lengthExtraBitsMinCode = 8 - -// The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]uint8{ - /* 257 */ 0, 0, 0, - /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, - /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, - /* 280 */ 4, 5, 5, 5, 5, 0, -} - -// The length indicated by length code X - LENGTH_CODES_START. -var lengthBase = [32]uint8{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, - 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, - 64, 80, 96, 112, 128, 160, 192, 224, 255, -} - -// Minimum offset code that emits bits. -const offsetExtraBitsMinCode = 4 - -// offset code word extra bits. -var offsetExtraBits = [32]int8{ - 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, - 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, - /* extended window */ - 14, 14, -} - -var offsetCombined = [32]uint32{} - -func init() { - var offsetBase = [32]uint32{ - /* normal deflate */ - 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, - 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, - 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, - 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, - 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, - 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, - - /* extended window */ - 0x008000, 0x00c000, - } - - for i := range offsetCombined[:] { - // Don't use extended window values... - if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { - continue - } - offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) - } -} - -// The odd order in which the codegen code sizes are written. -var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} - -type huffmanBitWriter struct { - // writer is the underlying writer. - // Do not use it directly; use the write method, which ensures - // that Write errors are sticky. - writer io.Writer - - // Data waiting to be written is bytes[0:nbytes] - // and then the low nbits of bits. - bits uint64 - nbits uint8 - nbytes uint8 - lastHuffMan bool - literalEncoding *huffmanEncoder - tmpLitEncoding *huffmanEncoder - offsetEncoding *huffmanEncoder - codegenEncoding *huffmanEncoder - err error - lastHeader int - // Set between 0 (reused block can be up to 2x the size) - logNewTablePenalty uint - bytes [256 + 8]byte - literalFreq [lengthCodesStart + 32]uint16 - offsetFreq [32]uint16 - codegenFreq [codegenCodeCount]uint16 - - // codegen must have an extra space for the final symbol. - codegen [literalCount + offsetCodeCount + 1]uint8 -} - -// Huffman reuse. -// -// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. -// -// This is controlled by several variables: -// -// If lastHeader is non-zero the Huffman table can be reused. -// This also indicates that a Huffman table has been generated that can output all -// possible symbols. -// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated -// an EOB with the previous table must be written. -// -// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. 
-// -// An incoming block estimates the output size of a new table using a 'fresh' by calculating the -// optimal size and adding a penalty in 'logNewTablePenalty'. -// A Huffman table is not optimal, which is why we add a penalty, and generating a new table -// is slower both for compression and decompression. - -func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { - return &huffmanBitWriter{ - writer: w, - literalEncoding: newHuffmanEncoder(literalCount), - tmpLitEncoding: newHuffmanEncoder(literalCount), - codegenEncoding: newHuffmanEncoder(codegenCodeCount), - offsetEncoding: newHuffmanEncoder(offsetCodeCount), - } -} - -func (w *huffmanBitWriter) reset(writer io.Writer) { - w.writer = writer - w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil - w.lastHeader = 0 - w.lastHuffMan = false -} - -func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { - a := t.offHist[:offsetCodeCount] - b := w.offsetEncoding.codes - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.extraHist[:literalCount-256] - b = w.literalEncoding.codes[256:literalCount] - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.litHist[:256] - b = w.literalEncoding.codes[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - return true -} - -func (w *huffmanBitWriter) flush() { - if w.err != nil { - w.nbits = 0 - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - n := w.nbytes - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - if w.nbits > 8 { // Avoid underflow - w.nbits -= 8 - } else { - w.nbits = 0 - } - n++ - } - w.bits = 0 - w.write(w.bytes[:n]) - w.nbytes = 0 -} - -func (w *huffmanBitWriter) write(b []byte) { - if w.err != nil { - return - } - _, w.err = w.writer.Write(b) -} - -func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { - w.bits |= uint64(b) << (w.nbits & 63) - w.nbits += nb - if w.nbits >= 48 { - w.writeOutBits() - } -} - -func (w *huffmanBitWriter) writeBytes(bytes []byte) { - if w.err != nil { - return - } - n := w.nbytes - if w.nbits&7 != 0 { - w.err = InternalError("writeBytes with unfinished bits") - return - } - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - w.nbits -= 8 - n++ - } - if n != 0 { - w.write(w.bytes[:n]) - } - w.nbytes = 0 - w.write(bytes) -} - -// RFC 1951 3.2.7 specifies a special run-length encoding for specifying -// the literal and offset lengths arrays (which are concatenated into a single -// array). This method generates that run-length encoding. -// -// The result is written into the codegen array, and the frequencies -// of each code is written into the codegenFreq array. -// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional -// information. Code badCode is an end marker -// -// numLiterals The number of literals in literalEncoding -// numOffsets The number of offsets in offsetEncoding -// litenc, offenc The literal and offset encoder to use -func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { - for i := range w.codegenFreq { - w.codegenFreq[i] = 0 - } - // Note that we are using codegen both as a temporary variable for holding - // a copy of the frequencies, and as the place where we put the result. - // This is fine because the output is always shorter than the input used - // so far. 
- codegen := w.codegen[:] // cache - // Copy the concatenated code sizes to codegen. Put a marker at the end. - cgnl := codegen[:numLiterals] - for i := range cgnl { - cgnl[i] = litEnc.codes[i].len() - } - - cgnl = codegen[numLiterals : numLiterals+numOffsets] - for i := range cgnl { - cgnl[i] = offEnc.codes[i].len() - } - codegen[numLiterals+numOffsets] = badCode - - size := codegen[0] - count := 1 - outIndex := 0 - for inIndex := 1; size != badCode; inIndex++ { - // INVARIANT: We have seen "count" copies of size that have not yet - // had output generated for them. - nextSize := codegen[inIndex] - if nextSize == size { - count++ - continue - } - // We need to generate codegen indicating "count" of size. - if size != 0 { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - count-- - for count >= 3 { - n := 6 - if n > count { - n = count - } - codegen[outIndex] = 16 - outIndex++ - codegen[outIndex] = uint8(n - 3) - outIndex++ - w.codegenFreq[16]++ - count -= n - } - } else { - for count >= 11 { - n := 138 - if n > count { - n = count - } - codegen[outIndex] = 18 - outIndex++ - codegen[outIndex] = uint8(n - 11) - outIndex++ - w.codegenFreq[18]++ - count -= n - } - if count >= 3 { - // count >= 3 && count <= 10 - codegen[outIndex] = 17 - outIndex++ - codegen[outIndex] = uint8(count - 3) - outIndex++ - w.codegenFreq[17]++ - count = 0 - } - } - count-- - for ; count >= 0; count-- { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - } - // Set up invariant for next time through the loop. - size = nextSize - count = 1 - } - // Marker indicating the end of the codegen. - codegen[outIndex] = badCode -} - -func (w *huffmanBitWriter) codegens() int { - numCodegens := len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return numCodegens -} - -func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { - numCodegens = len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return 3 + 5 + 5 + 4 + (3 * numCodegens) + - w.codegenEncoding.bitLength(w.codegenFreq[:]) + - int(w.codegenFreq[16])*2 + - int(w.codegenFreq[17])*3 + - int(w.codegenFreq[18])*7, numCodegens -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { - size = litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) - return size -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { - header, numCodegens := w.headerSize() - size = header + - litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) + - extraBits - return size, numCodegens -} - -// extraBitSize will return the number of bits that will be written -// as "extra" bits on matches. -func (w *huffmanBitWriter) extraBitSize() int { - total := 0 - for i, n := range w.literalFreq[257:literalCount] { - total += int(n) * int(lengthExtraBits[i&31]) - } - for i, n := range w.offsetFreq[:offsetCodeCount] { - total += int(n) * int(offsetExtraBits[i&31]) - } - return total -} - -// fixedSize returns the size of dynamically encoded data in bits. 
-func (w *huffmanBitWriter) fixedSize(extraBits int) int { - return 3 + - fixedLiteralEncoding.bitLength(w.literalFreq[:]) + - fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + - extraBits -} - -// storedSize calculates the stored size, including header. -// The function returns the size in bits and whether the block -// fits inside a single block. -func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { - if in == nil { - return 0, false - } - if len(in) <= maxStoreBlockSize { - return (len(in) + 5) * 8, true - } - return 0, false -} - -func (w *huffmanBitWriter) writeCode(c hcode) { - // The function does not get inlined if we "& 63" the shift. - w.bits |= c.code64() << (w.nbits & 63) - w.nbits += c.len() - if w.nbits >= 48 { - w.writeOutBits() - } -} - -// writeOutBits will write bits to the buffer. -func (w *huffmanBitWriter) writeOutBits() { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - - // We over-write, but faster... - binary.LittleEndian.PutUint64(w.bytes[n:], bits) - n += 6 - - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) - n = 0 - } - - w.nbytes = n -} - -// Write the header of a dynamic Huffman block to the output stream. -// -// numLiterals The number of literals specified in codegen -// numOffsets The number of offsets specified in codegen -// numCodegens The number of codegens used in codegen -func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { - if w.err != nil { - return - } - var firstBits int32 = 4 - if isEof { - firstBits = 5 - } - w.writeBits(firstBits, 3) - w.writeBits(int32(numLiterals-257), 5) - w.writeBits(int32(numOffsets-1), 5) - w.writeBits(int32(numCodegens-4), 4) - - for i := 0; i < numCodegens; i++ { - value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) - w.writeBits(int32(value), 3) - } - - i := 0 - for { - var codeWord = uint32(w.codegen[i]) - i++ - if codeWord == badCode { - break - } - w.writeCode(w.codegenEncoding.codes[codeWord]) - - switch codeWord { - case 16: - w.writeBits(int32(w.codegen[i]), 2) - i++ - case 17: - w.writeBits(int32(w.codegen[i]), 3) - i++ - case 18: - w.writeBits(int32(w.codegen[i]), 7) - i++ - } - } -} - -// writeStoredHeader will write a stored header. -// If the stored block is only used for EOF, -// it is replaced with a fixed huffman block. -func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. - if length == 0 && isEof { - w.writeFixedHeader(isEof) - // EOB: 7 bits, value: 0 - w.writeBits(0, 7) - w.flush() - return - } - - var flag int32 - if isEof { - flag = 1 - } - w.writeBits(flag, 3) - w.flush() - w.writeBits(int32(length), 16) - w.writeBits(int32(^uint16(length)), 16) -} - -func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // Indicate that we are a fixed Huffman block - var value int32 = 2 - if isEof { - value = 3 - } - w.writeBits(value, 3) -} - -// writeBlock will write a block of tokens with the smallest encoding. -// The original input can be supplied, and if the huffman encoded data -// is larger than the original bytes, the data will be written as a -// stored block. 
-// If the input is nil, the tokens will always be Huffman encoded. -func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { - if w.err != nil { - return - } - - tokens.AddEOB() - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - numLiterals, numOffsets := w.indexTokens(tokens, false) - w.generate() - var extraBits int - storedSize, storable := w.storedSize(input) - if storable { - extraBits = w.extraBitSize() - } - - // Figure out smallest code. - // Fixed Huffman baseline. - var literalEncoding = fixedLiteralEncoding - var offsetEncoding = fixedOffsetEncoding - var size = math.MaxInt32 - if tokens.n < maxPredefinedTokens { - size = w.fixedSize(extraBits) - } - - // Dynamic Huffman? - var numCodegens int - - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - if dynamicSize < size { - size = dynamicSize - literalEncoding = w.literalEncoding - offsetEncoding = w.offsetEncoding - } - - // Stored bytes? - if storable && storedSize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Huffman. - if literalEncoding == fixedLiteralEncoding { - w.writeFixedHeader(eof) - } else { - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - } - - // Write the tokens. - w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) -} - -// writeBlockDynamic encodes a block using a dynamic Huffman table. -// This should be used if the symbols used have a disproportionate -// histogram distribution. -// If input is supplied and the compression savings are below 1/16th of the -// input size the block is stored. -func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - sync = sync || eof - if sync { - tokens.AddEOB() - } - - // We cannot reuse pure huffman table, and must mark as EOF. - if (w.lastHuffMan || eof) && w.lastHeader > 0 { - // We will not try to reuse. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } - - // fillReuse enables filling of empty values. - // This will make encodings always reusable without testing. - // However, this does not appear to benefit on most cases. - const fillReuse = false - - // Check if we can reuse... - if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - numLiterals, numOffsets := w.indexTokens(tokens, !sync) - extraBits := 0 - ssize, storable := w.storedSize(input) - - const usePrefs = true - if storable || w.lastHeader > 0 { - extraBits = w.extraBitSize() - } - - var size int - - // Check if we should reuse. - if w.lastHeader > 0 { - // Estimate size for using a new table. - // Use the previous header size as the best estimate. - newSize := w.lastHeader + tokens.EstimatedBits() - newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty - - // The estimated size is calculated as an optimal table. - // We add a penalty to make it more realistic and re-use a bit more. 
- reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits - - // Check if a new table is better. - if newSize < reuseSize { - // Write the EOB we owe. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - size = newSize - w.lastHeader = 0 - } else { - size = reuseSize - } - - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - - // We want a new block/table - if w.lastHeader == 0 { - if fillReuse && !sync { - w.fillTokens() - numLiterals, numOffsets = maxNumLit, maxNumDist - } else { - w.literalFreq[endBlockMarker] = 1 - } - - w.generate() - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - - var numCodegens int - if fillReuse && !sync { - // Reindex for accurate size... - w.indexTokens(tokens, true) - } - size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - // Store predefined, if we don't get a reasonable improvement. - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - - if storable && ssize <= size { - // Store bytes, if we don't get an improvement. - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Write Huffman table. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - if !sync { - w.lastHeader, _ = w.headerSize() - } - w.lastHuffMan = false - } - - if sync { - w.lastHeader = 0 - } - // Write the tokens. - w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) -} - -func (w *huffmanBitWriter) fillTokens() { - for i, v := range w.literalFreq[:literalCount] { - if v == 0 { - w.literalFreq[i] = 1 - } - } - for i, v := range w.offsetFreq[:offsetCodeCount] { - if v == 0 { - w.offsetFreq[i] = 1 - } - } -} - -// indexTokens indexes a slice of tokens, and updates -// literalFreq and offsetFreq, and generates literalEncoding -// and offsetEncoding. -// The number of literal and offset tokens is returned. 
-func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { - //copy(w.literalFreq[:], t.litHist[:]) - *(*[256]uint16)(w.literalFreq[:]) = t.litHist - //copy(w.literalFreq[256:], t.extraHist[:]) - *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist - w.offsetFreq = t.offHist - - if t.n == 0 { - return - } - if filled { - return maxNumLit, maxNumDist - } - // get the number of literals - numLiterals = len(w.literalFreq) - for w.literalFreq[numLiterals-1] == 0 { - numLiterals-- - } - // get the number of offsets - numOffsets = len(w.offsetFreq) - for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { - numOffsets-- - } - if numOffsets == 0 { - // We haven't found a single match. If we want to go with the dynamic encoding, - // we should count at least one offset to be sure that the offset huffman tree could be encoded. - w.offsetFreq[0] = 1 - numOffsets = 1 - } - return -} - -func (w *huffmanBitWriter) generate() { - w.literalEncoding.generate(w.literalFreq[:literalCount], 15) - w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeTokens writes a slice of tokens to the output. -// codes for literal and offset encoding must be supplied. -func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { - if w.err != nil { - return - } - if len(tokens) == 0 { - return - } - - // Only last token should be endBlockMarker. - var deferEOB bool - if tokens[len(tokens)-1] == endBlockMarker { - tokens = tokens[:len(tokens)-1] - deferEOB = true - } - - // Create slices up to the next power of two to avoid bounds checks. - lits := leCodes[:256] - offs := oeCodes[:32] - lengths := leCodes[lengthCodesStart:] - lengths = lengths[:32] - - // Go 1.16 LOVES having these on stack. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - for _, t := range tokens { - if t < 256 { - //w.writeCode(lits[t.literal()]) - c := lits[t] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - continue - } - - // Write the length - length := t.length() - lengthCode := lengthCode(length) & 31 - if false { - w.writeCode(lengths[lengthCode]) - } else { - // inlined - c := lengths[lengthCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if lengthCode >= lengthExtraBitsMinCode { - extraLengthBits := lengthExtraBits[lengthCode] - //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode]) - bits |= uint64(extraLength) << (nbits & 63) - nbits += extraLengthBits - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - // Write the offset - offset := t.offset() - offsetCode := (offset >> 16) & 31 - if false { 
- w.writeCode(offs[offsetCode]) - } else { - // inlined - c := offs[offsetCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if offsetCode >= offsetExtraBitsMinCode { - offsetComb := offsetCombined[offsetCode] - //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) - nbits += uint8(offsetComb) - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if deferEOB { - w.writeCode(leCodes[endBlockMarker]) - } -} - -// huffOffset is a static offset encoder used for huffman only encoding. -// It can be reused since we will not be encoding offset values. -var huffOffset *huffmanEncoder - -func init() { - w := newHuffmanBitWriter(nil) - w.offsetFreq[0] = 1 - huffOffset = newHuffmanEncoder(offsetCodeCount) - huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeBlockHuff encodes a block of bytes as either -// Huffman encoded literals or uncompressed bytes if the -// results only gains very little from compression. -func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - // Clear histogram - for i := range w.literalFreq[:] { - w.literalFreq[i] = 0 - } - if !w.lastHuffMan { - for i := range w.offsetFreq[:] { - w.offsetFreq[i] = 0 - } - } - - const numLiterals = endBlockMarker + 1 - const numOffsets = 1 - - // Add everything as literals - // We have to estimate the header size. - // Assume header is around 70 bytes: - // https://stackoverflow.com/a/25454430 - const guessHeaderSizeBits = 70 * 8 - histogram(input, w.literalFreq[:numLiterals]) - ssize, storable := w.storedSize(input) - if storable && len(input) > 1024 { - // Quick check for incompressible content. - abs := float64(0) - avg := float64(len(input)) / 256 - max := float64(len(input) * 2) - for _, v := range w.literalFreq[:256] { - diff := float64(v) - avg - abs += diff * diff - if abs > max { - break - } - } - if abs < max { - if debugDeflate { - fmt.Println("stored", abs, "<", max) - } - // No chance we can compress this... - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - w.literalFreq[endBlockMarker] = 1 - w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) - estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) - if estBits < math.MaxInt32 { - estBits += w.lastHeader - if w.lastHeader == 0 { - estBits += guessHeaderSizeBits - } - estBits += estBits >> w.logNewTablePenalty - } - - // Store bytes, if we don't get a reasonable improvement. 
- if storable && ssize <= estBits { - if debugDeflate { - fmt.Println("stored,", ssize, "<=", estBits) - } - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - if w.lastHeader > 0 { - reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) - - if estBits < reuseSize { - if debugDeflate { - fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") - } - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } else if debugDeflate { - fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) - } - } - - count := 0 - if w.lastHeader == 0 { - // Use the temp encoding, so swap. - w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - numCodegens := w.codegens() - - // Huffman. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - w.lastHuffMan = true - w.lastHeader, _ = w.headerSize() - if debugDeflate { - count += w.lastHeader - fmt.Println("header:", count/8) - } - } - - encoding := w.literalEncoding.codes[:256] - // Go 1.16 LOVES having these on stack. At least 1.5x the speed. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - if debugDeflate { - count -= int(nbytes)*8 + int(nbits) - } - // Unroll, write 3 codes/loop. - // Fastest number of unrolls. - for len(input) > 3 { - // We must have at least 48 bits free. - if nbits >= 8 { - n := nbits >> 3 - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - bits >>= (n * 8) & 63 - nbits -= n * 8 - nbytes += n - } - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - a, b := encoding[input[0]], encoding[input[1]] - bits |= a.code64() << (nbits & 63) - bits |= b.code64() << ((nbits + a.len()) & 63) - c := encoding[input[2]] - nbits += b.len() + a.len() - bits |= c.code64() << (nbits & 63) - nbits += c.len() - input = input[3:] - } - - // Remaining... - for _, t := range input { - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= c.code64() << (nbits & 63) - - nbits += c.len() - if debugDeflate { - count += int(c.len()) - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if debugDeflate { - nb := count + int(nbytes)*8 + int(nbits) - fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") - } - // Flush if needed to have space. 
- if w.nbits >= 48 { - w.writeOutBits() - } - - if eof || sync { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go deleted file mode 100644 index be7b58b47..000000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "math" - "math/bits" -) - -const ( - maxBitsLimit = 16 - // number of valid literals - literalCount = 286 -) - -// hcode is a huffman code with a bit code and bit length. -type hcode uint32 - -func (h hcode) len() uint8 { - return uint8(h) -} - -func (h hcode) code64() uint64 { - return uint64(h >> 8) -} - -func (h hcode) zero() bool { - return h == 0 -} - -type huffmanEncoder struct { - codes []hcode - bitCount [17]int32 - - // Allocate a reusable buffer with the longest possible frequency table. - // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. - // The largest of these is literalCount, so we allocate for that case. - freqcache [literalCount + 1]literalNode -} - -type literalNode struct { - literal uint16 - freq uint16 -} - -// A levelInfo describes the state of the constructed tree for a given depth. -type levelInfo struct { - // Our level. for better printing - level int32 - - // The frequency of the last node at this level - lastFreq int32 - - // The frequency of the next character to add to this level - nextCharFreq int32 - - // The frequency of the next pair (from level below) to add to this level. - // Only valid if the "needed" value of the next lower level is 0. - nextPairFreq int32 - - // The number of chains remaining to generate for this level before moving - // up to the next level - needed int32 -} - -// set sets the code and length of an hcode. -func (h *hcode) set(code uint16, length uint8) { - *h = hcode(length) | (hcode(code) << 8) -} - -func newhcode(code uint16, length uint8) hcode { - return hcode(length) | (hcode(code) << 8) -} - -func reverseBits(number uint16, bitLength byte) uint16 { - return bits.Reverse16(number << ((16 - bitLength) & 15)) -} - -func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} } - -func newHuffmanEncoder(size int) *huffmanEncoder { - // Make capacity to next power of two. - c := uint(bits.Len32(uint32(size - 1))) - return &huffmanEncoder{codes: make([]hcode, size, 1<= 3 -// The cases of 0, 1, and 2 literals are handled by special case code. -// -// list An array of the literals with non-zero frequencies -// -// and their associated frequencies. The array is in order of increasing -// frequency, and has as its last element a special element with frequency -// MaxInt32 -// -// maxBits The maximum number of bits that should be used to encode any literal. -// -// Must be less than 16. -// -// return An integer array in which array[i] indicates the number of literals -// -// that should be encoded in i bits. -func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { - if maxBits >= maxBitsLimit { - panic("flate: maxBits too large") - } - n := int32(len(list)) - list = list[0 : n+1] - list[n] = maxNode() - - // The tree can't have greater depth than n - 1, no matter what. 
This - // saves a little bit of work in some small cases - if maxBits > n-1 { - maxBits = n - 1 - } - - // Create information about each of the levels. - // A bogus "Level 0" whose sole purpose is so that - // level1.prev.needed==0. This makes level1.nextPairFreq - // be a legitimate value that never gets chosen. - var levels [maxBitsLimit]levelInfo - // leafCounts[i] counts the number of literals at the left - // of ancestors of the rightmost node at level i. - // leafCounts[i][j] is the number of literals at the left - // of the level j ancestor. - var leafCounts [maxBitsLimit][maxBitsLimit]int32 - - // Descending to only have 1 bounds check. - l2f := int32(list[2].freq) - l1f := int32(list[1].freq) - l0f := int32(list[0].freq) + int32(list[1].freq) - - for level := int32(1); level <= maxBits; level++ { - // For every level, the first two items are the first two characters. - // We initialize the levels as if we had already figured this out. - levels[level] = levelInfo{ - level: level, - lastFreq: l1f, - nextCharFreq: l2f, - nextPairFreq: l0f, - } - leafCounts[level][level] = 2 - if level == 1 { - levels[level].nextPairFreq = math.MaxInt32 - } - } - - // We need a total of 2*n - 2 items at top level and have already generated 2. - levels[maxBits].needed = 2*n - 4 - - level := uint32(maxBits) - for level < 16 { - l := &levels[level] - if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { - // We've run out of both leafs and pairs. - // End all calculations for this level. - // To make sure we never come back to this level or any lower level, - // set nextPairFreq impossibly large. - l.needed = 0 - levels[level+1].nextPairFreq = math.MaxInt32 - level++ - continue - } - - prevFreq := l.lastFreq - if l.nextCharFreq < l.nextPairFreq { - // The next item on this row is a leaf node. - n := leafCounts[level][level] + 1 - l.lastFreq = l.nextCharFreq - // Lower leafCounts are the same of the previous node. - leafCounts[level][level] = n - e := list[n] - if e.literal < math.MaxUint16 { - l.nextCharFreq = int32(e.freq) - } else { - l.nextCharFreq = math.MaxInt32 - } - } else { - // The next item on this row is a pair from the previous row. - // nextPairFreq isn't valid until we generate two - // more values in the level below - l.lastFreq = l.nextPairFreq - // Take leaf counts from the lower level, except counts[level] remains the same. - if true { - save := leafCounts[level][level] - leafCounts[level] = leafCounts[level-1] - leafCounts[level][level] = save - } else { - copy(leafCounts[level][:level], leafCounts[level-1][:level]) - } - levels[l.level-1].needed = 2 - } - - if l.needed--; l.needed == 0 { - // We've done everything we need to do for this level. - // Continue calculating one level up. Fill in nextPairFreq - // of that level with the sum of the two nodes we've just calculated on - // this level. - if l.level == maxBits { - // All done! - break - } - levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq - level++ - } else { - // If we stole from below, move down temporarily to replenish it. - for levels[level-1].needed > 0 { - level-- - } - } - } - - // Somethings is wrong if at the end, the top level is null or hasn't used - // all of the leaves. - if leafCounts[maxBits][maxBits] != n { - panic("leafCounts[maxBits][maxBits] != n") - } - - bitCount := h.bitCount[:maxBits+1] - bits := 1 - counts := &leafCounts[maxBits] - for level := maxBits; level > 0; level-- { - // chain.leafCount gives the number of literals requiring at least "bits" - // bits to encode. 
- bitCount[bits] = counts[level] - counts[level-1] - bits++ - } - return bitCount -} - -// Look at the leaves and assign them a bit count and an encoding as specified -// in RFC 1951 3.2.2 -func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { - code := uint16(0) - for n, bits := range bitCount { - code <<= 1 - if n == 0 || bits == 0 { - continue - } - // The literals list[len(list)-bits] .. list[len(list)-bits] - // are encoded using "bits" bits, and get the values - // code, code + 1, .... The code values are - // assigned in literal order (not frequency order). - chunk := list[len(list)-int(bits):] - - sortByLiteral(chunk) - for _, node := range chunk { - h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) - code++ - } - list = list[0 : len(list)-int(bits)] - } -} - -// Update this Huffman Code object to be the minimum code for the specified frequency count. -// -// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. -// maxBits The maximum number of bits to use for any literal. -func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { - list := h.freqcache[:len(freq)+1] - codes := h.codes[:len(freq)] - // Number of non-zero literals - count := 0 - // Set list to be the set of all non-zero literals and their frequencies - for i, f := range freq { - if f != 0 { - list[count] = literalNode{uint16(i), f} - count++ - } else { - codes[i] = 0 - } - } - list[count] = literalNode{} - - list = list[:count] - if count <= 2 { - // Handle the small cases here, because they are awkward for the general case code. With - // two or fewer literals, everything has bit length 1. - for i, node := range list { - // "list" is in order of increasing literal value. - h.codes[node.literal].set(uint16(i), 1) - } - return - } - sortByFreq(list) - - // Get the number of literals for each bit count - bitCount := h.bitCounts(list, maxBits) - // And do the assignment - h.assignEncodingAndSize(bitCount, list) -} - -// atLeastOne clamps the result between 1 and 15. -func atLeastOne(v float32) float32 { - if v < 1 { - return 1 - } - if v > 15 { - return 15 - } - return v -} - -func histogram(b []byte, h []uint16) { - if true && len(b) >= 8<<10 { - // Split for bigger inputs - histogramSplit(b, h) - } else { - h = h[:256] - for _, t := range b { - h[t]++ - } - } -} - -func histogramSplit(b []byte, h []uint16) { - // Tested, and slightly faster than 2-way. - // Writing to separate arrays and combining is also slightly slower. - h = h[:256] - for len(b)&3 != 0 { - h[b[0]]++ - b = b[1:] - } - n := len(b) / 4 - x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] - y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] - for i, t := range x { - v0 := &h[t] - v1 := &h[y[i]] - v3 := &h[w[i]] - v2 := &h[z[i]] - *v0++ - *v1++ - *v2++ - *v3++ - } -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go deleted file mode 100644 index 6c05ba8c1..000000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. 
-func sortByFreq(data []literalNode) { - n := len(data) - quickSortByFreq(data, 0, n, maxDepth(n)) -} - -func quickSortByFreq(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivotByFreq(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). - if mlo-a < b-mhi { - quickSortByFreq(data, a, mlo, maxDepth) - a = mhi // i.e., quickSortByFreq(data, mhi, b) - } else { - quickSortByFreq(data, mhi, b, maxDepth) - b = mlo // i.e., quickSortByFreq(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSortByFreq(data, a, b) - } -} - -func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. - s := (hi - lo) / 8 - medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) - medianOfThreeSortByFreq(data, m, m-s, m+s) - medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThreeSortByFreq(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { - } - b := a - for { - for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot - } - for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). - // Let's be a bit more conservative, and set border to 5. 
- protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot - } - for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSortByFreq(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// quickSortByFreq, loosely following Bentley and McIlroy, -// ``Engineering a Sort Function,'' SP&E November 1993. - -// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go deleted file mode 100644 index 93f1aea10..000000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. 
-func sortByLiteral(data []literalNode) { - n := len(data) - quickSort(data, 0, n, maxDepth(n)) -} - -func quickSort(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivot(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). - if mlo-a < b-mhi { - quickSort(data, a, mlo, maxDepth) - a = mhi // i.e., quickSort(data, mhi, b) - } else { - quickSort(data, mhi, b, maxDepth) - b = mlo // i.e., quickSort(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].literal < data[i-6].literal { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSort(data, a, b) - } -} -func heapSort(data []literalNode, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDown(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDown(data, lo, i, first) - } -} - -// siftDown implements the heap property on data[lo, hi). -// first is an offset into the array where the root of the heap lies. -func siftDown(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && data[first+child].literal < data[first+child+1].literal { - child++ - } - if data[first+root].literal > data[first+child].literal { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} -func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. - s := (hi - lo) / 8 - medianOfThree(data, lo, lo+s, lo+2*s) - medianOfThree(data, m, m-s, m+s) - medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThree(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && data[a].literal < data[pivot].literal; a++ { - } - b := a - for { - for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot - } - for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). - // Let's be a bit more conservative, and set border to 5. 
- protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].literal > data[pivot].literal { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot - } - for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSort(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && data[j].literal < data[j-1].literal; j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// maxDepth returns a threshold at which quicksort should switch -// to heapsort. It returns 2*ceil(lg(n+1)). -func maxDepth(n int) int { - var depth int - for i := n; i > 0; i >>= 1 { - depth++ - } - return depth * 2 -} - -// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThree(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].literal < data[m1].literal { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go deleted file mode 100644 index 0d7b437f1..000000000 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ /dev/null @@ -1,865 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package flate implements the DEFLATE compressed data format, described in -// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file -// formats. -package flate - -import ( - "bufio" - "compress/flate" - "fmt" - "io" - "math/bits" - "sync" -) - -const ( - maxCodeLen = 16 // max length of Huffman code - maxCodeLenMask = 15 // mask for max length of Huffman code - // The next three numbers come from the RFC section 3.2.7, with the - // additional proviso in section 3.2.5 which implies that distance codes - // 30 and 31 should never occur in compressed data. - maxNumLit = 286 - maxNumDist = 30 - numCodes = 19 // number of codes in Huffman meta-code - - debugDecode = false -) - -// Value of length - 3 and extra bits. 
-type lengthExtra struct { - length, extra uint8 -} - -var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// Initialize the fixedHuffmanDecoder only once upon first use. -var fixedOnce sync.Once -var fixedHuffmanDecoder huffmanDecoder - -// A CorruptInputError reports the presence of corrupt input at a given offset. -type CorruptInputError = flate.CorruptInputError - -// An InternalError reports an error in the flate code itself. -type InternalError string - -func (e InternalError) Error() string { return "flate: internal error: " + string(e) } - -// A ReadError reports an error encountered while reading input. -// -// Deprecated: No longer returned. -type ReadError = flate.ReadError - -// A WriteError reports an error encountered while writing output. -// -// Deprecated: No longer returned. -type WriteError = flate.WriteError - -// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to -// to switch to a new underlying Reader. This permits reusing a ReadCloser -// instead of allocating a new one. -type Resetter interface { - // Reset discards any buffered data and resets the Resetter as if it was - // newly initialized with the given reader. - Reset(r io.Reader, dict []byte) error -} - -// The data structure for decoding Huffman tables is based on that of -// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), -// For codes smaller than the table width, there are multiple entries -// (each combination of trailing bits has the same value). For codes -// larger than the table width, the table contains a link to an overflow -// table. The width of each entry in the link table is the maximum code -// size minus the chunk width. -// -// Note that you can do a lookup in the table even without all bits -// filled. Since the extra bits are zero, and the DEFLATE Huffman codes -// have the property that shorter codes come before longer ones, the -// bit length estimate in the result is a lower bound on the actual -// number of bits. 
-// -// See the following: -// http://www.gzip.org/algorithm.txt - -// chunk & 15 is number of bits -// chunk >> 4 is value, including table link - -const ( - huffmanChunkBits = 9 - huffmanNumChunks = 1 << huffmanChunkBits - huffmanCountMask = 15 - huffmanValueShift = 4 -) - -type huffmanDecoder struct { - maxRead int // the maximum number of bits we can read and not overread - chunks *[huffmanNumChunks]uint16 // chunks as described above - links [][]uint16 // overflow links - linkMask uint32 // mask the width of the link table -} - -// Initialize Huffman decoding tables from array of code lengths. -// Following this function, h is guaranteed to be initialized into a complete -// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a -// degenerate case where the tree has only a single symbol with length 1. Empty -// trees are permitted. -func (h *huffmanDecoder) init(lengths []int) bool { - // Sanity enables additional runtime tests during Huffman - // table construction. It's intended to be used during - // development to supplement the currently ad-hoc unit tests. - const sanity = false - - if h.chunks == nil { - h.chunks = new([huffmanNumChunks]uint16) - } - - if h.maxRead != 0 { - *h = huffmanDecoder{chunks: h.chunks, links: h.links} - } - - // Count number of codes of each length, - // compute maxRead and max length. - var count [maxCodeLen]int - var min, max int - for _, n := range lengths { - if n == 0 { - continue - } - if min == 0 || n < min { - min = n - } - if n > max { - max = n - } - count[n&maxCodeLenMask]++ - } - - // Empty tree. The decompressor.huffSym function will fail later if the tree - // is used. Technically, an empty tree is only valid for the HDIST tree and - // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree - // is guaranteed to fail since it will attempt to use the tree to decode the - // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is - // guaranteed to fail later since the compressed data section must be - // composed of at least one symbol (the end-of-block marker). - if max == 0 { - return true - } - - code := 0 - var nextcode [maxCodeLen]int - for i := min; i <= max; i++ { - code <<= 1 - nextcode[i&maxCodeLenMask] = code - code += count[i&maxCodeLenMask] - } - - // Check that the coding is complete (i.e., that we've - // assigned all 2-to-the-max possible bit sequences). - // Exception: To be compatible with zlib, we also need to - // accept degenerate single-code codings. See also - // TestDegenerateHuffmanCoding. - if code != 1< huffmanChunkBits { - numLinks := 1 << (uint(max) - huffmanChunkBits) - h.linkMask = uint32(numLinks - 1) - - // create link tables - link := nextcode[huffmanChunkBits+1] >> 1 - if cap(h.links) < huffmanNumChunks-link { - h.links = make([][]uint16, huffmanNumChunks-link) - } else { - h.links = h.links[:huffmanNumChunks-link] - } - for j := uint(link); j < huffmanNumChunks; j++ { - reverse := int(bits.Reverse16(uint16(j))) - reverse >>= uint(16 - huffmanChunkBits) - off := j - uint(link) - if sanity && h.chunks[reverse] != 0 { - panic("impossible: overwriting existing chunk") - } - h.chunks[reverse] = uint16(off<>= uint(16 - n) - if n <= huffmanChunkBits { - for off := reverse; off < len(h.chunks); off += 1 << uint(n) { - // We should never need to overwrite - // an existing chunk. Also, 0 is - // never a valid chunk, because the - // lower 4 "count" bits should be - // between 1 and 15. 
- if sanity && h.chunks[off] != 0 { - panic("impossible: overwriting existing chunk") - } - h.chunks[off] = chunk - } - } else { - j := reverse & (huffmanNumChunks - 1) - if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { - // Longer codes should have been - // associated with a link table above. - panic("impossible: not an indirect chunk") - } - value := h.chunks[j] >> huffmanValueShift - linktab := h.links[value] - reverse >>= huffmanChunkBits - for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { - if sanity && linktab[off] != 0 { - panic("impossible: overwriting existing chunk") - } - linktab[off] = chunk - } - } - } - - if sanity { - // Above we've sanity checked that we never overwrote - // an existing entry. Here we additionally check that - // we filled the tables completely. - for i, chunk := range h.chunks { - if chunk == 0 { - // As an exception, in the degenerate - // single-code case, we allow odd - // chunks to be missing. - if code == 1 && i%2 == 1 { - continue - } - panic("impossible: missing chunk") - } - } - for _, linktab := range h.links { - for _, chunk := range linktab { - if chunk == 0 { - panic("impossible: missing chunk") - } - } - } - } - - return true -} - -// Reader is the actual read interface needed by NewReader. -// If the passed in io.Reader does not also have ReadByte, -// the NewReader will introduce its own buffering. -type Reader interface { - io.Reader - io.ByteReader -} - -type step uint8 - -const ( - copyData step = iota + 1 - nextBlock - huffmanBytesBuffer - huffmanBytesReader - huffmanBufioReader - huffmanStringsReader - huffmanGenericReader -) - -// flushMode tells decompressor when to return data -type flushMode uint8 - -const ( - syncFlush flushMode = iota // return data after sync flush block - partialFlush // return data after each block -) - -// Decompress state. -type decompressor struct { - // Input source. - r Reader - roffset int64 - - // Huffman decoders for literal/length, distance. - h1, h2 huffmanDecoder - - // Length arrays used to define Huffman codes. - bits *[maxNumLit + maxNumDist]int - codebits *[numCodes]int - - // Output history, buffer. - dict dictDecoder - - // Next step in the decompression, - // and decompression state. - step step - stepState int - err error - toRead []byte - hl, hd *huffmanDecoder - copyLen int - copyDist int - - // Temporary buffer (avoids repeated allocation). - buf [4]byte - - // Input bits, in top of b. - b uint32 - - nb uint - final bool - - flushMode flushMode -} - -func (f *decompressor) nextBlock() { - for f.nb < 1+2 { - if f.err = f.moreBits(); f.err != nil { - return - } - } - f.final = f.b&1 == 1 - f.b >>= 1 - typ := f.b & 3 - f.b >>= 2 - f.nb -= 1 + 2 - switch typ { - case 0: - f.dataBlock() - if debugDecode { - fmt.Println("stored block") - } - case 1: - // compressed, fixed Huffman tables - f.hl = &fixedHuffmanDecoder - f.hd = nil - f.huffmanBlockDecoder() - if debugDecode { - fmt.Println("predefinied huffman block") - } - case 2: - // compressed, dynamic Huffman tables - if f.err = f.readHuffman(); f.err != nil { - break - } - f.hl = &f.h1 - f.hd = &f.h2 - f.huffmanBlockDecoder() - if debugDecode { - fmt.Println("dynamic huffman block") - } - default: - // 3 is reserved. 
- if debugDecode { - fmt.Println("reserved data block encountered") - } - f.err = CorruptInputError(f.roffset) - } -} - -func (f *decompressor) Read(b []byte) (int, error) { - for { - if len(f.toRead) > 0 { - n := copy(b, f.toRead) - f.toRead = f.toRead[n:] - if len(f.toRead) == 0 { - return n, f.err - } - return n, nil - } - if f.err != nil { - return 0, f.err - } - - f.doStep() - - if f.err != nil && len(f.toRead) == 0 { - f.toRead = f.dict.readFlush() // Flush what's left in case of error - } - } -} - -// WriteTo implements the io.WriteTo interface for io.Copy and friends. -func (f *decompressor) WriteTo(w io.Writer) (int64, error) { - total := int64(0) - flushed := false - for { - if len(f.toRead) > 0 { - n, err := w.Write(f.toRead) - total += int64(n) - if err != nil { - f.err = err - return total, err - } - if n != len(f.toRead) { - return total, io.ErrShortWrite - } - f.toRead = f.toRead[:0] - } - if f.err != nil && flushed { - if f.err == io.EOF { - return total, nil - } - return total, f.err - } - if f.err == nil { - f.doStep() - } - if len(f.toRead) == 0 && f.err != nil && !flushed { - f.toRead = f.dict.readFlush() // Flush what's left in case of error - flushed = true - } - } -} - -func (f *decompressor) Close() error { - if f.err == io.EOF { - return nil - } - return f.err -} - -// RFC 1951 section 3.2.7. -// Compression with dynamic Huffman codes - -var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} - -func (f *decompressor) readHuffman() error { - // HLIT[5], HDIST[5], HCLEN[4]. - for f.nb < 5+5+4 { - if err := f.moreBits(); err != nil { - return err - } - } - nlit := int(f.b&0x1F) + 257 - if nlit > maxNumLit { - if debugDecode { - fmt.Println("nlit > maxNumLit", nlit) - } - return CorruptInputError(f.roffset) - } - f.b >>= 5 - ndist := int(f.b&0x1F) + 1 - if ndist > maxNumDist { - if debugDecode { - fmt.Println("ndist > maxNumDist", ndist) - } - return CorruptInputError(f.roffset) - } - f.b >>= 5 - nclen := int(f.b&0xF) + 4 - // numCodes is 19, so nclen is always valid. - f.b >>= 4 - f.nb -= 5 + 5 + 4 - - // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. - for i := 0; i < nclen; i++ { - for f.nb < 3 { - if err := f.moreBits(); err != nil { - return err - } - } - f.codebits[codeOrder[i]] = int(f.b & 0x7) - f.b >>= 3 - f.nb -= 3 - } - for i := nclen; i < len(codeOrder); i++ { - f.codebits[codeOrder[i]] = 0 - } - if !f.h1.init(f.codebits[0:]) { - if debugDecode { - fmt.Println("init codebits failed") - } - return CorruptInputError(f.roffset) - } - - // HLIT + 257 code lengths, HDIST + 1 code lengths, - // using the code length Huffman code. - for i, n := 0, nlit+ndist; i < n; { - x, err := f.huffSym(&f.h1) - if err != nil { - return err - } - if x < 16 { - // Actual length. - f.bits[i] = x - i++ - continue - } - // Repeat previous length or zero. 
- var rep int - var nb uint - var b int - switch x { - default: - return InternalError("unexpected length code") - case 16: - rep = 3 - nb = 2 - if i == 0 { - if debugDecode { - fmt.Println("i==0") - } - return CorruptInputError(f.roffset) - } - b = f.bits[i-1] - case 17: - rep = 3 - nb = 3 - b = 0 - case 18: - rep = 11 - nb = 7 - b = 0 - } - for f.nb < nb { - if err := f.moreBits(); err != nil { - if debugDecode { - fmt.Println("morebits:", err) - } - return err - } - } - rep += int(f.b & uint32(1<<(nb®SizeMaskUint32)-1)) - f.b >>= nb & regSizeMaskUint32 - f.nb -= nb - if i+rep > n { - if debugDecode { - fmt.Println("i+rep > n", i, rep, n) - } - return CorruptInputError(f.roffset) - } - for j := 0; j < rep; j++ { - f.bits[i] = b - i++ - } - } - - if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { - if debugDecode { - fmt.Println("init2 failed") - } - return CorruptInputError(f.roffset) - } - - // As an optimization, we can initialize the maxRead bits to read at a time - // for the HLIT tree to the length of the EOB marker since we know that - // every block must terminate with one. This preserves the property that - // we never read any extra bytes after the end of the DEFLATE stream. - if f.h1.maxRead < f.bits[endBlockMarker] { - f.h1.maxRead = f.bits[endBlockMarker] - } - if !f.final { - // If not the final block, the smallest block possible is - // a predefined table, BTYPE=01, with a single EOB marker. - // This will take up 3 + 7 bits. - f.h1.maxRead += 10 - } - - return nil -} - -// Copy a single uncompressed data block from input to output. -func (f *decompressor) dataBlock() { - // Uncompressed. - // Discard current half-byte. - left := (f.nb) & 7 - f.nb -= left - f.b >>= left - - offBytes := f.nb >> 3 - // Unfilled values will be overwritten. - f.buf[0] = uint8(f.b) - f.buf[1] = uint8(f.b >> 8) - f.buf[2] = uint8(f.b >> 16) - f.buf[3] = uint8(f.b >> 24) - - f.roffset += int64(offBytes) - f.nb, f.b = 0, 0 - - // Length then ones-complement of length. - nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) - f.roffset += int64(nr) - if err != nil { - f.err = noEOF(err) - return - } - n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 - nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 - if nn != ^n { - if debugDecode { - ncomp := ^n - fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) - } - f.err = CorruptInputError(f.roffset) - return - } - - if n == 0 { - if f.flushMode == syncFlush { - f.toRead = f.dict.readFlush() - } - - f.finishBlock() - return - } - - f.copyLen = int(n) - f.copyData() -} - -// copyData copies f.copyLen bytes from the underlying reader into f.hist. -// It pauses for reads when f.hist is full. 
-func (f *decompressor) copyData() { - buf := f.dict.writeSlice() - if len(buf) > f.copyLen { - buf = buf[:f.copyLen] - } - - cnt, err := io.ReadFull(f.r, buf) - f.roffset += int64(cnt) - f.copyLen -= cnt - f.dict.writeMark(cnt) - if err != nil { - f.err = noEOF(err) - return - } - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = copyData - return - } - f.finishBlock() -} - -func (f *decompressor) finishBlock() { - if f.final { - if f.dict.availRead() > 0 { - f.toRead = f.dict.readFlush() - } - - f.err = io.EOF - } else if f.flushMode == partialFlush && f.dict.availRead() > 0 { - f.toRead = f.dict.readFlush() - } - - f.step = nextBlock -} - -func (f *decompressor) doStep() { - switch f.step { - case copyData: - f.copyData() - case nextBlock: - f.nextBlock() - case huffmanBytesBuffer: - f.huffmanBytesBuffer() - case huffmanBytesReader: - f.huffmanBytesReader() - case huffmanBufioReader: - f.huffmanBufioReader() - case huffmanStringsReader: - f.huffmanStringsReader() - case huffmanGenericReader: - f.huffmanGenericReader() - default: - panic("BUG: unexpected step state") - } -} - -// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. -func noEOF(e error) error { - if e == io.EOF { - return io.ErrUnexpectedEOF - } - return e -} - -func (f *decompressor) moreBits() error { - c, err := f.r.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << (f.nb & regSizeMaskUint32) - f.nb += 8 - return nil -} - -// Read the next Huffman-encoded symbol from f according to h. -func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(h.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - return 0, noEOF(err) - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := h.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return 0, f.err - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - return int(chunk >> huffmanValueShift), nil - } - } -} - -func makeReader(r io.Reader) Reader { - if rr, ok := r.(Reader); ok { - return rr - } - return bufio.NewReader(r) -} - -func fixedHuffmanDecoderInit() { - fixedOnce.Do(func() { - // These come from the RFC section 3.2.6. 
- var bits [288]int - for i := 0; i < 144; i++ { - bits[i] = 8 - } - for i := 144; i < 256; i++ { - bits[i] = 9 - } - for i := 256; i < 280; i++ { - bits[i] = 7 - } - for i := 280; i < 288; i++ { - bits[i] = 8 - } - fixedHuffmanDecoder.init(bits[:]) - }) -} - -func (f *decompressor) Reset(r io.Reader, dict []byte) error { - *f = decompressor{ - r: makeReader(r), - bits: f.bits, - codebits: f.codebits, - h1: f.h1, - h2: f.h2, - dict: f.dict, - step: nextBlock, - } - f.dict.init(maxMatchOffset, dict) - return nil -} - -type ReaderOpt func(*decompressor) - -// WithPartialBlock tells decompressor to return after each block, -// so it can read data written with partial flush -func WithPartialBlock() ReaderOpt { - return func(f *decompressor) { - f.flushMode = partialFlush - } -} - -// WithDict initializes the reader with a preset dictionary -func WithDict(dict []byte) ReaderOpt { - return func(f *decompressor) { - f.dict.init(maxMatchOffset, dict) - } -} - -// NewReaderOpts returns new reader with provided options -func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, nil) - - for _, opt := range opts { - opt(&f) - } - - return &f -} - -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReader(r io.Reader) io.ReadCloser { - return NewReaderOpts(r) -} - -// NewReaderDict is like NewReader but initializes the reader -// with a preset dictionary. The returned Reader behaves as if -// the uncompressed data stream started with the given dictionary, -// which has already been read. NewReaderDict is typically used -// to read data compressed by NewWriterDict. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - return NewReaderOpts(r, WithDict(dict)) -} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go deleted file mode 100644 index 2b2f993f7..000000000 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ /dev/null @@ -1,1283 +0,0 @@ -// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( - "bufio" - "bytes" - "fmt" - "math/bits" - "strings" -) - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBytesBuffer() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Buffer) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanBytesBuffer - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. - extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = huffmanBytesBuffer // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBytesReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanBytesReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = huffmanBytesReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBufioReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bufio.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanBufioReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for fnb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits f.nb<nb:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- extra |= fb & bitMask32[nb]
- fb >>= nb & regSizeMaskUint32
- fnb -= nb
- dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- // slower: dist = bitMask32[nb+1] + 2 + extra
- default:
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(dict.histSize()) {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist > dict.histSize():", dist, dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanBufioReader // We need to continue this work
- f.stepState = stateDict
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- }
- // Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanStringsReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(*strings.Reader)
-
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- fnb, fb, dict := f.nb, f.b, &f.dict
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanStringsReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for fnb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits f.nb<nb:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- extra |= fb & bitMask32[nb]
- fb >>= nb & regSizeMaskUint32
- fnb -= nb
- dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- // slower: dist = bitMask32[nb+1] + 2 + extra
- default:
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(dict.histSize()) {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist > dict.histSize():", dist, dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanStringsReader // We need to continue this work
- f.stepState = stateDict
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- }
- // Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanGenericReader() {
- const (
- stateInit = iota // Zero value must be stateInit
- stateDict
- )
- fr := f.r.(Reader)
-
- // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
- // but is smart enough to keep local variables in registers, so use nb and b,
- // inline call to moreBits and reassign b,nb back to f on return.
- fnb, fb, dict := f.nb, f.b, &f.dict
-
- switch f.stepState {
- case stateInit:
- goto readLiteral
- case stateDict:
- goto copyHistory
- }
-
-readLiteral:
- // Read literal and/or (length, distance) according to RFC section 3.2.3.
- {
- var v int
- {
- // Inlined v, err := f.huffSym(f.hl)
- // Since a huffmanDecoder can be empty or be composed of a degenerate tree
- // with single element, huffSym must error on these two edge cases. In both
- // cases, the chunks slice will be 0 for the invalid sequence, leading it
- // satisfy the n == 0 check below.
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = huffmanGenericReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32)
- for fnb < nb {
- c, err := fr.ReadByte()
- if err != nil {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("morebits f.nb<nb:", err)
- }
- f.err = err
- return
- }
- f.roffset++
- fb |= uint32(c) << (fnb & regSizeMaskUint32)
- fnb += 8
- }
- extra |= fb & bitMask32[nb]
- fb >>= nb & regSizeMaskUint32
- fnb -= nb
- dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
- // slower: dist = bitMask32[nb+1] + 2 + extra
- default:
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist too big:", dist, maxNumDist)
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- // No check on length; encoding can be prescient.
- if dist > uint32(dict.histSize()) {
- f.b, f.nb = fb, fnb
- if debugDecode {
- fmt.Println("dist > dict.histSize():", dist, dict.histSize())
- }
- f.err = CorruptInputError(f.roffset)
- return
- }
-
- f.copyLen, f.copyDist = length, int(dist)
- goto copyHistory
- }
-
-copyHistory:
- // Perform a backwards copy according to RFC section 3.2.3.
- {
- cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
- if cnt == 0 {
- cnt = dict.writeCopy(f.copyDist, f.copyLen)
- }
- f.copyLen -= cnt
-
- if dict.availWrite() == 0 || f.copyLen > 0 {
- f.toRead = dict.readFlush()
- f.step = huffmanGenericReader // We need to continue this work
- f.stepState = stateDict
- f.b, f.nb = fb, fnb
- return
- }
- goto readLiteral
- }
- // Not reached
-}
-
-func (f *decompressor) huffmanBlockDecoder() {
- switch f.r.(type) {
- case *bytes.Buffer:
- f.huffmanBytesBuffer()
- case *bytes.Reader:
- f.huffmanBytesReader()
- case *bufio.Reader:
- f.huffmanBufioReader()
- case *strings.Reader:
- f.huffmanStringsReader()
- case Reader:
- f.huffmanGenericReader()
- default:
- f.huffmanGenericReader()
- }
-}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
deleted file mode 100644
index 703b9a89a..000000000
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package flate
-
-import (
- "encoding/binary"
- "fmt"
- "math/bits"
-)
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastEncL1 struct {
- fastGen
- table [tableSize]tableEntry
-}
-
-// EncodeL1 uses a similar algorithm to level 1
-func (e *fastEncL1) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- hashBytes = 5
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
-
- for {
- const skipLog = 5
- const doEvery = 2
-
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hashLen(cv, tableBits, hashBytes)
- candidate = e.table[nextHash]
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
-
- now := load6432(src, nextS)
- e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hashLen(now, tableBits, hashBytes)
-
- offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
-
- // Do one right away...
- cv = now
- s = nextS
- nextS++
- candidate = e.table[nextHash]
- now >>= 8
- e.table[nextHash] = tableEntry{offset: s + e.cur}
-
- offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
- cv = now
- s = nextS
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- t := candidate.offset - e.cur
- var l = int32(4)
- if false {
- l = e.matchlenLong(s+4, t+4, src) + 4
- } else {
- // inlined:
- a := src[s+4:]
- b := src[t+4:]
- for len(a) >= 8 {
- if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
- l += int32(bits.TrailingZeros64(diff) >> 3)
- break
- }
- l += 8
- a = a[8:]
- b = b[8:]
- }
- if len(a) < 8 {
- b = b[:len(a)]
- for i := range a {
- if a[i] != b[i] {
- break
- }
- l++
- }
- }
- }
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
-
- // Save the match found
- if false {
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- } else {
- // Inlined...
- xoffset := uint32(s - t - baseMatchOffset)
- xlength := l
- oc := offsetCode(xoffset)
- xoffset |= oc << 16
- for xlength > 0 {
- xl := xlength
- if xl > 258 {
- if xl > 258+baseMatchLength {
- xl = 258
- } else {
- xl = 258 - baseMatchLength
- }
- }
- xlength -= xl
- xl -= baseMatchLength
- dst.extraHist[lengthCodes1[uint8(xl)]]++
- dst.offHist[oc]++
- dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
- dst.n++
- }
- }
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
- if s >= sLimit {
- // Index first pair after match end.
- if int(s+l+8) < len(src) {
- cv := load6432(src, s)
- e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
- }
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, tableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashLen(x, tableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { - cv = x >> 8 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go deleted file mode 100644 index 876dfbe30..000000000 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ /dev/null @@ -1,214 +0,0 @@ -package flate - -import "fmt" - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type fastEncL2 struct { - fastGen - table [bTableSize]tableEntry -} - -// EncodeL2 uses a similar algorithm to level 1, but is capable -// of matching across blocks giving better compression at a small slowdown. -func (e *fastEncL2) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - // When should we start skipping if we haven't found matches in a long while. - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, bTableBits, hashBytes) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidate = e.table[nextHash] - now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hashLen(now, bTableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - - // Do one right away... 
- cv = now - s = nextS - nextS++ - candidate = e.table[nextHash] - now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur} - - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - cv = now - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+l+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every second hash in-between, but offset by 1. - for i := s - l + 2; i < s-5; i += 7 { - x := load6432(src, i) - nextHash := hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 2} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 4} - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, bTableBits, hashBytes) - prevHash2 := hashLen(x>>8, bTableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - e.table[prevHash2] = tableEntry{offset: o + 1} - currHash := hashLen(x>>16, bTableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { - cv = x >> 24 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go deleted file mode 100644 index 7aa2b72a1..000000000 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ /dev/null @@ -1,241 +0,0 @@ -package flate - -import "fmt" - -// fastEncL3 -type fastEncL3 struct { - fastGen - table [1 << 16]tableEntryPrev -} - -// Encode uses a similar algorithm to level 2, will check up to two candidates. -func (e *fastEncL3) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - tableBits = 16 - tableSize = 1 << tableBits - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - } - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - e.table[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // Skip if too small. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 7 - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, tableBits, hashBytes) - s = nextS - nextS = s + 1 + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidates := e.table[nextHash] - now := load6432(src, nextS) - - // Safe offset distance until s + 4... - minOffset := e.cur + s - (maxMatchOffset - 4) - e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} - - // Check both candidates - candidate = candidates.Cur - if candidate.offset < minOffset { - cv = now - // Previous will also be invalid, we have nothing. - continue - } - - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { - break - } - // Both match and are valid, pick longest. - offset := s - (candidate.offset - e.cur) - o2 := s - (candidates.Prev.offset - e.cur) - l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) - if l2 > l1 { - candidate = candidates.Prev - } - break - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - } - cv = now - } - - // Call emitCopy, and then see if another emitCopy could be our next - // move. 
Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - // - t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - t += l - // Index first pair after match end. - if int(t+8) < len(src) && t > 0 { - cv = load6432(src, t) - nextHash := hashLen(cv, tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + t}, - } - } - goto emitRemainder - } - - // Store every 5th hash in-between. - for i := s - l + 2; i < s-5; i += 6 { - nextHash := hashLen(load6432(src, i), tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + i}} - } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. - x := load6432(src, s-2) - prevHash := hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 2}, - } - x >>= 8 - prevHash = hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 1}, - } - x >>= 8 - currHash := hashLen(x, tableBits, hashBytes) - candidates := e.table[currHash] - cv = x - e.table[currHash] = tableEntryPrev{ - Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur}, - } - - // Check both candidates - candidate = candidates.Cur - minOffset := e.cur + s - (maxMatchOffset - 4) - - if candidate.offset > minOffset { - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Found a match... - continue - } - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Match at prev... - continue - } - } - cv = x >> 8 - s++ - break - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go deleted file mode 100644 index 23c08b325..000000000 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ /dev/null @@ -1,221 +0,0 @@ -package flate - -import "fmt" - -type fastEncL4 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntry -} - -func (e *fastEncL4) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.bTable[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - e.bTable[nextHashL] = entry - - t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { - // We got a long match. Use that. - break - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - lCandidate = e.bTable[hash7(next, tableBits)] - - // If the next long is a candidate, check if we should use that instead... - lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { - l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) - if l2 > l1 { - s = nextS - t = lCandidate.offset - e.cur - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. 
Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic("s-t") - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} - e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every 3rd hash in-between - if true { - i := nextS - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - - i += 3 - for ; i < s-1; i += 3 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - e.bTable[prevHashL] = tableEntry{offset: o} - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go deleted file mode 100644 index 1f61ec182..000000000 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ /dev/null @@ -1,708 +0,0 @@ -package flate - -import "fmt" - -type fastEncL5 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL5) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - lCandidate = e.bTable[nextHashL] - // Store the next match - - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // If the next long is a candidate, use that... 
- t2 := lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - if l == 0 { - // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end of best match... - if sAt := s + l; l < 30 && sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 2 - eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - t2 := eLong - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - goto emitRemainder - } - - // Store every 3rd hash in-between. - if true { - const hashEvery = 3 - i := s - l + 1 - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // Do an long at i+1 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - eLong = &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // We only have enough bits for a short entry at i+2 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - - // Skip one - otherwise we risk hitting 's' - i += 4 - for ; i < s-1; i += hashEvery { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. 
- x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} - -// fastEncL5Window is a level 5 encoder, -// but with a custom window size. -type fastEncL5Window struct { - hist []byte - cur int32 - maxOffset int32 - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - maxMatchOffset := e.maxOffset - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - lCandidate = e.bTable[nextHashL] - // Store the next match - - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // If the next long is a candidate, use that... - t2 := lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - if l == 0 { - // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end of best match... - if sAt := s + l; l < 30 && sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. 
- const skipBeginning = 2 - eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - t2 := eLong - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - goto emitRemainder - } - - // Store every 3rd hash in-between. - if true { - const hashEvery = 3 - i := s - l + 1 - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // Do an long at i+1 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - eLong = &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // We only have enough bits for a short entry at i+2 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - - // Skip one - otherwise we risk hitting 's' - i += 4 - for ; i < s-1; i += hashEvery { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} - -// Reset the encoding table. -func (e *fastEncL5Window) Reset() { - // We keep the same allocs, since we are compressing the same block sizes. - if cap(e.hist) < allocHistory { - e.hist = make([]byte, 0, allocHistory) - } - - // We offset current position so everything will be out of reach. - // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. 
- if e.cur <= int32(bufferReset) { - e.cur += e.maxOffset + int32(len(e.hist)) - } - e.hist = e.hist[:0] -} - -func (e *fastEncL5Window) addBlock(src []byte) int32 { - // check if we have space already - maxMatchOffset := e.maxOffset - - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.hist = make([]byte, 0, allocHistory) - } else { - if cap(e.hist) < int(maxMatchOffset*2) { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - maxMatchOffset - copy(e.hist[0:maxMatchOffset], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:maxMatchOffset] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// matchlen will return the match length between offsets and t in src. -// The maximum length returned is maxMatchLength - 4. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { - if debugDecode { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > e.maxOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) -} - -// matchlenLong will return the match length between offsets and t in src. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 { - if debugDeflate { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > e.maxOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go deleted file mode 100644 index f1e9d98fa..000000000 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ /dev/null @@ -1,325 +0,0 @@ -package flate - -import "fmt" - -type fastEncL6 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL6) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - // Repeat MUST be > 1 and within range - repeat := int32(1) - for { - const skipLog = 7 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - // Calculate hashes of 'next' - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Long candidate matches at least 4 bytes. - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check the previous long candidate as well. - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - // Current value did not match, but check if previous long value does. - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... 
- l = e.matchlen(s+4, t+4, src) + 4 - - // Look up next long candidate (at nextS) - lCandidate = e.bTable[nextHashL] - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check repeat at s + repOff - const repOff = 1 - t2 := s - repeat + repOff - if load3232(src, t2) == uint32(cv>>(8*repOff)) { - ml := e.matchlen(s+4+repOff, t2+4, src) + 4 - if ml > l { - t = t2 - l = ml - s += repOff - // Not worth checking more. - break - } - } - - // If the next long is a candidate, use that... - t2 = lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - // This is ok, but check previous as well. - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. - if l == 0 { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end-of-match... - if sAt := s + l; sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 2 - eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] - // Test current - t2 := eLong.Cur.offset - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if off < maxMatchOffset { - if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - // Test next: - t2 = eLong.Prev.offset - e.cur - l + skipBeginning - off := s2 - t2 - if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if false { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - repeat = s - t - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index after match end. - for i := nextS + 1; i < int32(len(src))-8; i += 2 { - cv := load6432(src, i) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur - } - goto emitRemainder - } - - // Store every long hash in-between and every second short. 
- if true { - for i := nextS + 1; i < s-1; i += 2 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong2 := &e.bTable[hash7(cv>>8, tableBits)] - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong.Cur, eLong.Prev = t, eLong.Cur - eLong2.Cur, eLong2.Prev = t2, eLong2.Cur - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - cv = load6432(src, s) - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go deleted file mode 100644 index 4bd388584..000000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package flate - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s deleted file mode 100644 index 0782b86e3..000000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SHRL $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go deleted file mode 100644 index ad5cd814b..000000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
- -package flate - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go deleted file mode 100644 index 6ed28061b..000000000 --- a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go +++ /dev/null @@ -1,37 +0,0 @@ -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. - - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 7 - reg8SizeMask16 = 15 - reg8SizeMask32 = 31 - reg8SizeMask64 = 63 - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = reg8SizeMask8 - reg16SizeMask16 = reg8SizeMask16 - reg16SizeMask32 = reg8SizeMask32 - reg16SizeMask64 = reg8SizeMask64 - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = reg8SizeMask8 - reg32SizeMask16 = reg8SizeMask16 - reg32SizeMask32 = reg8SizeMask32 - reg32SizeMask64 = reg8SizeMask64 - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = reg8SizeMask8 - reg64SizeMask16 = reg8SizeMask16 - reg64SizeMask32 = reg8SizeMask32 - reg64SizeMask64 = reg8SizeMask64 - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = reg8SizeMask8 - regSizeMaskUint16 = reg8SizeMask16 - regSizeMaskUint32 = reg8SizeMask32 - regSizeMaskUint64 = reg8SizeMask64 -) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go deleted file mode 100644 index 1b7a2cbd7..000000000 --- a/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !amd64 -// +build !amd64 - -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. 
- - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 0xff - reg8SizeMask16 = 0xff - reg8SizeMask32 = 0xff - reg8SizeMask64 = 0xff - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = 0xffff - reg16SizeMask16 = 0xffff - reg16SizeMask32 = 0xffff - reg16SizeMask64 = 0xffff - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = 0xffffffff - reg32SizeMask16 = 0xffffffff - reg32SizeMask32 = 0xffffffff - reg32SizeMask64 = 0xffffffff - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = 0xffffffffffffffff - reg64SizeMask16 = 0xffffffffffffffff - reg64SizeMask32 = 0xffffffffffffffff - reg64SizeMask64 = 0xffffffffffffffff - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = ^uint(0) - regSizeMaskUint16 = ^uint(0) - regSizeMaskUint32 = ^uint(0) - regSizeMaskUint64 = ^uint(0) -) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go deleted file mode 100644 index f3d4139ef..000000000 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ /dev/null @@ -1,318 +0,0 @@ -package flate - -import ( - "io" - "math" - "sync" -) - -const ( - maxStatelessBlock = math.MaxInt16 - // dictionary will be taken from maxStatelessBlock, so limit it. - maxStatelessDict = 8 << 10 - - slTableBits = 13 - slTableSize = 1 << slTableBits - slTableShift = 32 - slTableBits -) - -type statelessWriter struct { - dst io.Writer - closed bool -} - -func (s *statelessWriter) Close() error { - if s.closed { - return nil - } - s.closed = true - // Emit EOF block - return StatelessDeflate(s.dst, nil, true, nil) -} - -func (s *statelessWriter) Write(p []byte) (n int, err error) { - err = StatelessDeflate(s.dst, p, false, nil) - if err != nil { - return 0, err - } - return len(p), nil -} - -func (s *statelessWriter) Reset(w io.Writer) { - s.dst = w - s.closed = false -} - -// NewStatelessWriter will do compression but without maintaining any state -// between Write calls. -// There will be no memory kept between Write calls, -// but compression and speed will be suboptimal. -// Because of this, the size of actual Write calls will affect output size. -func NewStatelessWriter(dst io.Writer) io.WriteCloser { - return &statelessWriter{dst: dst} -} - -// bitWriterPool contains bit writers that can be reused. -var bitWriterPool = sync.Pool{ - New: func() interface{} { - return newHuffmanBitWriter(nil) - }, -} - -// StatelessDeflate allows compressing directly to a Writer without retaining state. -// When returning everything will be flushed. -// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. -// Longer dictionaries will be truncated and will still produce valid output. -// Sending nil dictionary is perfectly fine. -func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens - bw := bitWriterPool.Get().(*huffmanBitWriter) - bw.reset(out) - defer func() { - // don't keep a reference to our output - bw.reset(nil) - bitWriterPool.Put(bw) - }() - if eof && len(in) == 0 { - // Just write an EOF block. - // Could be faster... - bw.writeStoredHeader(0, true) - bw.flush() - return bw.err - } - - // Truncate dict - if len(dict) > maxStatelessDict { - dict = dict[len(dict)-maxStatelessDict:] - } - - // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
- var inDict []byte - - for len(in) > 0 { - todo := in - if len(inDict) > 0 { - if len(todo) > maxStatelessBlock-maxStatelessDict { - todo = todo[:maxStatelessBlock-maxStatelessDict] - } - } else if len(todo) > maxStatelessBlock-len(dict) { - todo = todo[:maxStatelessBlock-len(dict)] - } - inOrg := in - in = in[len(todo):] - uncompressed := todo - if len(dict) > 0 { - // combine dict and source - bufLen := len(todo) + len(dict) - combined := make([]byte, bufLen) - copy(combined, dict) - copy(combined[len(dict):], todo) - todo = combined - } - // Compress - if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) - } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) - } - isEof := eof && len(in) == 0 - - if dst.n == 0 { - bw.writeStoredHeader(len(uncompressed), isEof) - if bw.err != nil { - return bw.err - } - bw.writeBytes(uncompressed) - } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { - // If we removed less than 1/16th, huffman compress the block. - bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) - } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) - } - if len(in) > 0 { - // Retain a dict if we have more - inDict = inOrg[len(uncompressed)-maxStatelessDict:] - dict = nil - dst.Reset() - } - if bw.err != nil { - return bw.err - } - } - if !eof { - // Align, only a stored block can do that. - bw.writeStoredHeader(0, false) - } - bw.flush() - return bw.err -} - -func hashSL(u uint32) uint32 { - return (u * 0x1e35a7bd) >> slTableShift -} - -func load3216(b []byte, i int16) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load6416(b []byte, i int16) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func statelessEnc(dst *tokens, src []byte, startAt int16) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - type tableEntry struct { - offset int16 - } - - var table [slTableSize]tableEntry - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src)-int(startAt) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = 0 - return - } - // Index until startAt - if startAt > 0 { - cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { - table[hashSL(cv)] = tableEntry{offset: i} - cv = (cv >> 8) | (uint32(src[i+4]) << 24) - } - } - - s := startAt + 1 - nextEmit := startAt - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int16(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load3216(src, s) - - for { - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashSL(cv) - candidate = table[nextHash] - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit || nextS <= 0 { - goto emitRemainder - } - - now := load6416(src, nextS) - table[nextHash] = tableEntry{offset: s} - nextHash = hashSL(uint32(now)) - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - - // Do one right away... - cv = uint32(now) - s = nextS - nextS++ - candidate = table[nextHash] - now >>= 8 - table[nextHash] = tableEntry{offset: s} - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - cv = uint32(now) - s = nextS - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - t := candidate.offset - l := int16(matchLen(src[s+4:], src[t+4:]) + 4) - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - // Save the match found - dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6416(src, s-2) - o := s - 2 - prevHash := hashSL(uint32(x)) - table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashSL(uint32(x)) - candidate = table[currHash] - table[currHash] = tableEntry{offset: o + 2} - - if uint32(x) != load3216(src, candidate.offset) { - cv = uint32(x >> 8) - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go deleted file mode 100644 index d818790c1..000000000 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
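The stateless.go file removed above documents an exported API: NewStatelessWriter keeps no state between Write calls, and StatelessDeflate compresses a single block directly to a Writer with an optional dictionary of up to 8 KB presumed to precede the block. A minimal usage sketch of that API (illustrative only, with made-up input; it assumes the github.com/klauspost/compress/flate import path and the signatures shown in the deleted file):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// Each Write is compressed independently, so the size of individual
	// Write calls affects output size (per the doc comment in stateless.go).
	w := flate.NewStatelessWriter(&buf)
	if _, err := w.Write([]byte("hello hello hello hello hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close emits the final EOF block
		panic(err)
	}

	// StatelessDeflate can also be called directly; the optional dict
	// (nil here) is presumed to precede the block and is capped at 8 KB.
	var out bytes.Buffer
	if err := flate.StatelessDeflate(&out, []byte("more data to flush"), true, nil); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), out.Len())
}
```

Because nothing is retained between Write calls, batching data into larger writes before handing it to the stateless writer generally improves the ratio, which is what the doc comment above warns about.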
- -package flate - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits - // bits 16-22 offsetcode - 5 bits - // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits - // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits - lengthShift = 22 - offsetMask = 1<maxnumlit - offHist [32]uint16 // offset codes - litHist [256]uint16 // codes 0->255 - nFilled int - n uint16 // Must be able to contain maxStoreBlockSize - tokens [maxStoreBlockSize + 1]token -} - -func (t *tokens) Reset() { - if t.n == 0 { - return - } - t.n = 0 - t.nFilled = 0 - for i := range t.litHist[:] { - t.litHist[i] = 0 - } - for i := range t.extraHist[:] { - t.extraHist[i] = 0 - } - for i := range t.offHist[:] { - t.offHist[i] = 0 - } -} - -func (t *tokens) Fill() { - if t.n == 0 { - return - } - for i, v := range t.litHist[:] { - if v == 0 { - t.litHist[i] = 1 - t.nFilled++ - } - } - for i, v := range t.extraHist[:literalCount-256] { - if v == 0 { - t.nFilled++ - t.extraHist[i] = 1 - } - } - for i, v := range t.offHist[:offsetCodeCount] { - if v == 0 { - t.offHist[i] = 1 - } - } -} - -func indexTokens(in []token) tokens { - var t tokens - t.indexTokens(in) - return t -} - -func (t *tokens) indexTokens(in []token) { - t.Reset() - for _, tok := range in { - if tok < matchType { - t.AddLiteral(tok.literal()) - continue - } - t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) - } -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -func emitLiteral(dst *tokens, lit []byte) { - for _, v := range lit { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } -} - -func (t *tokens) AddLiteral(lit byte) { - t.tokens[t.n] = token(lit) - t.litHist[lit]++ - t.n++ -} - -// from https://stackoverflow.com/a/28730362 -func mFastLog2(val float32) float32 { - ux := int32(math.Float32bits(val)) - log2 := (float32)(((ux >> 23) & 255) - 128) - ux &= -0x7f800001 - ux += 127 << 23 - uval := math.Float32frombits(uint32(ux)) - log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 - return log2 -} - -// EstimatedBits will return an minimum size estimated by an *optimal* -// compression of the block. -// The size of the block -func (t *tokens) EstimatedBits() int { - shannon := float32(0) - bits := int(0) - nMatches := 0 - total := int(t.n) + t.nFilled - if total > 0 { - invTotal := 1.0 / float32(total) - for _, v := range t.litHist[:] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - } - } - // Just add 15 for EOB - shannon += 15 - for i, v := range t.extraHist[1 : literalCount-256] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - bits += int(lengthExtraBits[i&31]) * int(v) - nMatches += int(v) - } - } - } - if nMatches > 0 { - invTotal := 1.0 / float32(nMatches) - for i, v := range t.offHist[:offsetCodeCount] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - bits += int(offsetExtraBits[i&31]) * int(v) - } - } - } - return int(shannon) + bits -} - -// AddMatch adds a match to the tokens. -// This function is very sensitive to inlining and right on the border. 
-func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { - if debugDeflate { - if xlength >= maxMatchLength+baseMatchLength { - panic(fmt.Errorf("invalid length: %v", xlength)) - } - if xoffset >= maxMatchOffset+baseMatchOffset { - panic(fmt.Errorf("invalid offset: %v", xoffset)) - } - } - oCode := offsetCode(xoffset) - xoffset |= oCode << 16 - - t.extraHist[lengthCodes1[uint8(xlength)]]++ - t.offHist[oCode&31]++ - t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { - panic(fmt.Errorf("invalid offset: %v", xoffset)) - } - } - oc := offsetCode(xoffset) - xoffset |= oc << 16 - for xlength > 0 { - xl := xlength - if xl > 258 { - // We need to have at least baseMatchLength left over for next loop. - if xl > 258+baseMatchLength { - xl = 258 - } else { - xl = 258 - baseMatchLength - } - } - xlength -= xl - xl -= baseMatchLength - t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc&31]++ - t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } - -// Convert length to code. -func lengthCode(len uint8) uint8 { return lengthCodes[len] } - -// Returns the offset code corresponding to a specific offset -func offsetCode(off uint32) uint32 { - if false { - if off < uint32(len(offsetCodes)) { - return offsetCodes[off&255] - } else if off>>7 < uint32(len(offsetCodes)) { - return offsetCodes[(off>>7)&255] + 14 - } else { - return offsetCodes[(off>>14)&255] + 28 - } - } - if off < uint32(len(offsetCodes)) { - return offsetCodes[uint8(off)] - } - return offsetCodes14[uint8(off>>7)] -} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324da6..000000000 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. - -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. 
| - -As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. -All compression functions are currently only running on the calling goroutine so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. - -With moderate block sizes around 64k speed are typically 200MB/s per core for compression and -around 300MB/s decompression speed. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb3909..000000000 --- a/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
-func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index e82fa3bb7..000000000 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. 
-// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. 
-func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d60..000000000 --- a/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 074018d8f..000000000 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. 
- if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - src = src[:ip] - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. 
- for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - default: - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - s.bw.close() - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. -func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human readable string. 
-func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. 
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m, symlen := uint32(0), s.symbolLen - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - symlen = uint16(i) + 1 - } - s.symbolLen = symlen - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. -func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. 
-func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 { - if previous0 { - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - n0 += 24 - if b.off < iend-5 { - b.advance(2) - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 16 - bitCount += 16 - } - } - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - for charnum < n0 { - s.norm[charnum&0xff] = 0 - charnum++ - } - - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*(threshold) - 1) - (remaining) - var count int32 - - if (int32(bitStream) & (threshold - 1)) < max { - count = int32(bitStream) & (threshold - 1) - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - count-- // extra accuracy - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - } - bitStream = b.Uint32() >> (bitCount & 31) - } - s.symbolLen = charnum - - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return nil -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -type decSymbol struct { - newState uint16 - symbol uint8 - nbBits uint8 -} - -// allocDtable will allocate decoding tables if they are not big enough. 
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - if err := br.init(s.br.unread()); err != nil { - return err - } - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbadfd..000000000 --- a/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. 
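The Scratch fields documented above (Out, DecompressLimit, MaxSymbolValue, TableLog) are designed for reuse across calls. A minimal round-trip sketch using the package's exported Compress and Decompress entry points; the sample input, the 1 MiB cap, and the single shared Scratch are illustrative assumptions:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("abcabcabd"), 100)

	var s fse.Scratch
	comp, err := fse.Compress(in, &s)
	if err != nil {
		// fse.ErrIncompressible and fse.ErrUseRLE are expected outcomes, not failures.
		fmt.Println("not entropy coded:", err)
		return
	}

	// comp aliases s.Out, so detach it before reusing the scratch
	// (see the Out field documentation above).
	s.Out = nil
	s.DecompressLimit = 1 << 20 // generous cap; 0 would mean roughly 2 GB

	out, err := fse.Decompress(comp, &s)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(in, out), len(in), "->", len(comp))
}
```

The s.Out = nil step matters: as documented above, the output buffer is reused between calls, so it must be detached or copied while a previous result is still needed.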
-func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh deleted file mode 100644 index aff942205..000000000 --- a/vendor/github.com/klauspost/compress/gen.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cd s2/cmd/_s2sx/ || exit 1 -go generate . diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore deleted file mode 100644 index b3d262958..000000000 --- a/vendor/github.com/klauspost/compress/huff0/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md deleted file mode 100644 index 8b6e5c663..000000000 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Huff0 entropy compression - -This package provides Huff0 encoding and decoding as used in zstd. - -[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), -a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU -(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) - -## News - -This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. - -This ensures that most functionality is well tested. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and -[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `<nil>` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | -| `(error)` | An internal error occurred. | - - -As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both.
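A minimal round-trip sketch of the usage described above, combined with the ReadTable/Decompress1X flow covered in the decompression section below; the sample input, the separate decode Scratch, and ignoring the table-reuse boolean are illustrative assumptions, not the only way to drive the API:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := bytes.Repeat([]byte("huff0 entropy coding example "), 64)

	var enc huff0.Scratch
	comp, _, err := huff0.Compress1X(in, &enc)
	if err == huff0.ErrIncompressible || err == huff0.ErrUseRLE {
		// Expected outcomes for unsuitable input; callers store such blocks raw or as RLE.
		fmt.Println("not entropy coded:", err)
		return
	}
	if err != nil {
		panic(err)
	}

	// The table is stored in front of the data; ReadTable consumes it and
	// returns the remaining compressed payload.
	var dec huff0.Scratch
	dec.MaxDecodedSize = len(in) // upper bound for the decoded output
	_, data, err := huff0.ReadTable(comp, &dec)
	if err != nil {
		panic(err)
	}
	out, err := dec.Decompress1X(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(in, out), len(in), "->", len(comp))
}
```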
- -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. - -## Tables and re-use - -Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. - -The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) -that controls this behaviour. See the documentation for details. This can be altered between each block. - -Do however note that this information is *not* stored in the output block and it is up to the users of the package to -record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, -based on the boolean reported back from the CompressXX call. - -If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the -[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. - -## Decompressing - -The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). -This will initialize the decoding tables. -You can supply the complete block to `ReadTable` and it will return the data part of the block -which can be given to the decompressor. - -Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) -or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. - -For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. - -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go deleted file mode 100644 index e36d9742f..000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. 
-type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -func (b *bitReaderBytes) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
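Both bit readers locate the start of the reverse bitstream from the final byte: the encoder terminates the stream with a single 1 bit, so the highest set bit of the last byte marks the end of real data, and init skips the zero padding plus the mark itself. A tiny standalone illustration of that alignment arithmetic (not the package's code, arbitrary example byte):

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Final byte of a stream: three data bits (101) sit below the end-mark bit.
	last := byte(0b0000_1101)
	mark := bits.Len8(last) - 1 // position of the end mark (highest set bit): 3
	skip := 8 - mark            // zero padding plus the mark itself: 5 bits to discard
	fmt.Println(mark, skip)
}
```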
-func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -func (b *bitReaderShifted) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index 0ebc9aaac..000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// encFourSymbols adds up to 32 bits from four symbols. -// It will not check if there is space for them, -// so the caller must ensure that b has been flushed recently. -func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { - bitsA := encA.nBits - bitsB := bitsA + encB.nBits - bitsC := bitsB + encC.nBits - bitsD := bitsC + encD.nBits - combined := uint64(encA.val) | - (uint64(encB.val) << (bitsA & 63)) | - (uint64(encC.val) << (bitsB & 63)) | - (uint64(encD.val) << (bitsC & 63)) - b.bitContainer |= combined << (b.nBits & 63) - b.nBits += bitsD -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 84aa3d12f..000000000 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,742 +0,0 @@ -package huff0 - -import ( - "fmt" - "math" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. 
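The bitWriter shown above packs variable-length codes LSB-first into a 64-bit container and flushes four bytes whenever at least 32 bits have accumulated; Compress1X and Compress4X (defined next) drive it with the per-symbol code table. A simplified standalone sketch of that packing rule, using hypothetical add/flush32 closures and arbitrary example values:

```go
package main

import "fmt"

func main() {
	var container uint64
	var nBits uint8
	var out []byte

	// Mirror of addBits16Clean: OR the value in at the current bit offset.
	add := func(value uint16, bits uint8) {
		container |= uint64(value) << (nBits & 63)
		nBits += bits
	}
	// Mirror of flush32: emit four bytes once 32 bits are buffered.
	flush32 := func() {
		if nBits < 32 {
			return
		}
		out = append(out,
			byte(container), byte(container>>8), byte(container>>16), byte(container>>24))
		nBits -= 32
		container >>= 32
	}

	add(0b101, 3)   // 3-bit code
	add(0b11, 2)    // 2-bit code; container now holds 0b11101
	add(0x2AB, 10)  // 10-bit code
	add(0x7FFF, 15) // 15-bit code
	add(0x3F, 6)    // 6-bit code; 36 bits buffered, so a flush is possible
	flush32()
	fmt.Printf("flushed % x, %d bits still buffered\n", out, nBits)
}
```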
-func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. - const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. - return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. 
- s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src), nil -} - -func (s *Scratch) compress1xDo(dst, src []byte) []byte { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. - for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - bw.close() - return bw.out -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - idx := len(s.Out) - s.Out = s.compress1xDo(s.Out, toDo) - if len(s.Out)-idx > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. 
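compress4X above frames its output as a 6-byte jump table (three little-endian uint16 stream lengths) followed by the four compressed streams; the fourth length is implicit and runs to the end of the block. The decode side of this framing appears later in this diff in decompress4X8bit. A standalone sketch of parsing that layout (splitStreams is a hypothetical helper, not part of huff0):

```go
package main

import (
	"errors"
	"fmt"
)

// splitStreams undoes the framing written by compress4X: a 6-byte jump table
// holding three little-endian uint16 lengths, followed by the four
// bitstreams; the fourth stream simply runs to the end of the payload.
func splitStreams(payload []byte) ([4][]byte, error) {
	var streams [4][]byte
	if len(payload) < 6 {
		return streams, errors.New("payload too short for jump table")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(payload[i*2]) | int(payload[i*2+1])<<8
		if start+length > len(payload) {
			return streams, errors.New("truncated stream")
		}
		streams[i] = payload[start : start+length]
		start += length
	}
	streams[3] = payload[start:]
	return streams, nil
}

func main() {
	// Jump table says the first three streams are 2, 1 and 3 bytes long.
	payload := []byte{2, 0, 1, 0, 3, 0, 0xAA, 0xBB, 0xCC, 0xD1, 0xD2, 0xD3, 0xEE}
	parts, err := splitStreams(payload)
	fmt.Println(parts, err)
}
```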
-func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - o := s.tmpOut[i] - if len(o) > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - _ = s.count // Assert that s != nil to speed up the following loop. - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else if s.prevTable[i].nBits == 0 { - reuse = false - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -//lint:ignore U1000 used for debugging -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.srcLen)) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. - huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count() == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) - huffNode[lowS].setParent(nodeNb) - huffNode[lowS-1].setParent(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].setCount(1 << 30) - } - // fake entry, strong barrier - huffNode0[0].setCount(1 << 31) - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) - huffNode0[n1+1].setParent(nodeNb) - huffNode0[n2+1].setParent(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].setNbBits(0) - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits()]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol()].nBits = v.nbBits() - } - - // 
assign value within rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. - var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count() { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits() - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits() > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) - huffNode[n].setNbBits(maxNbBits) - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits() == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits() >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits() // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count() - lowTotal := 2 * huffNode[lowPos].count() - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + - huffNode[rankLast[nBitsToDecrease]].nbBits()) - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits() == maxNbBits { - n-- - } - huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -// A nodeElt is the fields -// -// count uint32 -// parent uint16 -// symbol byte -// nbBits uint8 -// -// in some order, all squashed into an integer so that the compiler -// always loads and stores entire nodeElts instead of separate fields. -type nodeElt uint64 - -func makeNodeElt(count uint32, symbol byte) nodeElt { - return nodeElt(count) | nodeElt(symbol)<<48 -} - -func (e *nodeElt) count() uint32 { return uint32(*e) } -func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } -func (e *nodeElt) symbol() byte { return byte(*e >> 48) } -func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } - -func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } -func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } -func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 0f56b02d7..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1167 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
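The decoding tables filled in by ReadTable (below) are arrays of dEntrySingle values: the low byte of each 16-bit entry is the number of bits the code consumes and the high byte is the decoded symbol, which is why the decode loops later in this file read uint8(v.entry) and uint8(v.entry >> 8). A one-line illustration of that packing (packEntry is a hypothetical helper, not part of the package):

```go
package main

import "fmt"

// packEntry mirrors how ReadTable fills dEntrySingle:
// low byte = code length in bits, high byte = decoded symbol.
func packEntry(nBits, symbol uint8) uint16 {
	return uint16(nBits) | uint16(symbol)<<8
}

func main() {
	e := packEntry(5, 'A')
	nBits := uint8(e)                  // bits the decoder advances past
	symbol := uint8(e >> 8)            // byte appended to the output
	fmt.Printf("%d %c\n", nBits, symbol) // 5 A
}
```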
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(nil) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, fmt.Errorf("fse decompress returned: %w", err) - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - 
single[i] = d - } - *rank += length - } - - return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - bufs: &s.decPool, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 - bufs *sync.Pool -} - -func (d *Decoder) buffer() *[4][256]byte { - buf, ok := d.bufs.Get().(*[4][256]byte) - if ok { - return buf - } - return &[4][256]byte{} -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 0 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - d.bufs.Put(bufs) - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (56 + (8 - d.actualTableLog)) & 63 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[uint8(br.value>>shift)].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 56 - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - // copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4)
- for i := range br {
- offset := dstEvery * i
- endsAt := offset + remainBytes
- if endsAt > len(out) {
- endsAt = len(out)
- }
- br := &br[i]
- bitsLeft := br.remaining()
- for bitsLeft > 0 {
- if br.finished() {
- d.bufs.Put(buf)
- return nil, io.ErrUnexpectedEOF
- }
- if br.bitsRead >= 56 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value |= uint64(low) << (br.bitsRead - 32)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- // end inline...
- if offset >= endsAt {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 4")
- }
-
- // Read value and increment offset.
- v := single[br.peekByteFast()].entry
- nBits := uint8(v)
- br.advance(nBits)
- bitsLeft -= uint(nBits)
- out[offset] = uint8(v >> 8)
- offset++
- }
- if offset != endsAt {
- d.bufs.Put(buf)
- return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
- }
-
- decoded += offset - dstEvery*i
- err = br.close()
- if err != nil {
- d.bufs.Put(buf)
- return nil, err
- }
- }
- d.bufs.Put(buf)
- if dstSize != decoded {
- return nil, errors.New("corruption detected: short output block")
- }
- return dst, nil
-}
-
-// matches will compare a decoding table to a coding table.
-// Errors are written to the writer.
-// Nothing will be written if table is ok.
-func (s *Scratch) matches(ct cTable, w io.Writer) {
- if s == nil || len(s.dt.single) == 0 {
- return
- }
- dt := s.dt.single[:1<<s.actualTableLog]
- tablelog := s.actualTableLog
- ok := 0
- broken := 0
- for sym, enc := range ct {
- errs := 0
- broken++
- if enc.nBits == 0 {
- for _, dec := range dt {
- if uint8(dec.entry>>8) == byte(sym) {
- fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
- errs++
- break
- }
- }
- if errs == 0 {
- broken--
- }
- continue
- }
- // Unused bits in input
- ub := tablelog - enc.nBits
- top := enc.val << ub
- // decoder looks at top bits.
- dec := dt[top]
- if uint8(dec.entry) != enc.nBits {
- fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
- errs++
- }
- if uint8(dec.entry>>8) != uint8(sym) {
- fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
- errs++
- }
- if errs > 0 {
- fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
- continue
- }
- // Ensure that all combinations are covered.
- for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errors, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go deleted file mode 100644 index ba7e8e6b0..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ /dev/null @@ -1,226 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// This file contains the specialisation of Decoder.Decompress4X -// and Decoder.Decompress1X that use an asm implementation of thir main loops. -package huff0 - -import ( - "errors" - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog > 8. -// -//go:noescape -func decompress4x_main_loop_amd64(ctx *decompress4xContext) - -// decompress4x_8b_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog <= 8 which decodes 4 entries -// per loop. -// -//go:noescape -func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) - -// fallback8BitSize is the size where using Go version is faster. -const fallback8BitSize = 800 - -type decompress4xContext struct { - pbr *[4]bitReaderShifted - peekBits uint8 - out *byte - dstEvery int - tbl *dEntrySingle - decoded int - limit *byte -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - - use8BitTables := d.actualTableLog <= 8 - if cap(dst) < fallback8BitSize && use8BitTables { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - var decoded int - - if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { - ctx := decompress4xContext{ - pbr: &br, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - out: &out[0], - dstEvery: dstEvery, - tbl: &single[0], - limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. - } - if use8BitTables { - decompress4x_8b_main_loop_amd64(&ctx) - } else { - decompress4x_main_loop_amd64(&ctx) - } - - decoded = ctx.decoded - out = out[decoded/4:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_amd64(ctx *decompress1xContext) - -// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_bmi2(ctx *decompress1xContext) - -type decompress1xContext struct { - pbr *bitReaderShifted - peekBits uint8 - out *byte - outCap int - tbl *dEntrySingle - decoded int -} - -// Error reported by asm implementations -const error_max_decoded_size_exeeded = -1 - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. 
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:maxDecodedSize] - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - - if maxDecodedSize >= 4 { - ctx := decompress1xContext{ - pbr: &br, - out: &dst[0], - outCap: maxDecodedSize, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - tbl: &d.dt.single[0], - } - - if cpuinfo.HasBMI2() { - decompress1x_main_loop_bmi2(&ctx) - } else { - decompress1x_main_loop_amd64(&ctx) - } - if ctx.decoded == error_max_decoded_size_exeeded { - return nil, ErrMaxDecodedSizeExceeded - } - - dst = dst[:ctx.decoded] - } - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s deleted file mode 100644 index c4c7ab2d1..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ /dev/null @@ -1,830 +0,0 @@ -// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. - -//go:build amd64 && !appengine && !noasm && gc - -// func decompress4x_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), AX - MOVBQZX 8(AX), DI - MOVQ 16(AX), BX - MOVQ 48(AX), SI - MOVQ 24(AX), R8 - MOVQ 32(AX), R9 - MOVQ (AX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ (R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 24(R10) - ORQ R13, R11 - - // exhausted += (br0.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 48(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 72(R10) - ORQ R13, R11 - - // exhausted += (br1.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := 
table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 96(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 120(R10) - ORQ R13, R11 - - // exhausted += (br2.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br2.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 144(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 168(R10) - ORQ R13, R11 - - // exhausted += (br3.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVW AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x02, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), CX - MOVBQZX 8(CX), DI - MOVQ 16(CX), BX - MOVQ 48(CX), SI - MOVQ 24(CX), R8 - MOVQ 32(CX), R9 - MOVQ (CX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ (R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 24(R10) - ORQ R14, R11 - - // exhausted += 
(br0.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 48(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 72(R10) - ORQ R14, R11 - - // exhausted += (br1.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 96(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 120(R10) - ORQ R14, R11 - - // exhausted += (br2.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - 
- // val1 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 144(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 168(R10) - ORQ R14, R11 - - // exhausted += (br3.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVL AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x04, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress1x_main_loop_amd64(ctx *decompress1xContext) -TEXT ·decompress1x_main_loop_amd64(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - 
-bitReader_fillFast_1_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_2_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET - -// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) -// Requires: BMI2 -TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_1_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_2_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go deleted file mode 100644 index 908c17de6..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ /dev/null @@ -1,299 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// This file contains a generic implementation of Decoder.Decompress4X. -package huff0 - -import ( - "errors" - "fmt" -) - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. 
-// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. 
- if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - //copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index 77ecd68e0..000000000 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated. 
- // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - srcLen int - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - decPool sync.Pool // *[4][256]byte buffers. - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. -func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.srcLen = len(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. 
- hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. 
-func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go deleted file mode 100644 index 3954c5121..000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package cpuinfo gives runtime info about the current CPU. -// -// This is a very limited module meant for use internally -// in this project. For more versatile solution check -// https://github.com/klauspost/cpuid. -package cpuinfo - -// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. -func HasBMI1() bool { - return hasBMI1 -} - -// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. -func HasBMI2() bool { - return hasBMI2 -} - -// DisableBMI2 will disable BMI2, for testing purposes. -// Call returned function to restore previous state. -func DisableBMI2() func() { - old := hasBMI2 - hasBMI2 = false - return func() { - hasBMI2 = old - } -} - -// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. -func HasBMI() bool { - return HasBMI1() && HasBMI2() -} - -var hasBMI1 bool -var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go deleted file mode 100644 index e802579c4..000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package cpuinfo - -// go:noescape -func x86extensions() (bmi1, bmi2 bool) - -func init() { - hasBMI1, hasBMI2 = x86extensions() -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s deleted file mode 100644 index 4465fbe9e..000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s +++ /dev/null @@ -1,36 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -TEXT ·x86extensions(SB), NOSPLIT, $0 - // 1. determine max EAX value - XORQ AX, AX - CPUID - - CMPQ AX, $7 - JB unsupported - - // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" - MOVQ $7, AX - MOVQ $0, CX - CPUID - - BTQ $3, BX // bit 3 = BMI1 - SETCS AL - - BTQ $8, BX // bit 8 = BMI2 - SETCS AH - - MOVB AL, bmi1+0(FP) - MOVB AH, bmi2+1(FP) - RET - -unsupported: - XORQ AX, AX - MOVB AL, bmi1+0(FP) - MOVB AL, bmi2+1(FP) - RET diff --git a/vendor/github.com/klauspost/compress/internal/race/norace.go b/vendor/github.com/klauspost/compress/internal/race/norace.go deleted file mode 100644 index affbbbb59..000000000 --- a/vendor/github.com/klauspost/compress/internal/race/norace.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !race - -package race - -func ReadSlice[T any](s []T) { -} - -func WriteSlice[T any](s []T) { -} diff --git a/vendor/github.com/klauspost/compress/internal/race/race.go b/vendor/github.com/klauspost/compress/internal/race/race.go deleted file mode 100644 index f5e240dcd..000000000 --- a/vendor/github.com/klauspost/compress/internal/race/race.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build race - -package race - -import ( - "runtime" - "unsafe" -) - -func ReadSlice[T any](s []T) { - if len(s) == 0 { - return - } - runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) -} - -func WriteSlice[T any](s []T) { - if len(s) == 0 { - return - } - runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE deleted file mode 100644 index 6050c10f4..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a49d..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. 
- ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6b8..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040a5..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. 
-func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
- // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. 
- n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 2754bac6f..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// EncodeBlockInto exposes encodeBlock but checks dst size. -func EncodeBlockInto(dst, src []byte) (d int) { - if MaxEncodedLen(len(src)) > len(dst) { - return 0 - } - - // encodeBlock breaks on too big blocks, so split. - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return d -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. 
- tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. 
If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4aa..000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. 
-*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore deleted file mode 100644 index 3a89c6e3e..000000000 --- a/vendor/github.com/klauspost/compress/s2/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE deleted file mode 100644 index 1d2d645bd..000000000 --- a/vendor/github.com/klauspost/compress/s2/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md deleted file mode 100644 index 8284bb081..000000000 --- a/vendor/github.com/klauspost/compress/s2/README.md +++ /dev/null @@ -1,1120 +0,0 @@ -# S2 Compression - -S2 is an extension of [Snappy](https://github.com/google/snappy). - -S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads. - -Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy. -This means that S2 can seamlessly replace Snappy without converting compressed content. - -S2 can produce Snappy compatible output, faster and better than Snappy. -If you want full benefit of the changes you should use s2 without Snappy compatibility. - -S2 is designed to have high throughput on content that cannot be compressed. -This is important, so you don't have to worry about spending CPU cycles on already compressed data. - -## Benefits over Snappy - -* Better compression -* Adjustable compression (3 levels) -* Concurrent stream compression -* Faster decompression, even for Snappy compatible content -* Concurrent Snappy/S2 stream decompression -* Skip forward in compressed stream -* Random seeking with indexes -* Compatible with reading Snappy compressed content -* Smaller block size overhead on incompressible blocks -* Block concatenation -* Block Dictionary support -* Uncompressed stream mode -* Automatic stream size padding -* Snappy compatible block compression - -## Drawbacks over Snappy - -* Not optimized for 32 bit systems -* Streams use slightly more memory due to larger blocks and concurrency (configurable) - -# Usage - -Installation: `go get -u github.com/klauspost/compress/s2` - -Full package documentation: - -[![godoc][1]][2] - -[1]: https://godoc.org/github.com/klauspost/compress?status.svg -[2]: https://godoc.org/github.com/klauspost/compress/s2 - -## Compression - -```Go -func EncodeStream(src io.Reader, dst io.Writer) error { - enc := s2.NewWriter(dst) - _, err := io.Copy(enc, src) - if err != nil { - enc.Close() - return err - } - // Blocks until compression is done. - return enc.Close() -} -``` - -You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete. - -For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method. - -The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2. -It is possible to flush any buffered data using the `Flush()` method. -This will block until all data sent to the encoder has been written to the output. - -S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader. - -As a final method to compress data, if you have a single block of data you would like to have encoded as a stream, -a slightly more efficient method is to use the `EncodeBuffer` method. 
-This will take ownership of the buffer until the stream is closed. - -```Go -func EncodeStream(src []byte, dst io.Writer) error { - enc := s2.NewWriter(dst) - // The encoder owns the buffer until Flush or Close is called. - err := enc.EncodeBuffer(buf) - if err != nil { - enc.Close() - return err - } - // Blocks until compression is done. - return enc.Close() -} -``` - -Each call to `EncodeBuffer` will result in discrete blocks being created without buffering, -so it should only be used a single time per stream. -If you need to write several blocks, you should use the regular io.Writer interface. - - -## Decompression - -```Go -func DecodeStream(src io.Reader, dst io.Writer) error { - dec := s2.NewReader(src) - _, err := io.Copy(dst, dec) - return err -} -``` - -Similar to the Writer, a Reader can be reused using the `Reset` method. - -For the best possible throughput, there is a `EncodeBuffer(buf []byte)` function available. -However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed. - -For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`. -Do however note that these functions (similar to Snappy) does not provide validation of data, -so data corruption may be undetected. Stream encoding provides CRC checks of data. - -It is possible to efficiently skip forward in a compressed stream using the `Skip()` method. -For big skips the decompressor is able to skip blocks without decompressing them. - -## Single Blocks - -Similar to Snappy S2 offers single block compression. -Blocks do not offer the same flexibility and safety as streams, -but may be preferable for very small payloads, less than 100K. - -Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result. -It is possible to provide a destination buffer. -If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used. -If not a new will be allocated. - -Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression. - -Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`. -Again an optional destination buffer can be supplied. -The `s2.DecodedLen(src)` can be used to get the minimum capacity needed. -If that is not satisfied a new buffer will be allocated. - -Block function always operate on a single goroutine since it should only be used for small payloads. - -# Commandline tools - -Some very simply commandline tools are provided; `s2c` for compression and `s2d` for decompression. - -Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases). - -Installing then requires Go to be installed. To install them, use: - -`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest` - -To build binaries to the current folder use: - -`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d` - - -## s2c - -``` -Usage: s2c [options] file1 file2 - -Compresses all files supplied as input separately. -Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'. -By default output files will be overwritten. -Use - as the only file name to read from stdin and write to stdout. - -Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt -Directories can be wildcards as well. 
testdir/*/*.txt will match testdir/subdir/b.txt - -File names beginning with 'http://' and 'https://' will be downloaded and compressed. -Only http response code 200 is accepted. - -Options: - -bench int - Run benchmark n times. No output will be written - -blocksize string - Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M") - -c Write all output to stdout. Multiple input files will be concatenated - -cpu int - Compress using this amount of threads (default 32) - -faster - Compress faster, but with a minor compression loss - -help - Display help - -index - Add seek index (default true) - -o string - Write output to another file. Single input file only - -pad string - Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1") - -q Don't write any output to terminal, except errors - -rm - Delete source file(s) after successful compression - -safe - Do not overwrite output files - -slower - Compress more, but a lot slower - -snappy - Generate Snappy compatible output stream - -verify - Verify written files - -``` - -## s2d - -``` -Usage: s2d [options] file1 file2 - -Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'. -Output file names have the extension removed. By default output files will be overwritten. -Use - as the only file name to read from stdin and write to stdout. - -Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt -Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt - -File names beginning with 'http://' and 'https://' will be downloaded and decompressed. -Extensions on downloaded files are ignored. Only http response code 200 is accepted. - -Options: - -bench int - Run benchmark n times. No output will be written - -c Write all output to stdout. Multiple input files will be concatenated - -help - Display help - -o string - Write output to another file. Single input file only - -offset string - Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index - -q Don't write any output to terminal, except errors - -rm - Delete source file(s) after successful decompression - -safe - Do not overwrite output files - -tail string - Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index - -verify - Verify files, but do not write output -``` - -## s2sx: self-extracting archives - -s2sx allows creating self-extracting archives with no dependencies. - -By default, executables are created for the same platforms as the host os, -but this can be overridden with `-os` and `-arch` parameters. - -Extracted files have 0666 permissions, except when untar option used. - -``` -Usage: s2sx [options] file1 file2 - -Compresses all files supplied as input separately. -If files have '.s2' extension they are assumed to be compressed already. -Output files are written as 'filename.s2sx' and with '.exe' for windows targets. -If output is big, an additional file with ".more" is written. This must be included as well. -By default output files will be overwritten. - -Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt -Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt - -Options: - -arch string - Destination architecture (default "amd64") - -c Write all output to stdout. 
Multiple input files will be concatenated - -cpu int - Compress using this amount of threads (default 32) - -help - Display help - -max string - Maximum executable size. Rest will be written to another file. (default "1G") - -os string - Destination operating system (default "windows") - -q Don't write any output to terminal, except errors - -rm - Delete source file(s) after successful compression - -safe - Do not overwrite output files - -untar - Untar on destination -``` - -Available platforms are: - - * darwin-amd64 - * darwin-arm64 - * linux-amd64 - * linux-arm - * linux-arm64 - * linux-mips64 - * linux-ppc64le - * windows-386 - * windows-amd64 - -By default, there is a size limit of 1GB for the output executable. - -When this is exceeded the remaining file content is written to a file called -output+`.more`. This file must be included for a successful extraction and -placed alongside the executable for a successful extraction. - -This file *must* have the same name as the executable, so if the executable is renamed, -so must the `.more` file. - -This functionality is disabled with stdin/stdout. - -### Self-extracting TAR files - -If you wrap a TAR file you can specify `-untar` to make it untar on the destination host. - -Files are extracted to the current folder with the path specified in the tar file. - -Note that tar files are not validated before they are wrapped. - -For security reasons files that move below the root folder are not allowed. - -# Performance - -This section will focus on comparisons to Snappy. -This package is solely aimed at replacing Snappy as a high speed compression package. -If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) -gives better compression, but typically at speeds slightly below "better" mode in this package. - -Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation. - -Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput. - -A "better" compression mode is also available. This allows to trade a bit of speed for a minor compression gain. -The content compressed in this mode is fully compatible with the standard decoder. 
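
For block payloads, the same speed/ratio trade-off is exposed through `EncodeBetter`, which the removed README lists alongside `Encode()` and `Decode()`. Below is a minimal illustrative sketch of that block-level API; the sample payload and the printed size comparison are made up for demonstration and are not taken from the package itself.

```Go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("payload "), 64)

	// Standard block mode: fastest.
	fast := s2.Encode(nil, src)

	// "Better" block mode: somewhat slower, usually smaller output.
	// The output remains decodable by the standard s2.Decode.
	better := s2.EncodeBetter(nil, src)

	fmt.Printf("input=%d fast=%d better=%d\n", len(src), len(fast), len(better))

	decoded, err := s2.Decode(nil, better)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(decoded, src)) // true
}
```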
- -Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU): - -| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | -|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| -| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% | -| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - | -| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% | -| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - | -| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% | -| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - | -| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% | -| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - | -| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% | -| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - | -| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% | -| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - | -| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% | -| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - | -| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% | -| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - | -| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% | -| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - | -| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% | -| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - | -| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% | -| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - | - -### Legend - -* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core. -* `S2 Throughput`: Throughput of S2 in MB/s. -* `S2 % smaller`: How many percent of the Snappy output size is S2 better. -* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy. -* `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy. -* `"better" % smaller`: How many percent of the Snappy output size is S2 better when using "better" compression. - -There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads. - -Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% of Snappy size. - -The "better" compression mode sees a good improvement in all cases, but usually at a performance cost. - -Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup. 
-This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above). - -## Decompression - -S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used. - -S2 vs Snappy **decompression** speed. Both operating on single core: - -| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy | -|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------| -| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x | -| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x | -| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x | -| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x | -| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x | -| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x | -| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x | -| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x | -| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x | -| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x | -| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x | - -### Legend - -* `S2 Throughput`: Decompression speed of S2 encoded content. -* `Better Throughput`: Decompression speed of S2 "better" encoded content. -* `vs Snappy`: Decompression speed of S2 "better" mode compared to Snappy and absolute speed. - - -While the decompression code hasn't changed, there is a significant speedup in decompression speed. -S2 prefers longer matches and will typically only find matches that are 6 bytes or longer. -While this reduces compression a bit, it improves decompression speed. - -The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy. - -Without assembly decompression is also very fast; single goroutine decompression speed. No assembly: - -| File | S2 Throughput | S2 throughput | -|--------------------------------|---------------|---------------| -| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s | -| 10gb.tar.s2 | 1.30x | 867.07 MB/s | -| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s | -| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s | -| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s | -| enwik9.s2 | 1.67x | 681.53 MB/s | -| adresser.json.s2 | 3.41x | 4230.53 MB/s | -| silesia.tar.s2 | 1.52x | 811.58 | - -Even though S2 typically compresses better than Snappy, decompression speed is always better. - -### Concurrent Stream Decompression - -For full stream decompression S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent) -that will decode a full stream using multiple goroutines. 
-
-Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3`, best of 3:
-
-| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` |
-|-------------------------------------------|------------|------------|------------|------------|-------------|
-| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
-| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s |
-| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
-| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
-| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s |
-
-Scaling can be expected to be pretty linear until memory bandwidth is saturated.
-
-For now, `DecodeConcurrent` can only be used for full streams, without seeking or combining with regular reads.
-
-## Block compression
-
-When compressing blocks, no concurrent compression is performed, just as with Snappy.
-This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
-
-An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
-In rare, worst-case scenarios, Snappy blocks could be significantly bigger than the input.
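-
-As a minimal sketch, assuming the plain block helpers `s2.Encode`, `s2.EncodeBetter` and `s2.EncodeBest` (same shape as the Snappy-compatible encoders shown later; the `roundTrip` helper is illustrative), a block round trip looks roughly like this:
-
-```Go
-import "github.com/klauspost/compress/s2"
-
-// roundTrip compresses a single block and decodes it again.
-// The nil destination lets s2 allocate; pass a buffer of
-// s2.MaxEncodedLen(len(payload)) bytes to avoid allocations.
-func roundTrip(payload []byte) ([]byte, error) {
-	compressed := s2.Encode(nil, payload)
-	// s2.EncodeBetter / s2.EncodeBest trade speed for smaller output.
-	return s2.Decode(nil, compressed)
-}
-```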
-
-### Mixed content blocks
-
-The most reliable benchmark is a wide dataset.
-For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
-53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
-
-| * | Input | Output | Reduction | MB/s |
-|-------------------|------------|------------|------------|------------|
-| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
-| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
-| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
-| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
-| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
-| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
-
-S2 delivers both the best single-threaded throughput with regular mode and the best compression rate with "best".
-"Better" mode provides the same compression speed as LZ4 with a better compression ratio.
-
-When producing Snappy compatible output, S2 still delivers better throughput (150MB/s more) and better compression.
-
-As can be seen from the other benchmarks, decompression should also be easier on the S2 generated output.
-
-Though they cannot be compared directly due to different decompression speeds, here are the speed/size comparisons for
-other Go compressors:
-
-| * | Input | Output | Reduction | MB/s |
-|-------------------|------------|------------|-----------|--------|
-| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
-| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
-| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
-| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
-
-### Standard block compression
-
-Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
-So individual benchmarks should only be seen as a guideline and the overall picture is more important.
-
-These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
-
-Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
-
-AMD64 assembly is used for both S2 and Snappy.
-
-| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
-|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
-| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s |
-| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s |
-| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s |
-| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s |
-| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s |
-| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s |
-| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s |
-| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s |
-| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s |
-| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s |
-| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s |
-| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
-| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
-| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
-| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
-| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
-
-Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
-
-Decompression speed is better than Snappy, except in one case.
-
-Since payloads are very small, the variance in terms of size is rather big, so they should only be seen as a general guideline.
-
-Size is on average around Snappy, but varies on content type.
-In cases where compression is worse, it is usually compensated by a speed boost.
-
-### Better compression
-
-Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
-So individual benchmarks should only be seen as a guideline and the overall picture is more important.
- -| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec | -|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------| -| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s | -| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s | -| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s | -| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s | -| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s | -| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s | -| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s | -| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s | -| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s | -| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s | -| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s | -| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s | -| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s | -| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s | -| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s | -| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s | - - -Except for the mostly incompressible JPEG image compression is better and usually in the -double digits in terms of percentage reduction over Snappy. - -The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder -to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down. - -This mode aims to provide better compression at the expense of performance and achieves that -without a huge performance penalty, except on very small blocks. - -Decompression speed suffers a little compared to the regular S2 mode, -but still manages to be close to Snappy in spite of increased compression. - -# Best compression mode - -S2 offers a "best" compression mode. - -This will compress as much as possible with little regard to CPU usage. - -Mainly for offline compression, but where decompression speed should still -be high and compatible with other S2 compressed data. - -Some examples compared on 16 core CPU, amd64 assembly used: - -``` -* enwik10 -Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s -Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s -Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s - -* github-june-2days-2019.json -Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s -Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s -Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s - -* nyc-taxi-data-10M.csv -Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s -Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s -Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s - -* 10gb.tar -Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s -Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s -Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/ - -* consensus.db.10gb -Default... 
10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
-Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
-Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
-```
-
-Decompression speed should be around the same as using the 'better' compression mode.
-
-## Dictionaries
-
-*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
-either encoding or decoding. Performance improvements can be expected in the future.*
-
-Adding dictionaries allows providing a custom dictionary that will serve as a lookup in the beginning of blocks.
-
-The same dictionary *must* be used for both encoding and decoding.
-S2 does not keep track of whether the same dictionary is used,
-and using the wrong dictionary will most often not result in an error when decompressing.
-
-Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
-This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
-and treat the blocks similarly.
-
-Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
-the same usage scenario applies to S2 dictionaries.
-
-> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
-
-S2 further limits the dictionary to only be enabled on the first 64KB of a block.
-This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
-
-### Compression
-
-Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
-and a 64KB dictionary trained with zStandard, the following sizes can be achieved.
-
-| | Default | Better | Best |
-|--------------------|------------------|------------------|------------------|
-| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
-| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 (10.49%) |
-
-So for highly repetitive content, this case provides an almost 3x reduction in size.
-
-For less uniform data we will use the Go source code tree.
-Compressing the first 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
-
-| | Default | Better | Best |
-|--------------------|-------------------|-------------------|-------------------|
-| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
-| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
-| Saving/file | 362 bytes | 428 bytes | 472 bytes |
-
-### Creating Dictionaries
-
-There are no tools to create dictionaries in S2.
-However, there are multiple ways to create a useful dictionary:
-
-#### Using a Sample File
-
-If your input is very uniform, you can just use a sample file as the dictionary.
-
-For example, in the `github_users_sample_set` above, the average compression only goes up from
-10.49% to 11.48% when using the first file as the dictionary compared to using a dedicated dictionary.
-
-```Go
- // Read a sample
- sample, err := os.ReadFile("sample.json")
-
- // Create a dictionary.
- dict := s2.MakeDict(sample, nil) - - // b := dict.Bytes() will provide a dictionary that can be saved - // and reloaded with s2.NewDict(b). - - // To encode: - encoded := dict.Encode(nil, file) - - // To decode: - decoded, err := dict.Decode(nil, file) -``` - -#### Using Zstandard - -Zstandard dictionaries can easily be converted to S2 dictionaries. - -This can be helpful to generate dictionaries for files that don't have a fixed structure. - - -Example, with training set files placed in `./training-set`: - -`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict` - -This will create a dictionary of 64KB, that can be converted to a dictionary like this: - -```Go - // Decode the Zstandard dictionary. - insp, err := zstd.InspectDictionary(zdict) - if err != nil { - panic(err) - } - - // We are only interested in the contents. - // Assume that files start with "// Copyright (c) 2023". - // Search for the longest match for that. - // This may save a few bytes. - dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023")) - - // b := dict.Bytes() will provide a dictionary that can be saved - // and reloaded with s2.NewDict(b). - - // We can now encode using this dictionary - encodedWithDict := dict.Encode(nil, payload) - - // To decode content: - decoded, err := dict.Decode(nil, encodedWithDict) -``` - -It is recommended to save the dictionary returned by ` b:= dict.Bytes()`, since that will contain only the S2 dictionary. - -This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized. - -Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files. -This can be omitted, at the expense of a few bytes. - -# Snappy Compatibility - -S2 now offers full compatibility with Snappy. - -This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output. - -There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by -simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`. -This uses "better" mode for all operations. -If you would like more control, you can use the s2 package as described below: - -## Blocks - -Snappy compatible blocks can be generated with the S2 encoder. -Compression and speed is typically a bit better `MaxEncodedLen` is also smaller for smaller memory usage. Replace - -| Snappy | S2 replacement | -|---------------------------|-----------------------| -| snappy.Encode(...) | s2.EncodeSnappy(...) | -| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) | - -`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output. - -`s2.ConcatBlocks` is compatible with snappy blocks. - -Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z), -53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used: - -| Encoder | Size | MB/s | Reduction | -|-----------------------|------------|------------|------------| -| snappy.Encode | 1128706759 | 725.59 | 71.89% | -| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% | -| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% | -| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** | - -## Streams - -For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`. 
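-
-A short sketch of both replacements (the helper names here are illustrative, not part of the package):
-
-```Go
-import (
-	"io"
-
-	"github.com/klauspost/compress/s2"
-)
-
-// encodeSnappyBlock produces a block that any Snappy decoder can read.
-// s2.EncodeSnappyBetter / s2.EncodeSnappyBest compress more and stay compatible.
-func encodeSnappyBlock(src []byte) []byte {
-	return s2.EncodeSnappy(nil, src)
-}
-
-// encodeSnappyStream writes a Snappy-compatible framed stream.
-func encodeSnappyStream(w io.Writer, r io.Reader) error {
-	enc := s2.NewWriter(w, s2.WriterSnappyCompat())
-	if _, err := io.Copy(enc, r); err != nil {
-		enc.Close()
-		return err
-	}
-	return enc.Close()
-}
-```
-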
-All other options are available, but note that the block size limit is different for Snappy.
-
-Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
-
-| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best |
-|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
-| nyc-taxi-data-10M.csv | 1316042016 - 539.47MB/s | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
-| enwik10 (xml) | 5088294643 - 451.13MB/s | 5175840939 - 9440.69MB/s | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
-| 10gb.tar (mixed) | 6056946612 - 729.73MB/s | 6208571995 - 9978.05MB/s | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
-| github-june-2days-2019.json | 1525176492 - 933.00MB/s | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
-| consensus.db.10gb (db) | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
-
-# Decompression
-
-All decompression functions map directly to equivalent s2 functions.
-
-| Snappy | S2 replacement |
-|------------------------|--------------------|
-| snappy.Decode(...) | s2.Decode(...) |
-| snappy.DecodedLen(...) | s2.DecodedLen(...) |
-| snappy.NewReader(...) | s2.NewReader(...) |
-
-Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
-are also available for Snappy streams.
-
-If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
-on your Reader will reduce memory consumption.
-
-# Concatenating blocks and streams
-
-Concatenating streams will concatenate the output of both without recompressing them.
-While this is inefficient in terms of compression, it might be usable in certain scenarios.
-The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
-
-Blocks can be concatenated using the `ConcatBlocks` function.
-
-Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
-Streams with indexes (see below) will currently not work on concatenated streams.
-
-# Stream Seek Index
-
-S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
-
-The index can either be appended to the stream as a skippable block or returned for separate storage.
-
-When the index is appended to a stream it will be skipped by regular decoders,
-so the output remains compatible with other decoders.
-
-## Creating an Index
-
-To automatically add an index to a stream, add the `WriterAddIndex()` option to your writer.
-Then the index will be added to the stream when `Close()` is called.
-
-```
- // Add Index to stream...
- enc := s2.NewWriter(w, s2.WriterAddIndex())
- io.Copy(enc, r)
- enc.Close()
-```
-
-If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
-This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
-
-```
- // Get index for separate storage...
- enc := s2.NewWriter(w)
- io.Copy(enc, r)
- index, err := enc.CloseIndex()
-```
-
-The `index` can then be used without needing to read it back from the stream.
-This means the index can be used without needing to seek to the end of the stream,
-or for manually forwarding streams. See below.
- -Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function. - -## Using Indexes - -To use indexes there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available. - -Calling ReadSeeker will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader. - -If 'random' is specified the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported. -Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface. - -``` - dec := s2.NewReader(r) - rs, err := dec.ReadSeeker(false, nil) - rs.Seek(wantOffset, io.SeekStart) -``` - -Get a seeker to seek forward. Since no index is provided, the index is read from the stream. -This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface. - -A custom index can be specified which will be used if supplied. -When using a custom index, it will not be read from the input stream. - -``` - dec := s2.NewReader(r) - rs, err := dec.ReadSeeker(false, index) - rs.Seek(wantOffset, io.SeekStart) -``` - -This will read the index from `index`. Since we specify non-random (forward only) seeking `r` does not have to be an io.Seeker - -``` - dec := s2.NewReader(r) - rs, err := dec.ReadSeeker(true, index) - rs.Seek(wantOffset, io.SeekStart) -``` - -Finally, since we specify that we want to do random seeking `r` must be an io.Seeker. - -The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader, -meaning changes performed to one is reflected in the other. - -To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` can be used. - -## Manually Forwarding Streams - -Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type. -This can be used for parsing indexes, either separate or in streams. - -In some cases it may not be possible to serve a seekable stream. -This can for instance be an HTTP stream, where the Range request -is sent at the start of the stream. - -With a little bit of extra code it is still possible to use indexes -to forward to specific offset with a single forward skip. - -It is possible to load the index manually like this: -``` - var index s2.Index - _, err = index.Load(idxBytes) -``` - -This can be used to figure out how much to offset the compressed stream: - -``` - compressedOffset, uncompressedOffset, err := index.Find(wantOffset) -``` - -The `compressedOffset` is the number of bytes that should be skipped -from the beginning of the compressed file. - -The `uncompressedOffset` will then be offset of the uncompressed bytes returned -when decoding from that position. This will always be <= wantOffset. - -When creating a decoder it must be specified that it should *not* expect a stream identifier -at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset` -we create the decoder like this: - -``` - dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier()) -``` - -We are not completely done. We still need to forward the stream the uncompressed bytes we didn't want. 
-This is done using the regular "Skip" function: - -``` - err = dec.Skip(wantOffset - uncompressedOffset) -``` - -This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset. - -# Compact storage - -For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from -a serialized index. If you remove the header it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load). - -This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. Returns nil if headers contains errors. - -## Index Format: - -Each block is structured as a snappy skippable block, with the chunk ID 0x99. - -The block can be read from the front, but contains information so it can be read from the back as well. - -Numbers are stored as fixed size little endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding), -with un-encoded value length of 64 bits, unless other limits are specified. - -| Content | Format | -|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------| -| ID, `[1]byte` | Always 0x99. | -| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. | -| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". | -| UncompressedSize, Varint | Total Uncompressed size. | -| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. | -| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. | -| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. | -| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. | -| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. | -| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. | -| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. | -| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. | - -For regular streams the uncompressed offsets are fully predictable, -so `HasUncompressedOffsets` allows to specify that compressed blocks all have -exactly `EstBlockSize` bytes of uncompressed content. - -Entries *must* be in order, starting with the lowest offset, -and there *must* be no uncompressed offset duplicates. -Entries *may* point to the start of a skippable block, -but it is then not allowed to also have an entry for the next block since -that would give an uncompressed offset duplicate. - -There is no requirement for all blocks to be represented in the index. -In fact there is a maximum of 65536 block entries in an index. - -The writer can use any method to reduce the number of entries. -An implicit block start at 0,0 can be assumed. - -### Decoding entries: - -``` -// Read Uncompressed entries. 
-// Each assumes EstBlockSize delta from previous. -for each entry { - uOff = 0 - if HasUncompressedOffsets == 1 { - uOff = ReadVarInt // Read value from stream - } - - // Except for the first entry, use previous values. - if entryNum == 0 { - entry[entryNum].UncompressedOffset = uOff - continue - } - - // Uncompressed uses previous offset and adds EstBlockSize - entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff -} - - -// Guess that the first block will be 50% of uncompressed size. -// Integer truncating division must be used. -CompressGuess := EstBlockSize / 2 - -// Read Compressed entries. -// Each assumes CompressGuess delta from previous. -// CompressGuess is adjusted for each value. -for each entry { - cOff = ReadVarInt // Read value from stream - - // Except for the first entry, use previous values. - if entryNum == 0 { - entry[entryNum].CompressedOffset = cOff - continue - } - - // Compressed uses previous and our estimate. - entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff - - // Adjust compressed offset for next loop, integer truncating division must be used. - CompressGuess += cOff/2 -} -``` - -To decode from any given uncompressed offset `(wantOffset)`: - -* Iterate entries until `entry[n].UncompressedOffset > wantOffset`. -* Start decoding from `entry[n-1].CompressedOffset`. -* Discard `entry[n-1].UncompressedOffset - wantOffset` bytes from the decoded stream. - -See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface. - - -# Format Extensions - -* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`. -* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB). -* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset. - -Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0. - -The length is specified by reading the 3-bit length specified in the tag and decode using this table: - -| Length | Actual Length | -|--------|----------------------| -| 0 | 4 | -| 1 | 5 | -| 2 | 6 | -| 3 | 7 | -| 4 | 8 | -| 5 | 8 + read 1 byte | -| 6 | 260 + read 2 bytes | -| 7 | 65540 + read 3 bytes | - -This allows any repeat offset + length to be represented by 2 to 5 bytes. -It also allows to emit matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies. - -Lengths are stored as little endian values. - -The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams. - -Default streaming block size is 1MB. - -# Dictionary Encoding - -Adding dictionaries allow providing a custom dictionary that will serve as lookup in the beginning of blocks. - -A dictionary provides an initial repeat value that can be used to point to a common header. - -Other than that the dictionary contains values that can be used as back-references. - -Often used data should be placed at the *end* of the dictionary since offsets < 2048 bytes will be smaller. - -## Format - -Dictionary *content* must at least 16 bytes and less or equal to 64KiB (65536 bytes). 
- -Encoding: `[repeat value (uvarint)][dictionary content...]` - -Before the dictionary content, an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset. -This value is an offset into the dictionary content and not a back-reference offset, -so setting this to 0 will make the repeat value point to the first value of the dictionary. - -The value must be less than the dictionary length-8 - -## Encoding - -From the decoder point of view the dictionary content is seen as preceding the encoded content. - -`[dictionary content][decoded output]` - -Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block. - -Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data. -However, if a copy ends at the end of the dictionary the next repeat will point to the start of the decoded buffer, which is allowed. - -The first match can be a repeat value, which will use the repeat offset stored in the dictionary. - -When 64KB (65536 bytes) has been en/decoded it is no longer allowed to reference the dictionary, -neither by a copy nor repeat operations. -If the boundary is crossed while copying from the dictionary, the operation should complete, -but the next instruction is not allowed to reference the dictionary. - -Valid blocks encoded *without* a dictionary can be decoded with any dictionary. -There are no checks whether the supplied dictionary is the correct for a block. -Because of this there is no overhead by using a dictionary. - -## Example - -This is the dictionary content. Elements are separated by `[]`. - -Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`. - -Initial repeat offset is set at 10, which is the letter `2`. - -Encoded `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]` - -Decoded: `[10][ bananas w][hich][ were ][brown ][were added]` - -Output: `10 bananas which were brown were added` - - -## Streams - -For streams each block can use the dictionary. - -The dictionary cannot not currently be provided on the stream. - - -# LICENSE - -This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation. - -Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go deleted file mode 100644 index 264ffd0a9..000000000 --- a/vendor/github.com/klauspost/compress/s2/decode.go +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "encoding/binary" - "errors" - "fmt" - "strconv" - - "github.com/klauspost/compress/internal/race" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("s2: corrupt input") - // ErrCRC reports that the input failed CRC validation (streams only) - ErrCRC = errors.New("s2: corrupt input, crc mismatch") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("s2: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("s2: unsupported input") -) - -// DecodedLen returns the length of the decoded block. 
-func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= cap(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - - race.WriteSlice(dst) - race.ReadSlice(src[s:]) - - if s2Decode(dst, src[s:]) != 0 { - return nil, ErrCorrupt - } - return dst, nil -} - -// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func s2DecodeDict(dst, src []byte, dict *Dict) int { - if dict == nil { - return s2Decode(dst, src) - } - const debug = false - const debugErrs = debug - - if debug { - fmt.Println("Starting decode, dst len:", len(dst)) - } - var d, s, length int - offset := len(dict.dict) - dict.repeat - - // As long as we can read at least 5 bytes... - for s < len(src)-5 { - // Removing bounds checks is SLOWER, when if doing - // in := src[s:s+5] - // Checked on Go 1.18 - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - x = uint32(src[s-1]) - case x == 61: - in := src[s : s+3] - x = uint32(in[1]) | uint32(in[2])<<8 - s += 3 - case x == 62: - in := src[s : s+4] - // Load as 32 bit and shift down. 
- x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - x >>= 8 - s += 4 - case x == 63: - in := src[s : s+5] - x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 - s += 5 - } - length = int(x) + 1 - if debug { - fmt.Println("literals, length:", length, "d-after:", d+length) - } - if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { - if debugErrs { - fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) - } - return decodeErrCodeCorrupt - } - - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - length = int(src[s-2]) >> 2 & 0x7 - if toffset == 0 { - if debug { - fmt.Print("(repeat) ") - } - // keep last offset - switch length { - case 5: - length = int(src[s]) + 4 - s += 1 - case 6: - in := src[s : s+2] - length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) - s += 2 - case 7: - in := src[s : s+3] - length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) - s += 3 - default: // 0-> 4 - } - } else { - offset = toffset - } - length += 4 - case tagCopy2: - in := src[s : s+3] - offset = int(uint32(in[1]) | uint32(in[2])<<8) - length = 1 + int(in[0])>>2 - s += 3 - - case tagCopy4: - in := src[s : s+5] - offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) - length = 1 + int(in[0])>>2 - s += 5 - } - - if offset <= 0 || length > len(dst)-d { - if debugErrs { - fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) - } - return decodeErrCodeCorrupt - } - - // copy from dict - if d < offset { - if d > MaxDictSrcOffset { - if debugErrs { - fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) - } - return decodeErrCodeCorrupt - } - startOff := len(dict.dict) - offset + d - if startOff < 0 || startOff+length > len(dict.dict) { - if debugErrs { - fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict)) - } - return decodeErrCodeCorrupt - } - if debug { - fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff) - } - copy(dst[d:d+length], dict.dict[startOff:]) - d += length - continue - } - - if debug { - fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) - } - - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset > length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - - // Remaining with extra checks... - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { - if debugErrs { - fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) - } - return decodeErrCodeCorrupt - } - if debug { - fmt.Println("literals, length:", length, "d-after:", d+length) - } - - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - length = int(src[s-2]) >> 2 & 0x7 - toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - if toffset == 0 { - if debug { - fmt.Print("(repeat) ") - } - // keep last offset - switch length { - case 5: - s += 1 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - length = int(uint32(src[s-1])) + 4 - case 6: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) - case 7: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) - default: // 0-> 4 - } - } else { - offset = toffset - } - length += 4 - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- if debugErrs { - fmt.Println("src went oob") - } - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || length > len(dst)-d { - if debugErrs { - fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) - } - return decodeErrCodeCorrupt - } - - // copy from dict - if d < offset { - if d > MaxDictSrcOffset { - if debugErrs { - fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) - } - return decodeErrCodeCorrupt - } - rOff := len(dict.dict) - (offset - d) - if debug { - fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff) - } - if rOff+length > len(dict.dict) { - if debugErrs { - fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length) - } - return decodeErrCodeCorrupt - } - if rOff < 0 { - if debugErrs { - fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length) - } - return decodeErrCodeCorrupt - } - copy(dst[d:d+length], dict.dict[rOff:]) - d += length - continue - } - - if debug { - fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) - } - - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset > length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - - if d != len(dst) { - if debugErrs { - fmt.Println("wanted length", len(dst), "got", d) - } - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s deleted file mode 100644 index 9b105e03c..000000000 --- a/vendor/github.com/klauspost/compress/s2/decode_amd64.s +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -#define R_TMP0 AX -#define R_TMP1 BX -#define R_LEN CX -#define R_OFF DX -#define R_SRC SI -#define R_DST DI -#define R_DBASE R8 -#define R_DLEN R9 -#define R_DEND R10 -#define R_SBASE R11 -#define R_SLEN R12 -#define R_SEND R13 -#define R_TMP2 R14 -#define R_TMP3 R15 - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. 
The register allocation: -// - R_TMP0 scratch -// - R_TMP1 scratch -// - R_LEN length or x (shared) -// - R_OFF offset -// - R_SRC &src[s] -// - R_DST &dst[d] -// + R_DBASE dst_base -// + R_DLEN dst_len -// + R_DEND dst_base + dst_len -// + R_SBASE src_base -// + R_SLEN src_len -// + R_SEND src_base + src_len -// - R_TMP2 used by doCopy -// - R_TMP3 used by doCopy -// -// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. -// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. -TEXT ·s2Decode(SB), NOSPLIT, $48-56 - // Initialize R_SRC, R_DST and R_DBASE-R_SEND. - MOVQ dst_base+0(FP), R_DBASE - MOVQ dst_len+8(FP), R_DLEN - MOVQ R_DBASE, R_DST - MOVQ R_DBASE, R_DEND - ADDQ R_DLEN, R_DEND - MOVQ src_base+24(FP), R_SBASE - MOVQ src_len+32(FP), R_SLEN - MOVQ R_SBASE, R_SRC - MOVQ R_SBASE, R_SEND - ADDQ R_SLEN, R_SEND - XORQ R_OFF, R_OFF - -loop: - // for s < len(src) - CMPQ R_SRC, R_SEND - JEQ end - - // R_LEN = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (R_SRC), R_LEN - MOVL R_LEN, R_TMP1 - ANDL $3, R_TMP1 - CMPL R_TMP1, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, R_LEN - CMPL R_LEN, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ R_SRC - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that R_LEN == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // R_LEN can hold 64 bits, so the increment cannot overflow. - INCQ R_LEN - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // R_TMP0 = len(dst) - d - // R_TMP1 = len(src) - s - MOVQ R_DEND, R_TMP0 - SUBQ R_DST, R_TMP0 - MOVQ R_SEND, R_TMP1 - SUBQ R_SRC, R_TMP1 - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ R_LEN, $16 - JGT callMemmove - CMPQ R_TMP0, $16 - JLT callMemmove - CMPQ R_TMP1, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. 
- MOVOU 0(R_SRC), X0 - MOVOU X0, 0(R_DST) - - // d += length - // s += length - ADDQ R_LEN, R_DST - ADDQ R_LEN, R_SRC - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ R_LEN, R_TMP0 - JGT errCorrupt - CMPQ R_LEN, R_TMP1 - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ R_DST, 0(SP) - MOVQ R_SRC, 8(SP) - MOVQ R_LEN, 16(SP) - MOVQ R_DST, 24(SP) - MOVQ R_SRC, 32(SP) - MOVQ R_LEN, 40(SP) - MOVQ R_OFF, 48(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R_DBASE-R_SEND. - MOVQ 24(SP), R_DST - MOVQ 32(SP), R_SRC - MOVQ 40(SP), R_LEN - MOVQ 48(SP), R_OFF - MOVQ dst_base+0(FP), R_DBASE - MOVQ dst_len+8(FP), R_DLEN - MOVQ R_DBASE, R_DEND - ADDQ R_DLEN, R_DEND - MOVQ src_base+24(FP), R_SBASE - MOVQ src_len+32(FP), R_SLEN - MOVQ R_SBASE, R_SEND - ADDQ R_SLEN, R_SEND - - // d += length - // s += length - ADDQ R_LEN, R_DST - ADDQ R_LEN, R_SRC - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADDQ R_LEN, R_SRC - SUBQ $58, R_SRC - CMPQ R_SRC, R_SEND - JA errCorrupt - - // case x == 60: - CMPL R_LEN, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(R_SRC), R_LEN - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(R_SRC), R_LEN - JMP doLit - -tagLit62Plus: - CMPL R_LEN, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - // We read one byte, safe to read one back, since we are just reading tag. - // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8 - MOVL -4(R_SRC), R_LEN - SHRL $8, R_LEN - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(R_SRC), R_LEN - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, R_SRC - - // if uint(s) > uint(len(src)) { etc } - CMPQ R_SRC, R_SEND - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, R_LEN - INCQ R_LEN - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(R_SRC), R_OFF - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, R_SRC - - // if uint(s) > uint(len(src)) { etc } - CMPQ R_SRC, R_SEND - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, R_LEN - INCQ R_LEN - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(R_SRC), R_OFF - JMP doCopy - -tagCopy: - // We have a copy tag. 
We assume that: - // - R_TMP1 == src[s] & 0x03 - // - R_LEN == src[s] - CMPQ R_TMP1, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, R_SRC - - // if uint(s) > uint(len(src)) { etc } - CMPQ R_SRC, R_SEND - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - // length = 4 + int(src[s-2])>>2&0x7 - MOVBQZX -1(R_SRC), R_TMP1 - MOVQ R_LEN, R_TMP0 - SHRQ $2, R_LEN - ANDQ $0xe0, R_TMP0 - ANDQ $7, R_LEN - SHLQ $3, R_TMP0 - ADDQ $4, R_LEN - ORQ R_TMP1, R_TMP0 - - // check if repeat code, ZF set by ORQ. - JZ repeatCode - - // This is a regular copy, transfer our temporary value to R_OFF (length) - MOVQ R_TMP0, R_OFF - JMP doCopy - -// This is a repeat code. -repeatCode: - // If length < 9, reuse last offset, with the length already calculated. - CMPQ R_LEN, $9 - JL doCopyRepeat - - // Read additional bytes for length. - JE repeatLen1 - - // Rare, so the extra branch shouldn't hurt too much. - CMPQ R_LEN, $10 - JE repeatLen2 - JMP repeatLen3 - -// Read repeat lengths. -repeatLen1: - // s ++ - ADDQ $1, R_SRC - - // if uint(s) > uint(len(src)) { etc } - CMPQ R_SRC, R_SEND - JA errCorrupt - - // length = src[s-1] + 8 - MOVBQZX -1(R_SRC), R_LEN - ADDL $8, R_LEN - JMP doCopyRepeat - -repeatLen2: - // s +=2 - ADDQ $2, R_SRC - - // if uint(s) > uint(len(src)) { etc } - CMPQ R_SRC, R_SEND - JA errCorrupt - - // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8) - MOVWQZX -2(R_SRC), R_LEN - ADDL $260, R_LEN - JMP doCopyRepeat - -repeatLen3: - // s +=3 - ADDQ $3, R_SRC - - // if uint(s) > uint(len(src)) { etc } - CMPQ R_SRC, R_SEND - JA errCorrupt - - // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16) - // Read one byte further back (just part of the tag, shifted out) - MOVL -4(R_SRC), R_LEN - SHRL $8, R_LEN - ADDL $65540, R_LEN - JMP doCopyRepeat - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - R_LEN == length && R_LEN > 0 - // - R_OFF == offset - - // if d < offset { etc } - MOVQ R_DST, R_TMP1 - SUBQ R_DBASE, R_TMP1 - CMPQ R_TMP1, R_OFF - JLT errCorrupt - - // Repeat values can skip the test above, since any offset > 0 will be in dst. -doCopyRepeat: - // if offset <= 0 { etc } - CMPQ R_OFF, $0 - JLE errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R_DEND, R_TMP1 - SUBQ R_DST, R_TMP1 - CMPQ R_LEN, R_TMP1 - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R_TMP2 = len(dst)-d - // - R_TMP3 = &dst[d-offset] - MOVQ R_DEND, R_TMP2 - SUBQ R_DST, R_TMP2 - MOVQ R_DST, R_TMP3 - SUBQ R_OFF, R_TMP3 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ R_LEN, $16 - JGT slowForwardCopy - CMPQ R_OFF, $8 - JLT slowForwardCopy - CMPQ R_TMP2, $16 - JLT slowForwardCopy - MOVQ 0(R_TMP3), R_TMP0 - MOVQ R_TMP0, 0(R_DST) - MOVQ 8(R_TMP3), R_TMP1 - MOVQ R_TMP1, 8(R_DST) - ADDQ R_LEN, R_DST - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R_TMP2 - CMPQ R_LEN, R_TMP2 - JGT verySlowForwardCopy - - // We want to keep the offset, so we use R_TMP2 from here. - MOVQ R_OFF, R_TMP2 - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R_TMP3, is unchanged. - // } - CMPQ R_TMP2, $8 - JGE fixUpSlowForwardCopy - MOVQ (R_TMP3), R_TMP1 - MOVQ R_TMP1, (R_DST) - SUBQ R_TMP2, R_LEN - ADDQ R_TMP2, R_DST - ADDQ R_TMP2, R_TMP2 - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by R_DST being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ R_DST, R_TMP0 - ADDQ R_LEN, R_DST - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ R_LEN, $0 - JLE loop - MOVQ (R_TMP3), R_TMP1 - MOVQ R_TMP1, (R_TMP0) - ADDQ $8, R_TMP3 - ADDQ $8, R_TMP0 - SUBQ $8, R_LEN - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. 
In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R_TMP3), R_TMP1 - MOVB R_TMP1, (R_DST) - INCQ R_TMP3 - INCQ R_DST - DECQ R_LEN - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ R_DST, R_DEND - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s deleted file mode 100644 index 78e463f34..000000000 --- a/vendor/github.com/klauspost/compress/s2/decode_arm64.s +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -#define R_TMP0 R2 -#define R_TMP1 R3 -#define R_LEN R4 -#define R_OFF R5 -#define R_SRC R6 -#define R_DST R7 -#define R_DBASE R8 -#define R_DLEN R9 -#define R_DEND R10 -#define R_SBASE R11 -#define R_SLEN R12 -#define R_SEND R13 -#define R_TMP2 R14 -#define R_TMP3 R15 - -// TEST_SRC will check if R_SRC is <= SRC_END -#define TEST_SRC() \ - CMP R_SEND, R_SRC \ - BGT errCorrupt - -// MOVD R_SRC, R_TMP1 -// SUB R_SBASE, R_TMP1, R_TMP1 -// CMP R_SLEN, R_TMP1 -// BGT errCorrupt - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - R_TMP0 scratch -// - R_TMP1 scratch -// - R_LEN length or x -// - R_OFF offset -// - R_SRC &src[s] -// - R_DST &dst[d] -// + R_DBASE dst_base -// + R_DLEN dst_len -// + R_DEND dst_base + dst_len -// + R_SBASE src_base -// + R_SLEN src_len -// + R_SEND src_base + src_len -// - R_TMP2 used by doCopy -// - R_TMP3 used by doCopy -// -// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. -// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. -TEXT ·s2Decode(SB), NOSPLIT, $56-56 - // Initialize R_SRC, R_DST and R_DBASE-R_SEND. - MOVD dst_base+0(FP), R_DBASE - MOVD dst_len+8(FP), R_DLEN - MOVD R_DBASE, R_DST - MOVD R_DBASE, R_DEND - ADD R_DLEN, R_DEND, R_DEND - MOVD src_base+24(FP), R_SBASE - MOVD src_len+32(FP), R_SLEN - MOVD R_SBASE, R_SRC - MOVD R_SBASE, R_SEND - ADD R_SLEN, R_SEND, R_SEND - MOVD $0, R_OFF - -loop: - // for s < len(src) - CMP R_SEND, R_SRC - BEQ end - - // R_LEN = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBU (R_SRC), R_LEN - MOVW R_LEN, R_TMP1 - ANDW $3, R_TMP1 - MOVW $1, R1 - CMPW R1, R_TMP1 - BGE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - MOVW $60, R1 - LSRW $2, R_LEN, R_LEN - CMPW R_LEN, R1 - BLS tagLit60Plus - - // case x < 60: - // s++ - ADD $1, R_SRC, R_SRC - -doLit: - // This is the end of the inner "switch", when we have a literal tag. 
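For readers following the deleted assembly, the literal-tag handling it implements is easier to see in Go. A minimal sketch with a hypothetical helper name; the logic mirrors the pure-Go s2Decode in decode_other.go, which is removed later in this diff:

// parseLiteralLen shows, in Go, what the literal tag handling computes: a
// 2-bit tag, a 6-bit value x, and 0-4 extra length bytes. The caller must
// still bound-check `length` against the remaining src and dst.
func parseLiteralLen(src []byte, s int) (length, next int, ok bool) {
	x := uint32(src[s] >> 2)
	switch {
	case x < 60: // length is encoded in the tag byte itself
		s++
	case x == 60: // one extra length byte
		s += 2
		if s > len(src) {
			return 0, 0, false
		}
		x = uint32(src[s-1])
	case x == 61: // two extra length bytes, little endian
		s += 3
		if s > len(src) {
			return 0, 0, false
		}
		x = uint32(src[s-2]) | uint32(src[s-1])<<8
	case x == 62: // three extra length bytes
		s += 4
		if s > len(src) {
			return 0, 0, false
		}
		x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
	default: // x == 63: four extra length bytes
		s += 5
		if s > len(src) {
			return 0, 0, false
		}
		x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
	}
	return int(x) + 1, s, true
}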
- // - // We assume that R_LEN == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // R_LEN can hold 64 bits, so the increment cannot overflow. - ADD $1, R_LEN, R_LEN - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // R_TMP0 = len(dst) - d - // R_TMP1 = len(src) - s - MOVD R_DEND, R_TMP0 - SUB R_DST, R_TMP0, R_TMP0 - MOVD R_SEND, R_TMP1 - SUB R_SRC, R_TMP1, R_TMP1 - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMP $16, R_LEN - BGT callMemmove - CMP $16, R_TMP0 - BLT callMemmove - CMP $16, R_TMP1 - BLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - LDP 0(R_SRC), (R_TMP2, R_TMP3) - STP (R_TMP2, R_TMP3), 0(R_DST) - - // d += length - // s += length - ADD R_LEN, R_DST, R_DST - ADD R_LEN, R_SRC, R_SRC - B loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMP R_TMP0, R_LEN - BGT errCorrupt - CMP R_TMP1, R_LEN - BGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVD R_DST, 8(RSP) - MOVD R_SRC, 16(RSP) - MOVD R_LEN, 24(RSP) - MOVD R_DST, 32(RSP) - MOVD R_SRC, 40(RSP) - MOVD R_LEN, 48(RSP) - MOVD R_OFF, 56(RSP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R_DBASE-R_SEND. - MOVD 32(RSP), R_DST - MOVD 40(RSP), R_SRC - MOVD 48(RSP), R_LEN - MOVD 56(RSP), R_OFF - MOVD dst_base+0(FP), R_DBASE - MOVD dst_len+8(FP), R_DLEN - MOVD R_DBASE, R_DEND - ADD R_DLEN, R_DEND, R_DEND - MOVD src_base+24(FP), R_SBASE - MOVD src_len+32(FP), R_SLEN - MOVD R_SBASE, R_SEND - ADD R_SLEN, R_SEND, R_SEND - - // d += length - // s += length - ADD R_LEN, R_DST, R_DST - ADD R_LEN, R_SRC, R_SRC - B loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADD R_LEN, R_SRC, R_SRC - SUB $58, R_SRC, R_SRC - TEST_SRC() - - // case x == 60: - MOVW $61, R1 - CMPW R1, R_LEN - BEQ tagLit61 - BGT tagLit62Plus - - // x = uint32(src[s-1]) - MOVBU -1(R_SRC), R_LEN - B doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVHU -2(R_SRC), R_LEN - B doLit - -tagLit62Plus: - CMPW $62, R_LEN - BHI tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVHU -3(R_SRC), R_LEN - MOVBU -1(R_SRC), R_TMP1 - ORR R_TMP1<<16, R_LEN - B doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVWU -4(R_SRC), R_LEN - B doLit - - // The code above handles literal tags. - // ---------------------------------------- - // The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADD $5, R_SRC, R_SRC - - // if uint(s) > uint(len(src)) { etc } - MOVD R_SRC, R_TMP1 - SUB R_SBASE, R_TMP1, R_TMP1 - CMP R_SLEN, R_TMP1 - BGT errCorrupt - - // length = 1 + int(src[s-5])>>2 - MOVD $1, R1 - ADD R_LEN>>2, R1, R_LEN - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVWU -4(R_SRC), R_OFF - B doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADD $3, R_SRC, R_SRC - - // if uint(s) > uint(len(src)) { etc } - TEST_SRC() - - // length = 1 + int(src[s-3])>>2 - MOVD $1, R1 - ADD R_LEN>>2, R1, R_LEN - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVHU -2(R_SRC), R_OFF - B doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - R_TMP1 == src[s] & 0x03 - // - R_LEN == src[s] - CMP $2, R_TMP1 - BEQ tagCopy2 - BGT tagCopy4 - - // case tagCopy1: - // s += 2 - ADD $2, R_SRC, R_SRC - - // if uint(s) > uint(len(src)) { etc } - TEST_SRC() - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - // Calculate offset in R_TMP0 in case it is a repeat. - MOVD R_LEN, R_TMP0 - AND $0xe0, R_TMP0 - MOVBU -1(R_SRC), R_TMP1 - ORR R_TMP0<<3, R_TMP1, R_TMP0 - - // length = 4 + int(src[s-2])>>2&0x7 - MOVD $7, R1 - AND R_LEN>>2, R1, R_LEN - ADD $4, R_LEN, R_LEN - - // check if repeat code with offset 0. - CMP $0, R_TMP0 - BEQ repeatCode - - // This is a regular copy, transfer our temporary value to R_OFF (offset) - MOVD R_TMP0, R_OFF - B doCopy - - // This is a repeat code. -repeatCode: - // If length < 9, reuse last offset, with the length already calculated. - CMP $9, R_LEN - BLT doCopyRepeat - BEQ repeatLen1 - CMP $10, R_LEN - BEQ repeatLen2 - -repeatLen3: - // s +=3 - ADD $3, R_SRC, R_SRC - - // if uint(s) > uint(len(src)) { etc } - TEST_SRC() - - // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540 - MOVBU -1(R_SRC), R_TMP0 - MOVHU -3(R_SRC), R_LEN - ORR R_TMP0<<16, R_LEN, R_LEN - ADD $65540, R_LEN, R_LEN - B doCopyRepeat - -repeatLen2: - // s +=2 - ADD $2, R_SRC, R_SRC - - // if uint(s) > uint(len(src)) { etc } - TEST_SRC() - - // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260 - MOVHU -2(R_SRC), R_LEN - ADD $260, R_LEN, R_LEN - B doCopyRepeat - -repeatLen1: - // s +=1 - ADD $1, R_SRC, R_SRC - - // if uint(s) > uint(len(src)) { etc } - TEST_SRC() - - // length = src[s-1] + 8 - MOVBU -1(R_SRC), R_LEN - ADD $8, R_LEN, R_LEN - B doCopyRepeat - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. 
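The tagCopy1 branch above, including the S2-specific repeat code where an encoded offset of 0 means "reuse the previous offset", reads more naturally in Go. A sketch with a hypothetical helper name, following the pure-Go decoder removed later in this diff:

// parseCopy1 decodes a tagCopy1 element: a 3-bit length and an 11-bit offset
// packed into two bytes. An offset of 0 is the S2 repeat code: the previous
// offset is reused and lengths 5-7 select 1-3 extra length bytes.
func parseCopy1(src []byte, s, lastOffset int) (length, offset, next int, ok bool) {
	s += 2
	if s > len(src) {
		return 0, 0, 0, false
	}
	length = int(src[s-2]) >> 2 & 0x7
	offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
	if offset == 0 { // repeat code: keep the last offset
		offset = lastOffset
		switch length {
		case 5: // one extra length byte
			s++
			if s > len(src) {
				return 0, 0, 0, false
			}
			length = int(src[s-1]) + 4
		case 6: // two extra length bytes (+256 bias)
			s += 2
			if s > len(src) {
				return 0, 0, 0, false
			}
			length = int(uint32(src[s-2])|uint32(src[s-1])<<8) + (1 << 8)
		case 7: // three extra length bytes (+65536 bias)
			s += 3
			if s > len(src) {
				return 0, 0, 0, false
			}
			length = int(uint32(src[s-3])|uint32(src[s-2])<<8|uint32(src[s-1])<<16) + (1 << 16)
		default: // lengths 0-4 need no extra bytes
		}
	}
	return length + 4, offset, s, true
}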
- // - // We assume that: - // - R_LEN == length && R_LEN > 0 - // - R_OFF == offset - - // if d < offset { etc } - MOVD R_DST, R_TMP1 - SUB R_DBASE, R_TMP1, R_TMP1 - CMP R_OFF, R_TMP1 - BLT errCorrupt - - // Repeat values can skip the test above, since any offset > 0 will be in dst. -doCopyRepeat: - - // if offset <= 0 { etc } - CMP $0, R_OFF - BLE errCorrupt - - // if length > len(dst)-d { etc } - MOVD R_DEND, R_TMP1 - SUB R_DST, R_TMP1, R_TMP1 - CMP R_TMP1, R_LEN - BGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R_TMP2 = len(dst)-d - // - R_TMP3 = &dst[d-offset] - MOVD R_DEND, R_TMP2 - SUB R_DST, R_TMP2, R_TMP2 - MOVD R_DST, R_TMP3 - SUB R_OFF, R_TMP3, R_TMP3 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMP $16, R_LEN - BGT slowForwardCopy - CMP $8, R_OFF - BLT slowForwardCopy - CMP $16, R_TMP2 - BLT slowForwardCopy - MOVD 0(R_TMP3), R_TMP0 - MOVD R_TMP0, 0(R_DST) - MOVD 8(R_TMP3), R_TMP1 - MOVD R_TMP1, 8(R_DST) - ADD R_LEN, R_DST, R_DST - B loop - -slowForwardCopy: - // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. 
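The pattern-expansion loop that follows (makeOffsetAtLeast8) is compact when written in Go. A sketch under the same assumption the comment spells out, namely that the caller has already guaranteed room for the up-to-10-byte overrun; the function name is hypothetical:

// expandPattern keeps doubling a short repeating pattern with 8-byte moves
// until offset >= 8, after which plain 8-byte load/stores are safe. Only
// `offset` bytes are "committed" per step; the extra bytes written are part
// of the bounded overrun that later iterations overwrite.
func expandPattern(dst []byte, d, offset, length int) (int, int, int) {
	for offset < 8 {
		copy(dst[d:d+8], dst[d-offset:]) // 8-byte move, same effect as the asm's load+store pair
		length -= offset
		d += offset
		offset += offset // d-offset stays fixed, so the read position never moves
	}
	return d, offset, length
}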
- // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUB $10, R_TMP2, R_TMP2 - CMP R_TMP2, R_LEN - BGT verySlowForwardCopy - - // We want to keep the offset, so we use R_TMP2 from here. - MOVD R_OFF, R_TMP2 - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R_TMP3, is unchanged. - // } - CMP $8, R_TMP2 - BGE fixUpSlowForwardCopy - MOVD (R_TMP3), R_TMP1 - MOVD R_TMP1, (R_DST) - SUB R_TMP2, R_LEN, R_LEN - ADD R_TMP2, R_DST, R_DST - ADD R_TMP2, R_TMP2, R_TMP2 - B makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by R_DST being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVD R_DST, R_TMP0 - ADD R_LEN, R_DST, R_DST - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - MOVD $0, R1 - CMP R1, R_LEN - BLE loop - MOVD (R_TMP3), R_TMP1 - MOVD R_TMP1, (R_TMP0) - ADD $8, R_TMP3, R_TMP3 - ADD $8, R_TMP0, R_TMP0 - SUB $8, R_LEN, R_LEN - B finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R_TMP3), R_TMP1 - MOVB R_TMP1, (R_DST) - ADD $1, R_TMP3, R_TMP3 - ADD $1, R_DST, R_DST - SUB $1, R_LEN, R_LEN - CBNZ R_LEN, verySlowForwardCopy - B loop - - // The code above handles copy tags. - // ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMP R_DEND, R_DST - BNE errCorrupt - - // return 0 - MOVD $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVD $1, R_TMP0 - MOVD R_TMP0, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go deleted file mode 100644 index cb3576edd..000000000 --- a/vendor/github.com/klauspost/compress/s2/decode_asm.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (amd64 || arm64) && !appengine && gc && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !noasm - -package s2 - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func s2Decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go deleted file mode 100644 index 2cb55c2c7..000000000 --- a/vendor/github.com/klauspost/compress/s2/decode_other.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. 
All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!amd64 && !arm64) || appengine || !gc || noasm -// +build !amd64,!arm64 appengine !gc noasm - -package s2 - -import ( - "fmt" - "strconv" -) - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func s2Decode(dst, src []byte) int { - const debug = false - if debug { - fmt.Println("Starting decode, dst len:", len(dst)) - } - var d, s, length int - offset := 0 - - // As long as we can read at least 5 bytes... - for s < len(src)-5 { - // Removing bounds checks is SLOWER, when if doing - // in := src[s:s+5] - // Checked on Go 1.18 - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - x = uint32(src[s-1]) - case x == 61: - in := src[s : s+3] - x = uint32(in[1]) | uint32(in[2])<<8 - s += 3 - case x == 62: - in := src[s : s+4] - // Load as 32 bit and shift down. - x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 - x >>= 8 - s += 4 - case x == 63: - in := src[s : s+5] - x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 - s += 5 - } - length = int(x) + 1 - if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { - if debug { - fmt.Println("corrupt: lit size", length) - } - return decodeErrCodeCorrupt - } - if debug { - fmt.Println("literals, length:", length, "d-after:", d+length) - } - - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - length = int(src[s-2]) >> 2 & 0x7 - if toffset == 0 { - if debug { - fmt.Print("(repeat) ") - } - // keep last offset - switch length { - case 5: - length = int(src[s]) + 4 - s += 1 - case 6: - in := src[s : s+2] - length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) - s += 2 - case 7: - in := src[s : s+3] - length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) - s += 3 - default: // 0-> 4 - } - } else { - offset = toffset - } - length += 4 - case tagCopy2: - in := src[s : s+3] - offset = int(uint32(in[1]) | uint32(in[2])<<8) - length = 1 + int(in[0])>>2 - s += 3 - - case tagCopy4: - in := src[s : s+5] - offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) - length = 1 + int(in[0])>>2 - s += 5 - } - - if offset <= 0 || d < offset || length > len(dst)-d { - if debug { - fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) - } - - return decodeErrCodeCorrupt - } - - if debug { - fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) - } - - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset > length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. 
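Why the decoder cannot simply use the built-in copy for every match: Go's copy has memmove semantics, while an overlapping match (offset < length) is defined to replicate the pattern. A small, self-contained illustration, not part of the package:

package main

import "fmt"

func main() {
	// dst already holds "ab"; now decode a copy with offset=2, length=6.
	const d, offset, length = 2, 2, 6

	forward := []byte{'a', 'b', 0, 0, 0, 0, 0, 0}
	for i := 0; i < length; i++ {
		forward[d+i] = forward[d+i-offset] // forward copy replicates the 2-byte pattern
	}
	fmt.Printf("%q\n", forward) // "abababab"

	moved := []byte{'a', 'b', 0, 0, 0, 0, 0, 0}
	copy(moved[d:d+length], moved[d-offset:]) // memmove: reads complete before writes
	fmt.Printf("%q\n", moved) // "abab\x00\x00\x00\x00" -- not what the format requires
}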
- // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - - // Remaining with extra checks... - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { - if debug { - fmt.Println("corrupt: lit size", length) - } - return decodeErrCodeCorrupt - } - if debug { - fmt.Println("literals, length:", length, "d-after:", d+length) - } - - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = int(src[s-2]) >> 2 & 0x7 - toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - if toffset == 0 { - if debug { - fmt.Print("(repeat) ") - } - // keep last offset - switch length { - case 5: - s += 1 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = int(uint32(src[s-1])) + 4 - case 6: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) - case 7: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) - default: // 0-> 4 - } - } else { - offset = toffset - } - length += 4 - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - if debug { - fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) - } - return decodeErrCodeCorrupt - } - - if debug { - fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) - } - - // Copy from an earlier sub-slice of dst to a later sub-slice. 
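The trio of corruption checks above appears before every copy in both decode loops; factored out into a hypothetical helper it reads:

// copyIsValid reports whether a decoded copy element can be applied: the
// offset must point into already-written output and the copy must fit in
// the remaining destination.
func copyIsValid(d, offset, length, dstLen int) bool {
	return offset > 0 && // offset 0 or negative is invalid
		d >= offset && // the referenced data must already exist in dst
		length <= dstLen-d // and the copy must fit in what is left of dst
}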
- // If no overlap, use the built-in copy: - if offset > length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go deleted file mode 100644 index f125ad096..000000000 --- a/vendor/github.com/klauspost/compress/s2/dict.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright (c) 2022+ Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "bytes" - "encoding/binary" - "sync" -) - -const ( - // MinDictSize is the minimum dictionary size when repeat has been read. - MinDictSize = 16 - - // MaxDictSize is the maximum dictionary size when repeat has been read. - MaxDictSize = 65536 - - // MaxDictSrcOffset is the maximum offset where a dictionary entry can start. - MaxDictSrcOffset = 65535 -) - -// Dict contains a dictionary that can be used for encoding and decoding s2 -type Dict struct { - dict []byte - repeat int // Repeat as index of dict - - fast, better, best sync.Once - fastTable *[1 << 14]uint16 - - betterTableShort *[1 << 14]uint16 - betterTableLong *[1 << 17]uint16 - - bestTableShort *[1 << 16]uint32 - bestTableLong *[1 << 19]uint32 -} - -// NewDict will read a dictionary. -// It will return nil if the dictionary is invalid. -func NewDict(dict []byte) *Dict { - if len(dict) == 0 { - return nil - } - var d Dict - // Repeat is the first value of the dict - r, n := binary.Uvarint(dict) - if n <= 0 { - return nil - } - dict = dict[n:] - d.dict = dict - if cap(d.dict) < len(d.dict)+16 { - d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) - } - if len(dict) < MinDictSize || len(dict) > MaxDictSize { - return nil - } - d.repeat = int(r) - if d.repeat > len(dict) { - return nil - } - return &d -} - -// Bytes will return a serialized version of the dictionary. -// The output can be sent to NewDict. -func (d *Dict) Bytes() []byte { - dst := make([]byte, binary.MaxVarintLen16+len(d.dict)) - return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...) -} - -// MakeDict will create a dictionary. -// 'data' must be at least MinDictSize. -// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used. -// If searchStart is set the start repeat value will be set to the last -// match of this content. -// If no matches are found, it will attempt to find shorter matches. -// This content should match the typical start of a block. -// If at least 4 bytes cannot be matched, repeat is set to start of block. -func MakeDict(data []byte, searchStart []byte) *Dict { - if len(data) == 0 { - return nil - } - if len(data) > MaxDictSize { - data = data[len(data)-MaxDictSize:] - } - var d Dict - dict := data - d.dict = dict - if cap(d.dict) < len(d.dict)+16 { - d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) 
- } - if len(dict) < MinDictSize { - return nil - } - - // Find the longest match possible, last entry if multiple. - for s := len(searchStart); s > 4; s-- { - if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 { - d.repeat = idx - break - } - } - - return &d -} - -// MakeDictManual will create a dictionary. -// 'data' must be at least MinDictSize and less than or equal to MaxDictSize. -// A manual first repeat index into data must be provided. -// It must be less than len(data)-8. -func MakeDictManual(data []byte, firstIdx uint16) *Dict { - if len(data) < MinDictSize || int(firstIdx) >= len(data)-8 || len(data) > MaxDictSize { - return nil - } - var d Dict - dict := data - d.dict = dict - if cap(d.dict) < len(d.dict)+16 { - d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) - } - - d.repeat = int(firstIdx) - return &d -} - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. -func (d *Dict) Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if cap(dst) < n { - dst = make([]byte, n) - } else { - dst = dst[:n] - } - - // The block starts with the varint-encoded length of the decompressed bytes. - dstP := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:dstP] - } - if len(src) < minNonLiteralBlockSize { - dstP += emitLiteral(dst[dstP:], src) - return dst[:dstP] - } - n := encodeBlockDictGo(dst[dstP:], src, d) - if n > 0 { - dstP += n - return dst[:dstP] - } - // Not compressible - dstP += emitLiteral(dst[dstP:], src) - return dst[:dstP] -} - -// EncodeBetter returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// EncodeBetter compresses better than Encode but typically with a -// 10-40% speed decrease on both compression and decompression. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. -func (d *Dict) EncodeBetter(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. 
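Taken together, MakeDict and Dict.Encode above, with Dict.Decode a little further down, are used roughly as follows. A usage sketch with made-up sample data, relying only on exported names shown in this file:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	// Dictionaries pay off when many small blocks share boilerplate.
	shared := []byte("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n")
	dict := s2.MakeDict(shared, nil)
	if dict == nil {
		panic("dictionary rejected (outside MinDictSize..MaxDictSize)")
	}

	msg := append(append([]byte{}, shared...), []byte(`{"ok":true}`)...)
	enc := dict.Encode(nil, msg)
	dec, err := dict.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(msg), "->", len(enc), "bytes; roundtrip ok:", bytes.Equal(dec, msg))
}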
- dstP := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:dstP] - } - if len(src) < minNonLiteralBlockSize { - dstP += emitLiteral(dst[dstP:], src) - return dst[:dstP] - } - n := encodeBlockBetterDict(dst[dstP:], src, d) - if n > 0 { - dstP += n - return dst[:dstP] - } - // Not compressible - dstP += emitLiteral(dst[dstP:], src) - return dst[:dstP] -} - -// EncodeBest returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// EncodeBest compresses as good as reasonably possible but with a -// big speed decrease. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. -func (d *Dict) EncodeBest(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - dstP := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:dstP] - } - if len(src) < minNonLiteralBlockSize { - dstP += emitLiteral(dst[dstP:], src) - return dst[:dstP] - } - n := encodeBlockBest(dst[dstP:], src, d) - if n > 0 { - dstP += n - return dst[:dstP] - } - // Not compressible - dstP += emitLiteral(dst[dstP:], src) - return dst[:dstP] -} - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func (d *Dict) Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= cap(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - if s2DecodeDict(dst, src[s:], d) != 0 { - return nil, ErrCorrupt - } - return dst, nil -} - -func (d *Dict) initFast() { - d.fast.Do(func() { - const ( - tableBits = 14 - maxTableSize = 1 << tableBits - ) - - var table [maxTableSize]uint16 - // We stop so any entry of length 8 can always be read. - for i := 0; i < len(d.dict)-8-2; i += 3 { - x0 := load64(d.dict, i) - h0 := hash6(x0, tableBits) - h1 := hash6(x0>>8, tableBits) - h2 := hash6(x0>>16, tableBits) - table[h0] = uint16(i) - table[h1] = uint16(i + 1) - table[h2] = uint16(i + 2) - } - d.fastTable = &table - }) -} - -func (d *Dict) initBetter() { - d.better.Do(func() { - const ( - // Long hash matches. - lTableBits = 17 - maxLTableSize = 1 << lTableBits - - // Short hash matches. - sTableBits = 14 - maxSTableSize = 1 << sTableBits - ) - - var lTable [maxLTableSize]uint16 - var sTable [maxSTableSize]uint16 - - // We stop so any entry of length 8 can always be read. - for i := 0; i < len(d.dict)-8; i++ { - cv := load64(d.dict, i) - lTable[hash7(cv, lTableBits)] = uint16(i) - sTable[hash4(cv, sTableBits)] = uint16(i) - } - d.betterTableShort = &sTable - d.betterTableLong = &lTable - }) -} - -func (d *Dict) initBest() { - d.best.Do(func() { - const ( - // Long hash matches. 
- lTableBits = 19 - maxLTableSize = 1 << lTableBits - - // Short hash matches. - sTableBits = 16 - maxSTableSize = 1 << sTableBits - ) - - var lTable [maxLTableSize]uint32 - var sTable [maxSTableSize]uint32 - - // We stop so any entry of length 8 can always be read. - for i := 0; i < len(d.dict)-8; i++ { - cv := load64(d.dict, i) - hashL := hash8(cv, lTableBits) - hashS := hash4(cv, sTableBits) - candidateL := lTable[hashL] - candidateS := sTable[hashS] - lTable[hashL] = uint32(i) | candidateL<<16 - sTable[hashS] = uint32(i) | candidateS<<16 - } - d.bestTableShort = &sTable - d.bestTableLong = &lTable - }) -} diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go deleted file mode 100644 index 20b802270..000000000 --- a/vendor/github.com/klauspost/compress/s2/encode.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "encoding/binary" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/internal/race" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if cap(dst) < n { - dst = make([]byte, n) - } else { - dst = dst[:n] - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:d] - } - if len(src) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], src) - return dst[:d] - } - n := encodeBlock(dst[d:], src) - if n > 0 { - d += n - return dst[:d] - } - // Not compressible - d += emitLiteral(dst[d:], src) - return dst[:d] -} - -var estblockPool [2]sync.Pool - -// EstimateBlockSize will perform a very fast compression -// without outputting the result and return the compressed output size. -// The function returns -1 if no improvement could be achieved. -// Using actual compression will most often produce better compression than the estimate. -func EstimateBlockSize(src []byte) (d int) { - if len(src) <= inputMargin || int64(len(src)) > 0xffffffff { - return -1 - } - if len(src) <= 1024 { - const sz, pool = 2048, 0 - tmp, ok := estblockPool[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer estblockPool[pool].Put(tmp) - - d = calcBlockSizeSmall(src, tmp) - } else { - const sz, pool = 32768, 1 - tmp, ok := estblockPool[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer estblockPool[pool].Put(tmp) - - d = calcBlockSize(src, tmp) - } - - if d == 0 { - return -1 - } - // Size of the varint encoded block size. 
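Every block produced by the encoders in this file starts with the uvarint-encoded decompressed length, with the compressed payload immediately after. A hypothetical stand-alone reader for that header (the package's own unexported decodedLen does the equivalent):

package blockinfo

import (
	"encoding/binary"
	"errors"
	"math"
)

// blockDecodedLen returns the decompressed length stored at the front of an
// s2/snappy block and the number of header bytes consumed.
func blockDecodedLen(block []byte) (dLen, headerLen int, err error) {
	v, n := binary.Uvarint(block)
	if n <= 0 || v > math.MaxUint32 {
		return 0, 0, errors.New("corrupt or oversized block header")
	}
	return int(v), n, nil
}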
- d += (bits.Len64(uint64(len(src))) + 7) / 7 - - if d >= len(src) { - return -1 - } - return d -} - -// EncodeBetter returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// EncodeBetter compresses better than Encode but typically with a -// 10-40% speed decrease on both compression and decompression. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. -func EncodeBetter(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:d] - } - if len(src) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], src) - return dst[:d] - } - n := encodeBlockBetter(dst[d:], src) - if n > 0 { - d += n - return dst[:d] - } - // Not compressible - d += emitLiteral(dst[d:], src) - return dst[:d] -} - -// EncodeBest returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// EncodeBest compresses as good as reasonably possible but with a -// big speed decrease. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. -func EncodeBest(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:d] - } - if len(src) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], src) - return dst[:d] - } - n := encodeBlockBest(dst[d:], src, nil) - if n > 0 { - d += n - return dst[:d] - } - // Not compressible - d += emitLiteral(dst[d:], src) - return dst[:d] -} - -// EncodeSnappy returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The output is Snappy compatible and will likely decompress faster. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. 
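The encoders above (Encode, EncodeBetter, EncodeBest) trade speed for compression ratio in that order. A hypothetical caller-side selection, assuming the package is imported as s2:

// compressBlock picks an s2 block encoder by a caller-chosen level.
// Passing nil dst lets the encoder allocate; reuse a buffer to avoid that.
func compressBlock(level string, src []byte) []byte {
	switch level {
	case "best": // slowest, densest output
		return s2.EncodeBest(nil, src)
	case "better": // typically 10-40% slower than the default
		return s2.EncodeBetter(nil, src)
	default: // fastest
		return s2.Encode(nil, src)
	}
}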
-func EncodeSnappy(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if cap(dst) < n { - dst = make([]byte, n) - } else { - dst = dst[:n] - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:d] - } - if len(src) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], src) - return dst[:d] - } - - n := encodeBlockSnappy(dst[d:], src) - if n > 0 { - d += n - return dst[:d] - } - // Not compressible - d += emitLiteral(dst[d:], src) - return dst[:d] -} - -// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The output is Snappy compatible and will likely decompress faster. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. -func EncodeSnappyBetter(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if cap(dst) < n { - dst = make([]byte, n) - } else { - dst = dst[:n] - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:d] - } - if len(src) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], src) - return dst[:d] - } - - n := encodeBlockBetterSnappy(dst[d:], src) - if n > 0 { - d += n - return dst[:d] - } - // Not compressible - d += emitLiteral(dst[d:], src) - return dst[:d] -} - -// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The output is Snappy compatible and will likely decompress faster. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// The blocks will require the same amount of memory to decode as encoding, -// and does not make for concurrent decoding. -// Also note that blocks do not contain CRC information, so corruption may be undetected. -// -// If you need to encode larger amounts of data, consider using -// the streaming interface which gives all of these features. -func EncodeSnappyBest(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if cap(dst) < n { - dst = make([]byte, n) - } else { - dst = dst[:n] - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:d] - } - if len(src) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], src) - return dst[:d] - } - - n := encodeBlockBestSnappy(dst[d:], src) - if n > 0 { - d += n - return dst[:d] - } - // Not compressible - d += emitLiteral(dst[d:], src) - return dst[:d] -} - -// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination. -// If the destination is nil or too small, a new will be allocated. -// The blocks are not validated, so garbage in = garbage out. 
-// dst may not overlap block data. -// Any data in dst is preserved as is, so it will not be considered a block. -func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) { - totalSize := uint64(0) - compSize := 0 - for _, b := range blocks { - l, hdr, err := decodedLen(b) - if err != nil { - return nil, err - } - totalSize += uint64(l) - compSize += len(b) - hdr - } - if totalSize == 0 { - dst = append(dst, 0) - return dst, nil - } - if totalSize > math.MaxUint32 { - return nil, ErrTooLarge - } - var tmp [binary.MaxVarintLen32]byte - hdrSize := binary.PutUvarint(tmp[:], totalSize) - wantSize := hdrSize + compSize - - if cap(dst)-len(dst) < wantSize { - dst = append(make([]byte, 0, wantSize+len(dst)), dst...) - } - dst = append(dst, tmp[:hdrSize]...) - for _, b := range blocks { - _, hdr, err := decodedLen(b) - if err != nil { - return nil, err - } - dst = append(dst, b[hdr:]...) - } - return dst, nil -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 8 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// will be accepted by the encoder. -const minNonLiteralBlockSize = 32 - -const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits) - -// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size. -// Blocks this big are highly discouraged, though. -// Half the size on 32 bit systems. -const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5 - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -// 32 bit platforms will have lower thresholds for rejecting big content. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if intReduction == 1 { - // 32 bits - if n > math.MaxInt32 { - // Also includes negative. - return -1 - } - } else if n > 0xffffffff { - // 64 bits - // Also includes negative. - return -1 - } - // Size of the varint encoded block size. - n = n + uint64((bits.Len64(n)+7)/7) - - // Add maximum size of encoding block as literals. - n += uint64(literalExtraSize(int64(srcLen))) - if intReduction == 1 { - // 32 bits - if n > math.MaxInt32 { - return -1 - } - } else if n > 0xffffffff { - // 64 bits - // Also includes negative. - return -1 - } - return int(n) -} diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go deleted file mode 100644 index 997704569..000000000 --- a/vendor/github.com/klauspost/compress/s2/encode_all.go +++ /dev/null @@ -1,1068 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
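MaxEncodedLen, defined just above, together with the package's ErrTooLarge lets callers pre-size the destination so Encode never allocates. A hypothetical helper, assuming the package is imported as s2:

// encodeInto compresses src into buf, growing buf only when it is too small.
func encodeInto(buf, src []byte) ([]byte, error) {
	n := s2.MaxEncodedLen(len(src))
	if n < 0 {
		return nil, s2.ErrTooLarge // src exceeds the maximum block size for this platform
	}
	if cap(buf) < n {
		buf = make([]byte, n)
	}
	return s2.Encode(buf[:n], src), nil
}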
- -package s2 - -import ( - "bytes" - "encoding/binary" - "fmt" - "math/bits" -) - -func load32(b []byte, i int) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash6(u uint64, h uint8) uint32 { - const prime6bytes = 227718039650203 - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) -} - -func encodeGo(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - if len(src) == 0 { - return dst[:d] - } - if len(src) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], src) - return dst[:d] - } - n := encodeBlockGo(dst[d:], src) - if n > 0 { - d += n - return dst[:d] - } - // Not compressible - d += emitLiteral(dst[d:], src) - return dst[:d] -} - -// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockGo(dst, src []byte) (d int) { - // Initialize the hash table. - const ( - tableBits = 14 - maxTableSize = 1 << tableBits - - debug = false - ) - - var table [maxTableSize]uint32 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // Bail if we can't compress to at least this. - dstLimit := len(src) - len(src)>>5 - 5 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - cv := load64(src, s) - - // We search for a repeat at -1, but don't output repeats when nextEmit == 0 - repeat := 1 - - for { - candidate := 0 - for { - // Next src position to check - nextS := s + (s-nextEmit)>>6 + 4 - if nextS > sLimit { - goto emitRemainder - } - hash0 := hash6(cv, tableBits) - hash1 := hash6(cv>>8, tableBits) - candidate = int(table[hash0]) - candidate2 := int(table[hash1]) - table[hash0] = uint32(s) - table[hash1] = uint32(s + 1) - hash2 := hash6(cv>>16, tableBits) - - // Check repeat at offset checkRep. - const checkRep = 1 - if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { - base := s + checkRep - // Extend back - for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { - i-- - base-- - } - - // Bail if we exceed the maximum size. - if d+(base-nextEmit) > dstLimit { - return 0 - } - - d += emitLiteral(dst[d:], src[nextEmit:base]) - - // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep - for s <= sLimit { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - if debug { - // Validate match. 
- if s <= candidate { - panic("s <= candidate") - } - a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - if nextEmit > 0 { - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. - d += emitRepeat(dst[d:], repeat, s-base) - } else { - // First match, cannot be repeat. - d += emitCopy(dst[d:], repeat, s-base) - } - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - cv = load64(src, s) - continue - } - - if uint32(cv) == load32(src, candidate) { - break - } - candidate = int(table[hash2]) - if uint32(cv>>8) == load32(src, candidate2) { - table[hash2] = uint32(s + 2) - candidate = candidate2 - s++ - break - } - table[hash2] = uint32(s + 2) - if uint32(cv>>16) == load32(src, candidate) { - s += 2 - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards. - // The top bytes will be rechecked to get the full match. - for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { - candidate-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - repeat = base - candidate - - // Extend the 4-byte match as long as possible. - s += 4 - candidate += 4 - for s <= len(src)-8 { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopy(dst[d:], repeat, s-base) - if debug { - // Validate match. - if s <= candidate { - panic("s <= candidate") - } - a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - // Check for an immediate match, otherwise start search at s+1 - x := load64(src, s-2) - m2Hash := hash6(x, tableBits) - currHash := hash6(x>>16, tableBits) - candidate = int(table[currHash]) - table[m2Hash] = uint32(s - 2) - table[currHash] = uint32(s) - if debug && s == candidate { - panic("s == candidate") - } - if uint32(x>>16) != load32(src, candidate) { - cv = load64(src, s+1) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} - -func encodeBlockSnappyGo(dst, src []byte) (d int) { - // Initialize the hash table. - const ( - tableBits = 14 - maxTableSize = 1 << tableBits - ) - - var table [maxTableSize]uint32 - - // sLimit is when to stop looking for offset/length copies. 
The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // Bail if we can't compress to at least this. - dstLimit := len(src) - len(src)>>5 - 5 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - cv := load64(src, s) - - // We search for a repeat at -1, but don't output repeats when nextEmit == 0 - repeat := 1 - - for { - candidate := 0 - for { - // Next src position to check - nextS := s + (s-nextEmit)>>6 + 4 - if nextS > sLimit { - goto emitRemainder - } - hash0 := hash6(cv, tableBits) - hash1 := hash6(cv>>8, tableBits) - candidate = int(table[hash0]) - candidate2 := int(table[hash1]) - table[hash0] = uint32(s) - table[hash1] = uint32(s + 1) - hash2 := hash6(cv>>16, tableBits) - - // Check repeat at offset checkRep. - const checkRep = 1 - if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { - base := s + checkRep - // Extend back - for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { - i-- - base-- - } - // Bail if we exceed the maximum size. - if d+(base-nextEmit) > dstLimit { - return 0 - } - - d += emitLiteral(dst[d:], src[nextEmit:base]) - - // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep - for s <= sLimit { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopyNoRepeat(dst[d:], repeat, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - cv = load64(src, s) - continue - } - - if uint32(cv) == load32(src, candidate) { - break - } - candidate = int(table[hash2]) - if uint32(cv>>8) == load32(src, candidate2) { - table[hash2] = uint32(s + 2) - candidate = candidate2 - s++ - break - } - table[hash2] = uint32(s + 2) - if uint32(cv>>16) == load32(src, candidate) { - s += 2 - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards - for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { - candidate-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - repeat = base - candidate - - // Extend the 4-byte match as long as possible. - s += 4 - candidate += 4 - for s <= len(src)-8 { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopyNoRepeat(dst[d:], repeat, s-base) - if false { - // Validate match. 
- a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - // Check for an immediate match, otherwise start search at s+1 - x := load64(src, s-2) - m2Hash := hash6(x, tableBits) - currHash := hash6(x>>16, tableBits) - candidate = int(table[currHash]) - table[m2Hash] = uint32(s - 2) - table[currHash] = uint32(s) - if uint32(x>>16) != load32(src, candidate) { - cv = load64(src, s+1) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} - -// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) { - // Initialize the hash table. - const ( - tableBits = 14 - maxTableSize = 1 << tableBits - maxAhead = 8 // maximum bytes ahead without checking sLimit - - debug = false - ) - dict.initFast() - - var table [maxTableSize]uint32 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - if sLimit > MaxDictSrcOffset-maxAhead { - sLimit = MaxDictSrcOffset - maxAhead - } - - // Bail if we can't compress to at least this. - dstLimit := len(src) - len(src)>>5 - 5 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form can start with a dict entry (copy or repeat). - s := 0 - - // Convert dict repeat to offset - repeat := len(dict.dict) - dict.repeat - cv := load64(src, 0) - - // While in dict -searchDict: - for { - // Next src position to check - nextS := s + (s-nextEmit)>>6 + 4 - hash0 := hash6(cv, tableBits) - hash1 := hash6(cv>>8, tableBits) - if nextS > sLimit { - if debug { - fmt.Println("slimit reached", s, nextS) - } - break searchDict - } - candidateDict := int(dict.fastTable[hash0]) - candidateDict2 := int(dict.fastTable[hash1]) - candidate2 := int(table[hash1]) - candidate := int(table[hash0]) - table[hash0] = uint32(s) - table[hash1] = uint32(s + 1) - hash2 := hash6(cv>>16, tableBits) - - // Check repeat at offset checkRep. - const checkRep = 1 - - if repeat > s { - candidate := len(dict.dict) - repeat + s - if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) { - // Extend back - base := s - for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { - i-- - base-- - } - // Bail if we exceed the maximum size. 
- if d+(base-nextEmit) > dstLimit { - return 0 - } - - d += emitLiteral(dst[d:], src[nextEmit:base]) - if debug && nextEmit != base { - fmt.Println("emitted ", base-nextEmit, "literals") - } - s += 4 - candidate += 4 - for candidate < len(dict.dict)-8 && s <= len(src)-8 { - if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - d += emitRepeat(dst[d:], repeat, s-base) - if debug { - fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) - } - nextEmit = s - if s >= sLimit { - break searchDict - } - cv = load64(src, s) - continue - } - } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { - base := s + checkRep - // Extend back - for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { - i-- - base-- - } - d += emitLiteral(dst[d:], src[nextEmit:base]) - if debug && nextEmit != base { - fmt.Println("emitted ", base-nextEmit, "literals") - } - - // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep - for s <= sLimit { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - if debug { - // Validate match. - if s <= candidate { - panic("s <= candidate") - } - a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - - if nextEmit > 0 { - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. - d += emitRepeat(dst[d:], repeat, s-base) - } else { - // First match, cannot be repeat. - d += emitCopy(dst[d:], repeat, s-base) - } - - nextEmit = s - if s >= sLimit { - break searchDict - } - if debug { - fmt.Println("emitted reg repeat", s-base, "s:", s) - } - cv = load64(src, s) - continue searchDict - } - if s == 0 { - cv = load64(src, nextS) - s = nextS - continue searchDict - } - // Start with table. These matches will always be closer. - if uint32(cv) == load32(src, candidate) { - goto emitMatch - } - candidate = int(table[hash2]) - if uint32(cv>>8) == load32(src, candidate2) { - table[hash2] = uint32(s + 2) - candidate = candidate2 - s++ - goto emitMatch - } - - // Check dict. Dicts have longer offsets, so we want longer matches. - if cv == load64(dict.dict, candidateDict) { - table[hash2] = uint32(s + 2) - goto emitDict - } - - candidateDict = int(dict.fastTable[hash2]) - // Check if upper 7 bytes match - if candidateDict2 >= 1 { - if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) { - table[hash2] = uint32(s + 2) - candidateDict = candidateDict2 - s++ - goto emitDict - } - } - - table[hash2] = uint32(s + 2) - if uint32(cv>>16) == load32(src, candidate) { - s += 2 - goto emitMatch - } - if candidateDict >= 2 { - // Check if upper 6 bytes match - if cv^load64(dict.dict, candidateDict-2) < (1 << 16) { - s += 2 - goto emitDict - } - } - - cv = load64(src, nextS) - s = nextS - continue searchDict - - emitDict: - { - if debug { - if load32(dict.dict, candidateDict) != load32(src, s) { - panic("dict emit mismatch") - } - } - // Extend backwards. - // The top bytes will be rechecked to get the full match. - for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] { - candidateDict-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - d += emitLiteral(dst[d:], src[nextEmit:s]) - if debug && nextEmit != s { - fmt.Println("emitted ", s-nextEmit, "literals") - } - { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - repeat = s + (len(dict.dict)) - candidateDict - - // Extend the 4-byte match as long as possible. - s += 4 - candidateDict += 4 - for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 { - if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidateDict += 8 - } - - // Matches longer than 64 are split. - if s <= sLimit || s-base < 8 { - d += emitCopy(dst[d:], repeat, s-base) - } else { - // Split to ensure we don't start a copy within next block - d += emitCopy(dst[d:], repeat, 4) - d += emitRepeat(dst[d:], repeat, s-base-4) - } - if false { - // Validate match. - if s <= candidate { - panic("s <= candidate") - } - a := src[base:s] - b := dict.dict[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - if debug { - fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s) - } - nextEmit = s - if s >= sLimit { - break searchDict - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - - // Index and continue loop to try new candidate. - x := load64(src, s-2) - m2Hash := hash6(x, tableBits) - currHash := hash6(x>>8, tableBits) - table[m2Hash] = uint32(s - 2) - table[currHash] = uint32(s - 1) - cv = load64(src, s) - } - continue - } - emitMatch: - - // Extend backwards. - // The top bytes will be rechecked to get the full match. - for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { - candidate-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - d += emitLiteral(dst[d:], src[nextEmit:s]) - if debug && nextEmit != s { - fmt.Println("emitted ", s-nextEmit, "literals") - } - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - repeat = base - candidate - - // Extend the 4-byte match as long as possible. - s += 4 - candidate += 4 - for s <= len(src)-8 { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopy(dst[d:], repeat, s-base) - if debug { - // Validate match. 
- if s <= candidate { - panic("s <= candidate") - } - a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - if debug { - fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) - } - nextEmit = s - if s >= sLimit { - break searchDict - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - // Check for an immediate match, otherwise start search at s+1 - x := load64(src, s-2) - m2Hash := hash6(x, tableBits) - currHash := hash6(x>>16, tableBits) - candidate = int(table[currHash]) - table[m2Hash] = uint32(s - 2) - table[currHash] = uint32(s) - if debug && s == candidate { - panic("s == candidate") - } - if uint32(x>>16) != load32(src, candidate) { - cv = load64(src, s+1) - s++ - break - } - } - } - - // Search without dict: - if repeat > s { - repeat = 0 - } - - // No more dict - sLimit = len(src) - inputMargin - if s >= sLimit { - goto emitRemainder - } - if debug { - fmt.Println("non-dict matching at", s, "repeat:", repeat) - } - cv = load64(src, s) - if debug { - fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) - } - for { - candidate := 0 - for { - // Next src position to check - nextS := s + (s-nextEmit)>>6 + 4 - if nextS > sLimit { - goto emitRemainder - } - hash0 := hash6(cv, tableBits) - hash1 := hash6(cv>>8, tableBits) - candidate = int(table[hash0]) - candidate2 := int(table[hash1]) - table[hash0] = uint32(s) - table[hash1] = uint32(s + 1) - hash2 := hash6(cv>>16, tableBits) - - // Check repeat at offset checkRep. - const checkRep = 1 - if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { - base := s + checkRep - // Extend back - for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { - i-- - base-- - } - // Bail if we exceed the maximum size. - if d+(base-nextEmit) > dstLimit { - return 0 - } - - d += emitLiteral(dst[d:], src[nextEmit:base]) - if debug && nextEmit != base { - fmt.Println("emitted ", base-nextEmit, "literals") - } - // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep - for s <= sLimit { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - if debug { - // Validate match. - if s <= candidate { - panic("s <= candidate") - } - a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - if nextEmit > 0 { - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. - d += emitRepeat(dst[d:], repeat, s-base) - } else { - // First match, cannot be repeat. - d += emitCopy(dst[d:], repeat, s-base) - } - if debug { - fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s) - } - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - cv = load64(src, s) - continue - } - - if uint32(cv) == load32(src, candidate) { - break - } - candidate = int(table[hash2]) - if uint32(cv>>8) == load32(src, candidate2) { - table[hash2] = uint32(s + 2) - candidate = candidate2 - s++ - break - } - table[hash2] = uint32(s + 2) - if uint32(cv>>16) == load32(src, candidate) { - s += 2 - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards. - // The top bytes will be rechecked to get the full match. 
- for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { - candidate-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - d += emitLiteral(dst[d:], src[nextEmit:s]) - if debug && nextEmit != s { - fmt.Println("emitted ", s-nextEmit, "literals") - } - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - repeat = base - candidate - - // Extend the 4-byte match as long as possible. - s += 4 - candidate += 4 - for s <= len(src)-8 { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopy(dst[d:], repeat, s-base) - if debug { - // Validate match. - if s <= candidate { - panic("s <= candidate") - } - a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - if debug { - fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) - } - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - // Check for an immediate match, otherwise start search at s+1 - x := load64(src, s-2) - m2Hash := hash6(x, tableBits) - currHash := hash6(x>>16, tableBits) - candidate = int(table[currHash]) - table[m2Hash] = uint32(s - 2) - table[currHash] = uint32(s) - if debug && s == candidate { - panic("s == candidate") - } - if uint32(x>>16) != load32(src, candidate) { - cv = load64(src, s+1) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteral(dst[d:], src[nextEmit:]) - if debug && nextEmit != s { - fmt.Println("emitted ", len(src)-nextEmit, "literals") - } - } - return d -} diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go deleted file mode 100644 index 7aadd255f..000000000 --- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go +++ /dev/null @@ -1,317 +0,0 @@ -//go:build !appengine && !noasm && gc -// +build !appengine,!noasm,gc - -package s2 - -import ( - "sync" - - "github.com/klauspost/compress/internal/race" -) - -const hasAmd64Asm = true - -var encPools [4]sync.Pool - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - race.ReadSlice(src) - race.WriteSlice(dst) - - const ( - // Use 12 bit table when less than... 
- limit12B = 16 << 10 - // Use 10 bit table when less than... - limit10B = 4 << 10 - // Use 8 bit table when less than... - limit8B = 512 - ) - - if len(src) >= 4<<20 { - const sz, pool = 65536, 0 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeBlockAsm(dst, src, tmp) - } - if len(src) >= limit12B { - const sz, pool = 65536, 0 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeBlockAsm4MB(dst, src, tmp) - } - if len(src) >= limit10B { - const sz, pool = 16384, 1 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeBlockAsm12B(dst, src, tmp) - } - if len(src) >= limit8B { - const sz, pool = 4096, 2 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeBlockAsm10B(dst, src, tmp) - } - if len(src) < minNonLiteralBlockSize { - return 0 - } - const sz, pool = 1024, 3 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeBlockAsm8B(dst, src, tmp) -} - -var encBetterPools [5]sync.Pool - -// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockBetter(dst, src []byte) (d int) { - race.ReadSlice(src) - race.WriteSlice(dst) - - const ( - // Use 12 bit table when less than... - limit12B = 16 << 10 - // Use 10 bit table when less than... - limit10B = 4 << 10 - // Use 8 bit table when less than... - limit8B = 512 - ) - - if len(src) > 4<<20 { - const sz, pool = 589824, 0 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - return encodeBetterBlockAsm(dst, src, tmp) - } - if len(src) >= limit12B { - const sz, pool = 589824, 0 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - - return encodeBetterBlockAsm4MB(dst, src, tmp) - } - if len(src) >= limit10B { - const sz, pool = 81920, 0 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - - return encodeBetterBlockAsm12B(dst, src, tmp) - } - if len(src) >= limit8B { - const sz, pool = 20480, 1 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - return encodeBetterBlockAsm10B(dst, src, tmp) - } - if len(src) < minNonLiteralBlockSize { - return 0 - } - - const sz, pool = 5120, 2 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - return encodeBetterBlockAsm8B(dst, src, tmp) -} - -// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. 
-// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockSnappy(dst, src []byte) (d int) { - race.ReadSlice(src) - race.WriteSlice(dst) - - const ( - // Use 12 bit table when less than... - limit12B = 16 << 10 - // Use 10 bit table when less than... - limit10B = 4 << 10 - // Use 8 bit table when less than... - limit8B = 512 - ) - if len(src) > 65536 { - const sz, pool = 65536, 0 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeSnappyBlockAsm(dst, src, tmp) - } - if len(src) >= limit12B { - const sz, pool = 65536, 0 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeSnappyBlockAsm64K(dst, src, tmp) - } - if len(src) >= limit10B { - const sz, pool = 16384, 1 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeSnappyBlockAsm12B(dst, src, tmp) - } - if len(src) >= limit8B { - const sz, pool = 4096, 2 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeSnappyBlockAsm10B(dst, src, tmp) - } - if len(src) < minNonLiteralBlockSize { - return 0 - } - const sz, pool = 1024, 3 - tmp, ok := encPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encPools[pool].Put(tmp) - return encodeSnappyBlockAsm8B(dst, src, tmp) -} - -// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockBetterSnappy(dst, src []byte) (d int) { - race.ReadSlice(src) - race.WriteSlice(dst) - - const ( - // Use 12 bit table when less than... - limit12B = 16 << 10 - // Use 10 bit table when less than... - limit10B = 4 << 10 - // Use 8 bit table when less than... 
- limit8B = 512 - ) - if len(src) > 65536 { - const sz, pool = 589824, 0 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - return encodeSnappyBetterBlockAsm(dst, src, tmp) - } - - if len(src) >= limit12B { - const sz, pool = 294912, 4 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - - return encodeSnappyBetterBlockAsm64K(dst, src, tmp) - } - if len(src) >= limit10B { - const sz, pool = 81920, 0 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - - return encodeSnappyBetterBlockAsm12B(dst, src, tmp) - } - if len(src) >= limit8B { - const sz, pool = 20480, 1 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - return encodeSnappyBetterBlockAsm10B(dst, src, tmp) - } - if len(src) < minNonLiteralBlockSize { - return 0 - } - - const sz, pool = 5120, 2 - tmp, ok := encBetterPools[pool].Get().(*[sz]byte) - if !ok { - tmp = &[sz]byte{} - } - race.WriteSlice(tmp[:]) - defer encBetterPools[pool].Put(tmp) - return encodeSnappyBetterBlockAsm8B(dst, src, tmp) -} diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go deleted file mode 100644 index 47bac7423..000000000 --- a/vendor/github.com/klauspost/compress/s2/encode_best.go +++ /dev/null @@ -1,796 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "fmt" - "math" - "math/bits" -) - -// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockBest(dst, src []byte, dict *Dict) (d int) { - // Initialize the hash tables. - const ( - // Long hash matches. - lTableBits = 19 - maxLTableSize = 1 << lTableBits - - // Short hash matches. - sTableBits = 16 - maxSTableSize = 1 << sTableBits - - inputMargin = 8 + 2 - - debug = false - ) - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - if len(src) < minNonLiteralBlockSize { - return 0 - } - sLimitDict := len(src) - inputMargin - if sLimitDict > MaxDictSrcOffset-inputMargin { - sLimitDict = MaxDictSrcOffset - inputMargin - } - - var lTable [maxLTableSize]uint64 - var sTable [maxSTableSize]uint64 - - // Bail if we can't compress to at least this. - dstLimit := len(src) - 5 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. 
- s := 1 - repeat := 1 - if dict != nil { - dict.initBest() - s = 0 - repeat = len(dict.dict) - dict.repeat - } - cv := load64(src, s) - - // We search for a repeat at -1, but don't output repeats when nextEmit == 0 - const lowbitMask = 0xffffffff - getCur := func(x uint64) int { - return int(x & lowbitMask) - } - getPrev := func(x uint64) int { - return int(x >> 32) - } - const maxSkip = 64 - - for { - type match struct { - offset int - s int - length int - score int - rep, dict bool - } - var best match - for { - // Next src position to check - nextS := (s-nextEmit)>>8 + 1 - if nextS > maxSkip { - nextS = s + maxSkip - } else { - nextS += s - } - if nextS > sLimit { - goto emitRemainder - } - if dict != nil && s >= MaxDictSrcOffset { - dict = nil - if repeat > s { - repeat = math.MinInt32 - } - } - hashL := hash8(cv, lTableBits) - hashS := hash4(cv, sTableBits) - candidateL := lTable[hashL] - candidateS := sTable[hashS] - - score := func(m match) int { - // Matches that are longer forward are penalized since we must emit it as a literal. - score := m.length - m.s - if nextEmit == m.s { - // If we do not have to emit literals, we save 1 byte - score++ - } - offset := m.s - m.offset - if m.rep { - return score - emitRepeatSize(offset, m.length) - } - return score - emitCopySize(offset, m.length) - } - - matchAt := func(offset, s int, first uint32, rep bool) match { - if best.length != 0 && best.s-best.offset == s-offset { - // Don't retest if we have the same offset. - return match{offset: offset, s: s} - } - if load32(src, offset) != first { - return match{offset: offset, s: s} - } - m := match{offset: offset, s: s, length: 4 + offset, rep: rep} - s += 4 - for s < len(src) { - if len(src)-s < 8 { - if src[s] == src[m.length] { - m.length++ - s++ - continue - } - break - } - if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { - m.length += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - m.length += 8 - } - m.length -= offset - m.score = score(m) - if m.score <= -m.s { - // Eliminate if no savings, we might find a better one. - m.length = 0 - } - return m - } - matchDict := func(candidate, s int, first uint32, rep bool) match { - if s >= MaxDictSrcOffset { - return match{offset: candidate, s: s} - } - // Calculate offset as if in continuous array with s - offset := -len(dict.dict) + candidate - if best.length != 0 && best.s-best.offset == s-offset && !rep { - // Don't retest if we have the same offset. - return match{offset: offset, s: s} - } - - if load32(dict.dict, candidate) != first { - return match{offset: offset, s: s} - } - m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true} - s += 4 - if !rep { - for s < sLimitDict && m.length < len(dict.dict) { - if len(src)-s < 8 || len(dict.dict)-m.length < 8 { - if src[s] == dict.dict[m.length] { - m.length++ - s++ - continue - } - break - } - if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { - m.length += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - m.length += 8 - } - } else { - for s < len(src) && m.length < len(dict.dict) { - if len(src)-s < 8 || len(dict.dict)-m.length < 8 { - if src[s] == dict.dict[m.length] { - m.length++ - s++ - continue - } - break - } - if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { - m.length += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - m.length += 8 - } - } - m.length -= candidate - m.score = score(m) - if m.score <= -m.s { - // Eliminate if no savings, we might find a better one. 
- m.length = 0 - } - return m - } - - bestOf := func(a, b match) match { - if b.length == 0 { - return a - } - if a.length == 0 { - return b - } - as := a.score + b.s - bs := b.score + a.s - if as >= bs { - return a - } - return b - } - - if s > 0 { - best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) - best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) - best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) - } - if dict != nil { - candidateL := dict.bestTableLong[hashL] - candidateS := dict.bestTableShort[hashS] - best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) - best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false)) - best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) - best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false)) - } - { - if (dict == nil || repeat <= s) && repeat > 0 { - best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) - } else if s-repeat < -4 && dict != nil { - candidate := len(dict.dict) - (repeat - s) - best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) - candidate++ - best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true)) - } - - if best.length > 0 { - hashS := hash4(cv>>8, sTableBits) - // s+1 - nextShort := sTable[hashS] - s := s + 1 - cv := load64(src, s) - hashL := hash8(cv, lTableBits) - nextLong := lTable[hashL] - best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) - best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) - best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) - best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) - - // Dict at + 1 - if dict != nil { - candidateL := dict.bestTableLong[hashL] - candidateS := dict.bestTableShort[hashS] - - best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) - best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) - } - - // s+2 - if true { - hashS := hash4(cv>>8, sTableBits) - - nextShort = sTable[hashS] - s++ - cv = load64(src, s) - hashL := hash8(cv, lTableBits) - nextLong = lTable[hashL] - - if (dict == nil || repeat <= s) && repeat > 0 { - // Repeat at + 2 - best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true)) - } else if repeat-s > 4 && dict != nil { - candidate := len(dict.dict) - (repeat - s) - best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) - } - best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) - best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) - best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) - best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) - - // Dict at +2 - // Very small gain - if dict != nil { - candidateL := dict.bestTableLong[hashL] - candidateS := dict.bestTableShort[hashS] - - best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) - best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) - } - } - // Search for a match at best match end, see if that is better. - // Allow some bytes at the beginning to mismatch. - // Sweet spot is around 1-2 bytes, but depends on input. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. 
- const skipBeginning = 2 - const skipEnd = 1 - if sAt := best.s + best.length - skipEnd; sAt < sLimit { - - sBack := best.s + skipBeginning - skipEnd - backL := best.length - skipBeginning - // Load initial values - cv = load64(src, sBack) - - // Grab candidates... - next := lTable[hash8(load64(src, sAt), lTableBits)] - - if checkAt := getCur(next) - backL; checkAt > 0 { - best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) - } - if checkAt := getPrev(next) - backL; checkAt > 0 { - best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) - } - // Disabled: Extremely small gain - if false { - next = sTable[hash4(load64(src, sAt), sTableBits)] - if checkAt := getCur(next) - backL; checkAt > 0 { - best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) - } - if checkAt := getPrev(next) - backL; checkAt > 0 { - best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) - } - } - } - } - } - - // Update table - lTable[hashL] = uint64(s) | candidateL<<32 - sTable[hashS] = uint64(s) | candidateS<<32 - - if best.length > 0 { - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards, not needed for repeats... - s = best.s - if !best.rep && !best.dict { - for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { - best.offset-- - best.length++ - s-- - } - } - if false && best.offset >= s { - panic(fmt.Errorf("t %d >= s %d", best.offset, s)) - } - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - base := s - offset := s - best.offset - s += best.length - - if offset > 65535 && s-base <= 5 && !best.rep { - // Bail if the match is equal or worse to the encoding. - s = best.s + 1 - if s >= sLimit { - goto emitRemainder - } - cv = load64(src, s) - continue - } - if debug && nextEmit != base { - fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base) - } - d += emitLiteral(dst[d:], src[nextEmit:base]) - if best.rep { - if nextEmit > 0 || best.dict { - if debug { - fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) - } - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. - d += emitRepeat(dst[d:], offset, best.length) - } else { - // First match without dict cannot be a repeat. - if debug { - fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) - } - d += emitCopy(dst[d:], offset, best.length) - } - } else { - if debug { - fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) - } - d += emitCopy(dst[d:], offset, best.length) - } - repeat = offset - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - // Fill tables... - for i := best.s + 1; i < s; i++ { - cv0 := load64(src, i) - long0 := hash8(cv0, lTableBits) - short0 := hash4(cv0, sTableBits) - lTable[long0] = uint64(i) | lTable[long0]<<32 - sTable[short0] = uint64(i) | sTable[short0]<<32 - } - cv = load64(src, s) - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - if debug && nextEmit != s { - fmt.Println("emitted ", len(src)-nextEmit, "literals") - } - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} - -// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. 
It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockBestSnappy(dst, src []byte) (d int) { - // Initialize the hash tables. - const ( - // Long hash matches. - lTableBits = 19 - maxLTableSize = 1 << lTableBits - - // Short hash matches. - sTableBits = 16 - maxSTableSize = 1 << sTableBits - - inputMargin = 8 + 2 - ) - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - if len(src) < minNonLiteralBlockSize { - return 0 - } - - var lTable [maxLTableSize]uint64 - var sTable [maxSTableSize]uint64 - - // Bail if we can't compress to at least this. - dstLimit := len(src) - 5 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - cv := load64(src, s) - - // We search for a repeat at -1, but don't output repeats when nextEmit == 0 - repeat := 1 - const lowbitMask = 0xffffffff - getCur := func(x uint64) int { - return int(x & lowbitMask) - } - getPrev := func(x uint64) int { - return int(x >> 32) - } - const maxSkip = 64 - - for { - type match struct { - offset int - s int - length int - score int - } - var best match - for { - // Next src position to check - nextS := (s-nextEmit)>>8 + 1 - if nextS > maxSkip { - nextS = s + maxSkip - } else { - nextS += s - } - if nextS > sLimit { - goto emitRemainder - } - hashL := hash8(cv, lTableBits) - hashS := hash4(cv, sTableBits) - candidateL := lTable[hashL] - candidateS := sTable[hashS] - - score := func(m match) int { - // Matches that are longer forward are penalized since we must emit it as a literal. - score := m.length - m.s - if nextEmit == m.s { - // If we do not have to emit literals, we save 1 byte - score++ - } - offset := m.s - m.offset - - return score - emitCopyNoRepeatSize(offset, m.length) - } - - matchAt := func(offset, s int, first uint32) match { - if best.length != 0 && best.s-best.offset == s-offset { - // Don't retest if we have the same offset. - return match{offset: offset, s: s} - } - if load32(src, offset) != first { - return match{offset: offset, s: s} - } - m := match{offset: offset, s: s, length: 4 + offset} - s += 4 - for s <= sLimit { - if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { - m.length += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - m.length += 8 - } - m.length -= offset - m.score = score(m) - if m.score <= -m.s { - // Eliminate if no savings, we might find a better one. 
- m.length = 0 - } - return m - } - - bestOf := func(a, b match) match { - if b.length == 0 { - return a - } - if a.length == 0 { - return b - } - as := a.score + b.s - bs := b.score + a.s - if as >= bs { - return a - } - return b - } - - best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv))) - best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv))) - best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv))) - - { - best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) - if best.length > 0 { - // s+1 - nextShort := sTable[hash4(cv>>8, sTableBits)] - s := s + 1 - cv := load64(src, s) - nextLong := lTable[hash8(cv, lTableBits)] - best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) - best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) - best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) - best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) - // Repeat at + 2 - best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) - - // s+2 - if true { - nextShort = sTable[hash4(cv>>8, sTableBits)] - s++ - cv = load64(src, s) - nextLong = lTable[hash8(cv, lTableBits)] - best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) - best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) - best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) - best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) - } - // Search for a match at best match end, see if that is better. - if sAt := best.s + best.length; sAt < sLimit { - sBack := best.s - backL := best.length - // Load initial values - cv = load64(src, sBack) - // Search for mismatch - next := lTable[hash8(load64(src, sAt), lTableBits)] - //next := sTable[hash4(load64(src, sAt), sTableBits)] - - if checkAt := getCur(next) - backL; checkAt > 0 { - best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) - } - if checkAt := getPrev(next) - backL; checkAt > 0 { - best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) - } - } - } - } - - // Update table - lTable[hashL] = uint64(s) | candidateL<<32 - sTable[hashS] = uint64(s) | candidateS<<32 - - if best.length > 0 { - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards, not needed for repeats... - s = best.s - if true { - for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { - best.offset-- - best.length++ - s-- - } - } - if false && best.offset >= s { - panic(fmt.Errorf("t %d >= s %d", best.offset, s)) - } - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - base := s - offset := s - best.offset - - s += best.length - - if offset > 65535 && s-base <= 5 { - // Bail if the match is equal or worse to the encoding. - s = best.s + 1 - if s >= sLimit { - goto emitRemainder - } - cv = load64(src, s) - continue - } - d += emitLiteral(dst[d:], src[nextEmit:base]) - d += emitCopyNoRepeat(dst[d:], offset, best.length) - repeat = offset - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - // Fill tables... - for i := best.s + 1; i < s; i++ { - cv0 := load64(src, i) - long0 := hash8(cv0, lTableBits) - short0 := hash4(cv0, sTableBits) - lTable[long0] = uint64(i) | lTable[long0]<<32 - sTable[short0] = uint64(i) | sTable[short0]<<32 - } - cv = load64(src, s) - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. 
- if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} - -// emitCopySize returns the size to encode the offset+length -// -// It assumes that: -// -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 -func emitCopySize(offset, length int) int { - if offset >= 65536 { - i := 0 - if length > 64 { - length -= 64 - if length >= 4 { - // Emit remaining as repeats - return 5 + emitRepeatSize(offset, length) - } - i = 5 - } - if length == 0 { - return i - } - return i + 5 - } - - // Offset no more than 2 bytes. - if length > 64 { - if offset < 2048 { - // Emit 8 bytes, then rest as repeats... - return 2 + emitRepeatSize(offset, length-8) - } - // Emit remaining as repeats, at least 4 bytes remain. - return 3 + emitRepeatSize(offset, length-60) - } - if length >= 12 || offset >= 2048 { - return 3 - } - // Emit the remaining copy, encoded as 2 bytes. - return 2 -} - -// emitCopyNoRepeatSize returns the size to encode the offset+length -// -// It assumes that: -// -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 -func emitCopyNoRepeatSize(offset, length int) int { - if offset >= 65536 { - return 5 + 5*(length/64) - } - - // Offset no more than 2 bytes. - if length > 64 { - // Emit remaining as repeats, at least 4 bytes remain. - return 3 + 3*(length/60) - } - if length >= 12 || offset >= 2048 { - return 3 - } - // Emit the remaining copy, encoded as 2 bytes. - return 2 -} - -// emitRepeatSize returns the number of bytes required to encode a repeat. -// Length must be at least 4 and < 1<<24 -func emitRepeatSize(offset, length int) int { - // Repeat offset, make length cheaper - if length <= 4+4 || (length < 8+4 && offset < 2048) { - return 2 - } - if length < (1<<8)+4+4 { - return 3 - } - if length < (1<<16)+(1<<8)+4 { - return 4 - } - const maxRepeat = (1 << 24) - 1 - length -= (1 << 16) - 4 - left := 0 - if length > maxRepeat { - left = length - maxRepeat + 4 - } - if left > 0 { - return 5 + emitRepeatSize(offset, left) - } - return 5 -} diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go deleted file mode 100644 index 544cb1e17..000000000 --- a/vendor/github.com/klauspost/compress/s2/encode_better.go +++ /dev/null @@ -1,1106 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "bytes" - "fmt" - "math/bits" -) - -// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4(u uint64, h uint8) uint32 { - const prime4bytes = 2654435761 - return (uint32(u) * prime4bytes) >> ((32 - h) & 31) -} - -// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash5(u uint64, h uint8) uint32 { - const prime5bytes = 889523592379 - return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. 
-func hash7(u uint64, h uint8) uint32 { - const prime7bytes = 58295818150454627 - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) -} - -// hash8 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash8(u uint64, h uint8) uint32 { - const prime8bytes = 0xcf1bbcdcb7a56463 - return uint32((u * prime8bytes) >> ((64 - h) & 63)) -} - -// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockBetterGo(dst, src []byte) (d int) { - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - if len(src) < minNonLiteralBlockSize { - return 0 - } - - // Initialize the hash tables. - const ( - // Long hash matches. - lTableBits = 17 - maxLTableSize = 1 << lTableBits - - // Short hash matches. - sTableBits = 14 - maxSTableSize = 1 << sTableBits - ) - - var lTable [maxLTableSize]uint32 - var sTable [maxSTableSize]uint32 - - // Bail if we can't compress to at least this. - dstLimit := len(src) - len(src)>>5 - 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - cv := load64(src, s) - - // We initialize repeat to 0, so we never match on first attempt - repeat := 0 - - for { - candidateL := 0 - nextS := 0 - for { - // Next src position to check - nextS = s + (s-nextEmit)>>7 + 1 - if nextS > sLimit { - goto emitRemainder - } - hashL := hash7(cv, lTableBits) - hashS := hash4(cv, sTableBits) - candidateL = int(lTable[hashL]) - candidateS := int(sTable[hashS]) - lTable[hashL] = uint32(s) - sTable[hashS] = uint32(s) - - valLong := load64(src, candidateL) - valShort := load64(src, candidateS) - - // If long matches at least 8 bytes, use that. - if cv == valLong { - break - } - if cv == valShort { - candidateL = candidateS - break - } - - // Check repeat at offset checkRep. - const checkRep = 1 - // Minimum length of a repeat. Tested with various values. - // While 4-5 offers improvements in some, 6 reduces - // regressions significantly. - const wantRepeatBytes = 6 - const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) - if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { - base := s + checkRep - // Extend back - for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { - i-- - base-- - } - d += emitLiteral(dst[d:], src[nextEmit:base]) - - // Extend forward - candidate := s - repeat + wantRepeatBytes + checkRep - s += wantRepeatBytes + checkRep - for s < len(src) { - if len(src)-s < 8 { - if src[s] == src[candidate] { - s++ - candidate++ - continue - } - break - } - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. 
- d += emitRepeat(dst[d:], repeat, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - // Index in-between - index0 := base + 1 - index1 := s - 2 - - for index0 < index1 { - cv0 := load64(src, index0) - cv1 := load64(src, index1) - lTable[hash7(cv0, lTableBits)] = uint32(index0) - sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - - lTable[hash7(cv1, lTableBits)] = uint32(index1) - sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) - index0 += 2 - index1 -= 2 - } - - cv = load64(src, s) - continue - } - - // Long likely matches 7, so take that. - if uint32(cv) == uint32(valLong) { - break - } - - // Check our short candidate - if uint32(cv) == uint32(valShort) { - // Try a long candidate at s+1 - hashL = hash7(cv>>8, lTableBits) - candidateL = int(lTable[hashL]) - lTable[hashL] = uint32(s + 1) - if uint32(cv>>8) == load32(src, candidateL) { - s++ - break - } - // Use our short candidate. - candidateL = candidateS - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards - for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { - candidateL-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - base := s - offset := base - candidateL - - // Extend the 4-byte match as long as possible. - s += 4 - candidateL += 4 - for s < len(src) { - if len(src)-s < 8 { - if src[s] == src[candidateL] { - s++ - candidateL++ - continue - } - break - } - if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidateL += 8 - } - - if offset > 65535 && s-base <= 5 && repeat != offset { - // Bail if the match is equal or worse to the encoding. - s = nextS + 1 - if s >= sLimit { - goto emitRemainder - } - cv = load64(src, s) - continue - } - - d += emitLiteral(dst[d:], src[nextEmit:base]) - if repeat == offset { - d += emitRepeat(dst[d:], offset, s-base) - } else { - d += emitCopy(dst[d:], offset, s-base) - repeat = offset - } - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - - // Index short & long - index0 := base + 1 - index1 := s - 2 - - cv0 := load64(src, index0) - cv1 := load64(src, index1) - lTable[hash7(cv0, lTableBits)] = uint32(index0) - sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - - // lTable could be postponed, but very minor difference. - lTable[hash7(cv1, lTableBits)] = uint32(index1) - sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) - index0 += 1 - index1 -= 1 - cv = load64(src, s) - - // Index large values sparsely in between. - // We do two starting from different offsets for speed. - index2 := (index0 + index1 + 1) >> 1 - for index2 < index1 { - lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) - lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) - index0 += 2 - index2 += 2 - } - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} - -// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. 
-// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - if len(src) < minNonLiteralBlockSize { - return 0 - } - - // Initialize the hash tables. - const ( - // Long hash matches. - lTableBits = 16 - maxLTableSize = 1 << lTableBits - - // Short hash matches. - sTableBits = 14 - maxSTableSize = 1 << sTableBits - ) - - var lTable [maxLTableSize]uint32 - var sTable [maxSTableSize]uint32 - - // Bail if we can't compress to at least this. - dstLimit := len(src) - len(src)>>5 - 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - cv := load64(src, s) - - // We initialize repeat to 0, so we never match on first attempt - repeat := 0 - const maxSkip = 100 - - for { - candidateL := 0 - nextS := 0 - for { - // Next src position to check - nextS = (s-nextEmit)>>7 + 1 - if nextS > maxSkip { - nextS = s + maxSkip - } else { - nextS += s - } - - if nextS > sLimit { - goto emitRemainder - } - hashL := hash7(cv, lTableBits) - hashS := hash4(cv, sTableBits) - candidateL = int(lTable[hashL]) - candidateS := int(sTable[hashS]) - lTable[hashL] = uint32(s) - sTable[hashS] = uint32(s) - - if uint32(cv) == load32(src, candidateL) { - break - } - - // Check our short candidate - if uint32(cv) == load32(src, candidateS) { - // Try a long candidate at s+1 - hashL = hash7(cv>>8, lTableBits) - candidateL = int(lTable[hashL]) - lTable[hashL] = uint32(s + 1) - if uint32(cv>>8) == load32(src, candidateL) { - s++ - break - } - // Use our short candidate. - candidateL = candidateS - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards - for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { - candidateL-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - base := s - offset := base - candidateL - - // Extend the 4-byte match as long as possible. - s += 4 - candidateL += 4 - for s < len(src) { - if len(src)-s < 8 { - if src[s] == src[candidateL] { - s++ - candidateL++ - continue - } - break - } - if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidateL += 8 - } - - if offset > 65535 && s-base <= 5 && repeat != offset { - // Bail if the match is equal or worse to the encoding. - s = nextS + 1 - if s >= sLimit { - goto emitRemainder - } - cv = load64(src, s) - continue - } - - d += emitLiteral(dst[d:], src[nextEmit:base]) - d += emitCopyNoRepeat(dst[d:], offset, s-base) - repeat = offset - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. 
- return 0 - } - - // Index short & long - index0 := base + 1 - index1 := s - 2 - - cv0 := load64(src, index0) - cv1 := load64(src, index1) - lTable[hash7(cv0, lTableBits)] = uint32(index0) - sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - - lTable[hash7(cv1, lTableBits)] = uint32(index1) - sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) - index0 += 1 - index1 -= 1 - cv = load64(src, s) - - // Index large values sparsely in between. - // We do two starting from different offsets for speed. - index2 := (index0 + index1 + 1) >> 1 - for index2 < index1 { - lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) - lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) - index0 += 2 - index2 += 2 - } - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} - -// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) { - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - // Initialize the hash tables. - const ( - // Long hash matches. - lTableBits = 17 - maxLTableSize = 1 << lTableBits - - // Short hash matches. - sTableBits = 14 - maxSTableSize = 1 << sTableBits - - maxAhead = 8 // maximum bytes ahead without checking sLimit - - debug = false - ) - - sLimit := len(src) - inputMargin - if sLimit > MaxDictSrcOffset-maxAhead { - sLimit = MaxDictSrcOffset - maxAhead - } - if len(src) < minNonLiteralBlockSize { - return 0 - } - - dict.initBetter() - - var lTable [maxLTableSize]uint32 - var sTable [maxSTableSize]uint32 - - // Bail if we can't compress to at least this. - dstLimit := len(src) - len(src)>>5 - 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 0 - cv := load64(src, s) - - // We initialize repeat to 0, so we never match on first attempt - repeat := len(dict.dict) - dict.repeat - - // While in dict -searchDict: - for { - candidateL := 0 - nextS := 0 - for { - // Next src position to check - nextS = s + (s-nextEmit)>>7 + 1 - if nextS > sLimit { - break searchDict - } - hashL := hash7(cv, lTableBits) - hashS := hash4(cv, sTableBits) - candidateL = int(lTable[hashL]) - candidateS := int(sTable[hashS]) - dictL := int(dict.betterTableLong[hashL]) - dictS := int(dict.betterTableShort[hashS]) - lTable[hashL] = uint32(s) - sTable[hashS] = uint32(s) - - valLong := load64(src, candidateL) - valShort := load64(src, candidateS) - - // If long matches at least 8 bytes, use that. - if s != 0 { - if cv == valLong { - goto emitMatch - } - if cv == valShort { - candidateL = candidateS - goto emitMatch - } - } - - // Check dict repeat. 
- if repeat >= s+4 { - candidate := len(dict.dict) - repeat + s - if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) { - // Extend back - base := s - for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { - i-- - base-- - } - d += emitLiteral(dst[d:], src[nextEmit:base]) - if debug && nextEmit != base { - fmt.Println("emitted ", base-nextEmit, "literals") - } - s += 4 - candidate += 4 - for candidate < len(dict.dict)-8 && s <= len(src)-8 { - if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - d += emitRepeat(dst[d:], repeat, s-base) - if debug { - fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) - } - nextEmit = s - if s >= sLimit { - break searchDict - } - // Index in-between - index0 := base + 1 - index1 := s - 2 - - cv = load64(src, s) - for index0 < index1 { - cv0 := load64(src, index0) - cv1 := load64(src, index1) - lTable[hash7(cv0, lTableBits)] = uint32(index0) - sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - - lTable[hash7(cv1, lTableBits)] = uint32(index1) - sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) - index0 += 2 - index1 -= 2 - } - continue - } - } - // Don't try to find match at s==0 - if s == 0 { - cv = load64(src, nextS) - s = nextS - continue - } - - // Long likely matches 7, so take that. - if uint32(cv) == uint32(valLong) { - goto emitMatch - } - - // Long dict... - if uint32(cv) == load32(dict.dict, dictL) { - candidateL = dictL - goto emitDict - } - - // Check our short candidate - if uint32(cv) == uint32(valShort) { - // Try a long candidate at s+1 - hashL = hash7(cv>>8, lTableBits) - candidateL = int(lTable[hashL]) - lTable[hashL] = uint32(s + 1) - if uint32(cv>>8) == load32(src, candidateL) { - s++ - goto emitMatch - } - // Use our short candidate. - candidateL = candidateS - goto emitMatch - } - if uint32(cv) == load32(dict.dict, dictS) { - // Try a long candidate at s+1 - hashL = hash7(cv>>8, lTableBits) - candidateL = int(lTable[hashL]) - lTable[hashL] = uint32(s + 1) - if uint32(cv>>8) == load32(src, candidateL) { - s++ - goto emitMatch - } - candidateL = dictS - goto emitDict - } - cv = load64(src, nextS) - s = nextS - } - emitDict: - { - if debug { - if load32(dict.dict, candidateL) != load32(src, s) { - panic("dict emit mismatch") - } - } - // Extend backwards. - // The top bytes will be rechecked to get the full match. - for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] { - candidateL-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - d += emitLiteral(dst[d:], src[nextEmit:s]) - if debug && nextEmit != s { - fmt.Println("emitted ", s-nextEmit, "literals") - } - { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - offset := s + (len(dict.dict)) - candidateL - - // Extend the 4-byte match as long as possible. 
- s += 4 - candidateL += 4 - for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 { - if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidateL += 8 - } - - if repeat == offset { - if debug { - fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) - } - d += emitRepeat(dst[d:], offset, s-base) - } else { - if debug { - fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) - } - // Matches longer than 64 are split. - if s <= sLimit || s-base < 8 { - d += emitCopy(dst[d:], offset, s-base) - } else { - // Split to ensure we don't start a copy within next block. - d += emitCopy(dst[d:], offset, 4) - d += emitRepeat(dst[d:], offset, s-base-4) - } - repeat = offset - } - if false { - // Validate match. - if s <= candidateL { - panic("s <= candidate") - } - a := src[base:s] - b := dict.dict[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - - nextEmit = s - if s >= sLimit { - break searchDict - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - - // Index short & long - index0 := base + 1 - index1 := s - 2 - - cv0 := load64(src, index0) - cv1 := load64(src, index1) - lTable[hash7(cv0, lTableBits)] = uint32(index0) - sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - - lTable[hash7(cv1, lTableBits)] = uint32(index1) - sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) - index0 += 1 - index1 -= 1 - cv = load64(src, s) - - // index every second long in between. - for index0 < index1 { - lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) - lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) - index0 += 2 - index1 -= 2 - } - } - continue - } - emitMatch: - - // Extend backwards - for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { - candidateL-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - base := s - offset := base - candidateL - - // Extend the 4-byte match as long as possible. - s += 4 - candidateL += 4 - for s < len(src) { - if len(src)-s < 8 { - if src[s] == src[candidateL] { - s++ - candidateL++ - continue - } - break - } - if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidateL += 8 - } - - if offset > 65535 && s-base <= 5 && repeat != offset { - // Bail if the match is equal or worse to the encoding. - s = nextS + 1 - if s >= sLimit { - goto emitRemainder - } - cv = load64(src, s) - continue - } - - d += emitLiteral(dst[d:], src[nextEmit:base]) - if debug && nextEmit != s { - fmt.Println("emitted ", s-nextEmit, "literals") - } - if repeat == offset { - if debug { - fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s) - } - d += emitRepeat(dst[d:], offset, s-base) - } else { - if debug { - fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s) - } - d += emitCopy(dst[d:], offset, s-base) - repeat = offset - } - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. 
- return 0 - } - - // Index short & long - index0 := base + 1 - index1 := s - 2 - - cv0 := load64(src, index0) - cv1 := load64(src, index1) - lTable[hash7(cv0, lTableBits)] = uint32(index0) - sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - - lTable[hash7(cv1, lTableBits)] = uint32(index1) - sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) - index0 += 1 - index1 -= 1 - cv = load64(src, s) - - // Index large values sparsely in between. - // We do two starting from different offsets for speed. - index2 := (index0 + index1 + 1) >> 1 - for index2 < index1 { - lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) - lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) - index0 += 2 - index2 += 2 - } - } - - // Search without dict: - if repeat > s { - repeat = 0 - } - - // No more dict - sLimit = len(src) - inputMargin - if s >= sLimit { - goto emitRemainder - } - cv = load64(src, s) - if debug { - fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) - } - for { - candidateL := 0 - nextS := 0 - for { - // Next src position to check - nextS = s + (s-nextEmit)>>7 + 1 - if nextS > sLimit { - goto emitRemainder - } - hashL := hash7(cv, lTableBits) - hashS := hash4(cv, sTableBits) - candidateL = int(lTable[hashL]) - candidateS := int(sTable[hashS]) - lTable[hashL] = uint32(s) - sTable[hashS] = uint32(s) - - valLong := load64(src, candidateL) - valShort := load64(src, candidateS) - - // If long matches at least 8 bytes, use that. - if cv == valLong { - break - } - if cv == valShort { - candidateL = candidateS - break - } - - // Check repeat at offset checkRep. - const checkRep = 1 - // Minimum length of a repeat. Tested with various values. - // While 4-5 offers improvements in some, 6 reduces - // regressions significantly. - const wantRepeatBytes = 6 - const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) - if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { - base := s + checkRep - // Extend back - for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { - i-- - base-- - } - d += emitLiteral(dst[d:], src[nextEmit:base]) - - // Extend forward - candidate := s - repeat + wantRepeatBytes + checkRep - s += wantRepeatBytes + checkRep - for s < len(src) { - if len(src)-s < 8 { - if src[s] == src[candidate] { - s++ - candidate++ - continue - } - break - } - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. - d += emitRepeat(dst[d:], repeat, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - // Index in-between - index0 := base + 1 - index1 := s - 2 - - for index0 < index1 { - cv0 := load64(src, index0) - cv1 := load64(src, index1) - lTable[hash7(cv0, lTableBits)] = uint32(index0) - sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - - lTable[hash7(cv1, lTableBits)] = uint32(index1) - sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) - index0 += 2 - index1 -= 2 - } - - cv = load64(src, s) - continue - } - - // Long likely matches 7, so take that. 
- if uint32(cv) == uint32(valLong) { - break - } - - // Check our short candidate - if uint32(cv) == uint32(valShort) { - // Try a long candidate at s+1 - hashL = hash7(cv>>8, lTableBits) - candidateL = int(lTable[hashL]) - lTable[hashL] = uint32(s + 1) - if uint32(cv>>8) == load32(src, candidateL) { - s++ - break - } - // Use our short candidate. - candidateL = candidateS - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards - for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { - candidateL-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - base := s - offset := base - candidateL - - // Extend the 4-byte match as long as possible. - s += 4 - candidateL += 4 - for s < len(src) { - if len(src)-s < 8 { - if src[s] == src[candidateL] { - s++ - candidateL++ - continue - } - break - } - if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidateL += 8 - } - - if offset > 65535 && s-base <= 5 && repeat != offset { - // Bail if the match is equal or worse to the encoding. - s = nextS + 1 - if s >= sLimit { - goto emitRemainder - } - cv = load64(src, s) - continue - } - - d += emitLiteral(dst[d:], src[nextEmit:base]) - if repeat == offset { - d += emitRepeat(dst[d:], offset, s-base) - } else { - d += emitCopy(dst[d:], offset, s-base) - repeat = offset - } - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - - // Index short & long - index0 := base + 1 - index1 := s - 2 - - cv0 := load64(src, index0) - cv1 := load64(src, index1) - lTable[hash7(cv0, lTableBits)] = uint32(index0) - sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) - - lTable[hash7(cv1, lTableBits)] = uint32(index1) - sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) - index0 += 1 - index1 -= 1 - cv = load64(src, s) - - // Index large values sparsely in between. - // We do two starting from different offsets for speed. - index2 := (index0 + index1 + 1) >> 1 - for index2 < index1 { - lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) - lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) - index0 += 2 - index2 += 2 - } - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go deleted file mode 100644 index dd1c973ca..000000000 --- a/vendor/github.com/klauspost/compress/s2/encode_go.go +++ /dev/null @@ -1,729 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package s2 - -import ( - "bytes" - "math/bits" -) - -const hasAmd64Asm = false - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) -func encodeBlock(dst, src []byte) (d int) { - if len(src) < minNonLiteralBlockSize { - return 0 - } - return encodeBlockGo(dst, src) -} - -// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. 
-// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) -func encodeBlockBetter(dst, src []byte) (d int) { - return encodeBlockBetterGo(dst, src) -} - -// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) -func encodeBlockBetterSnappy(dst, src []byte) (d int) { - return encodeBlockBetterSnappyGo(dst, src) -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) -func encodeBlockSnappy(dst, src []byte) (d int) { - if len(src) < minNonLiteralBlockSize { - return 0 - } - return encodeBlockSnappyGo(dst, src) -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 0 <= len(lit) && len(lit) <= math.MaxUint32 -func emitLiteral(dst, lit []byte) int { - if len(lit) == 0 { - return 0 - } - const num = 63<<2 | tagLiteral - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[1] = uint8(n) - dst[0] = 60<<2 | tagLiteral - i = 2 - case n < 1<<16: - dst[2] = uint8(n >> 8) - dst[1] = uint8(n) - dst[0] = 61<<2 | tagLiteral - i = 3 - case n < 1<<24: - dst[3] = uint8(n >> 16) - dst[2] = uint8(n >> 8) - dst[1] = uint8(n) - dst[0] = 62<<2 | tagLiteral - i = 4 - default: - dst[4] = uint8(n >> 24) - dst[3] = uint8(n >> 16) - dst[2] = uint8(n >> 8) - dst[1] = uint8(n) - dst[0] = 63<<2 | tagLiteral - i = 5 - } - return i + copy(dst[i:], lit) -} - -// emitRepeat writes a repeat chunk and returns the number of bytes written. -// Length must be at least 4 and < 1<<24 -func emitRepeat(dst []byte, offset, length int) int { - // Repeat offset, make length cheaper - length -= 4 - if length <= 4 { - dst[0] = uint8(length)<<2 | tagCopy1 - dst[1] = 0 - return 2 - } - if length < 8 && offset < 2048 { - // Encode WITH offset - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 - return 2 - } - if length < (1<<8)+4 { - length -= 4 - dst[2] = uint8(length) - dst[1] = 0 - dst[0] = 5<<2 | tagCopy1 - return 3 - } - if length < (1<<16)+(1<<8) { - length -= 1 << 8 - dst[3] = uint8(length >> 8) - dst[2] = uint8(length >> 0) - dst[1] = 0 - dst[0] = 6<<2 | tagCopy1 - return 4 - } - const maxRepeat = (1 << 24) - 1 - length -= 1 << 16 - left := 0 - if length > maxRepeat { - left = length - maxRepeat + 4 - length = maxRepeat - 4 - } - dst[4] = uint8(length >> 16) - dst[3] = uint8(length >> 8) - dst[2] = uint8(length >> 0) - dst[1] = 0 - dst[0] = 7<<2 | tagCopy1 - if left > 0 { - return 5 + emitRepeat(dst[5:], offset, left) - } - return 5 -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 -func emitCopy(dst []byte, offset, length int) int { - if offset >= 65536 { - i := 0 - if length > 64 { - // Emit a length 64 copy, encoded as 5 bytes. 
- dst[4] = uint8(offset >> 24) - dst[3] = uint8(offset >> 16) - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = 63<<2 | tagCopy4 - length -= 64 - if length >= 4 { - // Emit remaining as repeats - return 5 + emitRepeat(dst[5:], offset, length) - } - i = 5 - } - if length == 0 { - return i - } - // Emit a copy, offset encoded as 4 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy4 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - dst[i+3] = uint8(offset >> 16) - dst[i+4] = uint8(offset >> 24) - return i + 5 - } - - // Offset no more than 2 bytes. - if length > 64 { - off := 3 - if offset < 2048 { - // emit 8 bytes as tagCopy1, rest as repeats. - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 - length -= 8 - off = 2 - } else { - // Emit a length 60 copy, encoded as 3 bytes. - // Emit remaining as repeat value (minimum 4 bytes). - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = 59<<2 | tagCopy2 - length -= 60 - } - // Emit remaining as repeats, at least 4 bytes remain. - return off + emitRepeat(dst[off:], offset, length) - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = uint8(length-1)<<2 | tagCopy2 - return 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - return 2 -} - -// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 -func emitCopyNoRepeat(dst []byte, offset, length int) int { - if offset >= 65536 { - i := 0 - if length > 64 { - // Emit a length 64 copy, encoded as 5 bytes. - dst[4] = uint8(offset >> 24) - dst[3] = uint8(offset >> 16) - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = 63<<2 | tagCopy4 - length -= 64 - if length >= 4 { - // Emit remaining as repeats - return 5 + emitCopyNoRepeat(dst[5:], offset, length) - } - i = 5 - } - if length == 0 { - return i - } - // Emit a copy, offset encoded as 4 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy4 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - dst[i+3] = uint8(offset >> 16) - dst[i+4] = uint8(offset >> 24) - return i + 5 - } - - // Offset no more than 2 bytes. - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - // Emit remaining as repeat value (minimum 4 bytes). - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = 59<<2 | tagCopy2 - length -= 60 - // Emit remaining as repeats, at least 4 bytes remain. - return 3 + emitCopyNoRepeat(dst[3:], offset, length) - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = uint8(length-1)<<2 | tagCopy2 - return 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - return 2 -} - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) -func matchLen(a []byte, b []byte) int { - b = b[:len(a)] - var checked int - if len(a) > 4 { - // Try 4 bytes first - if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { - return bits.TrailingZeros32(diff) >> 3 - } - // Switch to 8 byte matching. 
- checked = 4 - a = a[4:] - b = b[4:] - for len(a) >= 8 { - b = b[:len(a)] - if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { - return checked + (bits.TrailingZeros64(diff) >> 3) - } - checked += 8 - a = a[8:] - b = b[8:] - } - } - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - return int(i) + checked - } - } - return len(a) + checked -} - -// input must be > inputMargin -func calcBlockSize(src []byte, _ *[32768]byte) (d int) { - // Initialize the hash table. - const ( - tableBits = 13 - maxTableSize = 1 << tableBits - ) - - var table [maxTableSize]uint32 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // Bail if we can't compress to at least this. - dstLimit := len(src) - len(src)>>5 - 5 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - cv := load64(src, s) - - // We search for a repeat at -1, but don't output repeats when nextEmit == 0 - repeat := 1 - - for { - candidate := 0 - for { - // Next src position to check - nextS := s + (s-nextEmit)>>6 + 4 - if nextS > sLimit { - goto emitRemainder - } - hash0 := hash6(cv, tableBits) - hash1 := hash6(cv>>8, tableBits) - candidate = int(table[hash0]) - candidate2 := int(table[hash1]) - table[hash0] = uint32(s) - table[hash1] = uint32(s + 1) - hash2 := hash6(cv>>16, tableBits) - - // Check repeat at offset checkRep. - const checkRep = 1 - if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { - base := s + checkRep - // Extend back - for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { - i-- - base-- - } - d += emitLiteralSize(src[nextEmit:base]) - - // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep - for s <= sLimit { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopyNoRepeatSize(repeat, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - cv = load64(src, s) - continue - } - - if uint32(cv) == load32(src, candidate) { - break - } - candidate = int(table[hash2]) - if uint32(cv>>8) == load32(src, candidate2) { - table[hash2] = uint32(s + 2) - candidate = candidate2 - s++ - break - } - table[hash2] = uint32(s + 2) - if uint32(cv>>16) == load32(src, candidate) { - s += 2 - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards - for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { - candidate-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - d += emitLiteralSize(src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. 
We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - repeat = base - candidate - - // Extend the 4-byte match as long as possible. - s += 4 - candidate += 4 - for s <= len(src)-8 { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopyNoRepeatSize(repeat, s-base) - if false { - // Validate match. - a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - // Check for an immediate match, otherwise start search at s+1 - x := load64(src, s-2) - m2Hash := hash6(x, tableBits) - currHash := hash6(x>>16, tableBits) - candidate = int(table[currHash]) - table[m2Hash] = uint32(s - 2) - table[currHash] = uint32(s) - if uint32(x>>16) != load32(src, candidate) { - cv = load64(src, s+1) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteralSize(src[nextEmit:]) - } - return d -} - -// length must be > inputMargin. -func calcBlockSizeSmall(src []byte, _ *[2048]byte) (d int) { - // Initialize the hash table. - const ( - tableBits = 9 - maxTableSize = 1 << tableBits - ) - - var table [maxTableSize]uint32 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // Bail if we can't compress to at least this. - dstLimit := len(src) - len(src)>>5 - 5 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - cv := load64(src, s) - - // We search for a repeat at -1, but don't output repeats when nextEmit == 0 - repeat := 1 - - for { - candidate := 0 - for { - // Next src position to check - nextS := s + (s-nextEmit)>>6 + 4 - if nextS > sLimit { - goto emitRemainder - } - hash0 := hash6(cv, tableBits) - hash1 := hash6(cv>>8, tableBits) - candidate = int(table[hash0]) - candidate2 := int(table[hash1]) - table[hash0] = uint32(s) - table[hash1] = uint32(s + 1) - hash2 := hash6(cv>>16, tableBits) - - // Check repeat at offset checkRep. 
- const checkRep = 1 - if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { - base := s + checkRep - // Extend back - for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { - i-- - base-- - } - d += emitLiteralSize(src[nextEmit:base]) - - // Extend forward - candidate := s - repeat + 4 + checkRep - s += 4 + checkRep - for s <= sLimit { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopyNoRepeatSize(repeat, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - cv = load64(src, s) - continue - } - - if uint32(cv) == load32(src, candidate) { - break - } - candidate = int(table[hash2]) - if uint32(cv>>8) == load32(src, candidate2) { - table[hash2] = uint32(s + 2) - candidate = candidate2 - s++ - break - } - table[hash2] = uint32(s + 2) - if uint32(cv>>16) == load32(src, candidate) { - s += 2 - break - } - - cv = load64(src, nextS) - s = nextS - } - - // Extend backwards - for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { - candidate-- - s-- - } - - // Bail if we exceed the maximum size. - if d+(s-nextEmit) > dstLimit { - return 0 - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - d += emitLiteralSize(src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - repeat = base - candidate - - // Extend the 4-byte match as long as possible. - s += 4 - candidate += 4 - for s <= len(src)-8 { - if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { - s += bits.TrailingZeros64(diff) >> 3 - break - } - s += 8 - candidate += 8 - } - - d += emitCopyNoRepeatSize(repeat, s-base) - if false { - // Validate match. - a := src[base:s] - b := src[base-repeat : base-repeat+(s-base)] - if !bytes.Equal(a, b) { - panic("mismatch") - } - } - - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - if d > dstLimit { - // Do we have space for more, if not bail. - return 0 - } - // Check for an immediate match, otherwise start search at s+1 - x := load64(src, s-2) - m2Hash := hash6(x, tableBits) - currHash := hash6(x>>16, tableBits) - candidate = int(table[currHash]) - table[m2Hash] = uint32(s - 2) - table[currHash] = uint32(s) - if uint32(x>>16) != load32(src, candidate) { - cv = load64(src, s+1) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - // Bail if we exceed the maximum size. - if d+len(src)-nextEmit > dstLimit { - return 0 - } - d += emitLiteralSize(src[nextEmit:]) - } - return d -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 0 <= len(lit) && len(lit) <= math.MaxUint32 -func emitLiteralSize(lit []byte) int { - if len(lit) == 0 { - return 0 - } - switch { - case len(lit) <= 60: - return len(lit) + 1 - case len(lit) <= 1<<8: - return len(lit) + 2 - case len(lit) <= 1<<16: - return len(lit) + 3 - case len(lit) <= 1<<24: - return len(lit) + 4 - default: - return len(lit) + 5 - } -} - -func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { - panic("cvtLZ4BlockAsm should be unreachable") -} - -func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { - panic("cvtLZ4BlockSnappyAsm should be unreachable") -} - -func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { - panic("cvtLZ4sBlockAsm should be unreachable") -} - -func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { - panic("cvtLZ4sBlockSnappyAsm should be unreachable") -} diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go deleted file mode 100644 index f43aa8154..000000000 --- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go +++ /dev/null @@ -1,228 +0,0 @@ -// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -package s2 - -func _dummy_() - -// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4294967295 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int - -// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4194304 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int - -// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 16383 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int - -// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4095 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int - -// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 511 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int - -// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4294967295 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int - -// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4194304 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. 
-// -//go:noescape -func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int - -// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 16383 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int - -// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4095 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int - -// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 511 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int - -// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4294967295 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int - -// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 65535 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int - -// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 16383 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int - -// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4095 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int - -// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 511 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int - -// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4294967295 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int - -// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 65535 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int - -// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 16383 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. 
-// -//go:noescape -func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int - -// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4095 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int - -// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 511 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int - -// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 4294967295 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func calcBlockSize(src []byte, tmp *[32768]byte) int - -// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst. -// Maximum input 1024 bytes. -// It assumes that the varint-encoded length of the decompressed bytes has already been written. -// -//go:noescape -func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes with margin of 0 bytes -// 0 <= len(lit) && len(lit) <= math.MaxUint32 -// -//go:noescape -func emitLiteral(dst []byte, lit []byte) int - -// emitRepeat writes a repeat chunk and returns the number of bytes written. -// Length must be at least 4 and < 1<<32 -// -//go:noescape -func emitRepeat(dst []byte, offset int, length int) int - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 -// -//go:noescape -func emitCopy(dst []byte, offset int, length int) int - -// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. 
-// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint32 -// 4 <= length && length <= 1 << 24 -// -//go:noescape -func emitCopyNoRepeat(dst []byte, offset int, length int) int - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) -// -//go:noescape -func matchLen(a []byte, b []byte) int - -// cvtLZ4Block converts an LZ4 block to S2 -// -//go:noescape -func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) - -// cvtLZ4sBlock converts an LZ4s block to S2 -// -//go:noescape -func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) - -// cvtLZ4Block converts an LZ4 block to Snappy -// -//go:noescape -func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) - -// cvtLZ4sBlock converts an LZ4s block to Snappy -// -//go:noescape -func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s deleted file mode 100644 index df9be687b..000000000 --- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s +++ /dev/null @@ -1,21303 +0,0 @@ -// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func _dummy_() -TEXT ·_dummy_(SB), $0 -#ifdef GOAMD64_v4 -#ifndef GOAMD64_v3 -#define GOAMD64_v3 -#endif -#endif - RET - -// func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBlockAsm(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000200, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBlockAsm: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBlockAsm - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBlockAsm: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeBlockAsm - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x32, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeBlockAsm - LEAL 1(DX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI - JZ repeat_extend_back_end_encodeBlockAsm - -repeat_extend_back_loop_encodeBlockAsm: - CMPL DI, R8 - JBE repeat_extend_back_end_encodeBlockAsm - MOVB -1(BX)(SI*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeBlockAsm - LEAL -1(DI), DI - DECL SI - JNZ repeat_extend_back_loop_encodeBlockAsm - -repeat_extend_back_end_encodeBlockAsm: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 5(CX)(SI*1), SI - CMPQ SI, (SP) - JB 
repeat_dst_size_check_encodeBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeBlockAsm: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeBlockAsm - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_encodeBlockAsm - CMPL SI, $0x00010000 - JB three_bytes_repeat_emit_encodeBlockAsm - CMPL SI, $0x01000000 - JB four_bytes_repeat_emit_encodeBlockAsm - MOVB $0xfc, (CX) - MOVL SI, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_repeat_emit_encodeBlockAsm - -four_bytes_repeat_emit_encodeBlockAsm: - MOVL SI, R11 - SHRL $0x10, R11 - MOVB $0xf8, (CX) - MOVW SI, 1(CX) - MOVB R11, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_repeat_emit_encodeBlockAsm - -three_bytes_repeat_emit_encodeBlockAsm: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeBlockAsm - -two_bytes_repeat_emit_encodeBlockAsm: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeBlockAsm - JMP memmove_long_repeat_emit_encodeBlockAsm - -one_byte_repeat_emit_encodeBlockAsm: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_repeat_emit_encodeBlockAsm: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (CX) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm - -emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm - -emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm - -emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_repeat_emit_encodeBlockAsm: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeBlockAsm - -memmove_long_repeat_emit_encodeBlockAsm: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 - JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(CX)(R13*1), R14 - -emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 - ADDQ $0x20, R13 - DECQ R12 - JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(CX)(R13*1) - MOVOA X5, -16(CX)(R13*1) - ADDQ $0x20, R13 - CMPQ 
R9, R13 - JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeBlockAsm: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL DX, R9 - LEAQ (BX)(DX*1), R10 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_repeat_extend_encodeBlockAsm: - CMPL R9, $0x10 - JB matchlen_match8_repeat_extend_encodeBlockAsm - MOVQ (R10)(R12*1), R11 - MOVQ 8(R10)(R12*1), R13 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm - XORQ 8(SI)(R12*1), R13 - JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm - LEAL -16(R9), R9 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm - -matchlen_bsf_16repeat_extend_encodeBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm - -matchlen_match8_repeat_extend_encodeBlockAsm: - CMPL R9, $0x08 - JB matchlen_match4_repeat_extend_encodeBlockAsm - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm - LEAL -8(R9), R9 - LEAL 8(R12), R12 - JMP matchlen_match4_repeat_extend_encodeBlockAsm - -matchlen_bsf_8_repeat_extend_encodeBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm - -matchlen_match4_repeat_extend_encodeBlockAsm: - CMPL R9, $0x04 - JB matchlen_match2_repeat_extend_encodeBlockAsm - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 - JNE matchlen_match2_repeat_extend_encodeBlockAsm - LEAL -4(R9), R9 - LEAL 4(R12), R12 - -matchlen_match2_repeat_extend_encodeBlockAsm: - CMPL R9, $0x01 - JE matchlen_match1_repeat_extend_encodeBlockAsm - JB repeat_extend_forward_end_encodeBlockAsm - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 - JNE matchlen_match1_repeat_extend_encodeBlockAsm - LEAL 2(R12), R12 - SUBL $0x02, R9 - JZ repeat_extend_forward_end_encodeBlockAsm - -matchlen_match1_repeat_extend_encodeBlockAsm: - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 - JNE repeat_extend_forward_end_encodeBlockAsm - LEAL 1(R12), R12 - -repeat_extend_forward_end_encodeBlockAsm: - ADDL R12, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 - JZ repeat_as_copy_encodeBlockAsm - - // emitRepeat -emit_repeat_again_match_repeat_encodeBlockAsm: - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_match_repeat_encodeBlockAsm - CMPL R8, $0x0c - JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm - CMPL DI, $0x00000800 - JB repeat_two_offset_match_repeat_encodeBlockAsm - -cant_repeat_two_offset_match_repeat_encodeBlockAsm: - CMPL SI, $0x00000104 - JB repeat_three_match_repeat_encodeBlockAsm - CMPL SI, $0x00010100 - JB repeat_four_match_repeat_encodeBlockAsm - CMPL SI, $0x0100ffff - JB repeat_five_match_repeat_encodeBlockAsm - LEAL -16842747(SI), SI - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_match_repeat_encodeBlockAsm - -repeat_five_match_repeat_encodeBlockAsm: - LEAL -65536(SI), SI - MOVL SI, DI - MOVW $0x001d, (CX) - MOVW SI, 2(CX) - SARL $0x10, DI - MOVB DI, 4(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_four_match_repeat_encodeBlockAsm: - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm - 
-repeat_three_match_repeat_encodeBlockAsm: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_two_match_repeat_encodeBlockAsm: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_two_offset_match_repeat_encodeBlockAsm: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_as_copy_encodeBlockAsm: - // emitCopy - CMPL DI, $0x00010000 - JB two_byte_offset_repeat_as_copy_encodeBlockAsm - CMPL SI, $0x40 - JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm - MOVB $0xff, (CX) - MOVL DI, 1(CX) - LEAL -64(SI), SI - ADDQ $0x05, CX - CMPL SI, $0x04 - JB four_bytes_remain_repeat_as_copy_encodeBlockAsm - - // emitRepeat -emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy: - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy - CMPL R8, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy - CMPL SI, $0x00010100 - JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy - CMPL SI, $0x0100ffff - JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy - LEAL -16842747(SI), SI - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy - -repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy: - LEAL -65536(SI), SI - MOVL SI, DI - MOVW $0x001d, (CX) - MOVW SI, 2(CX) - SARL $0x10, DI - MOVB DI, 4(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy: - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -four_bytes_remain_repeat_as_copy_encodeBlockAsm: - TESTL SI, SI - JZ repeat_end_emit_encodeBlockAsm - XORL R8, R8 - LEAL -1(R8)(SI*4), SI - MOVB SI, (CX) - MOVL DI, 1(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm - -two_byte_offset_repeat_as_copy_encodeBlockAsm: - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm - CMPL DI, $0x00000800 - JAE long_offset_short_repeat_as_copy_encodeBlockAsm - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(CX) - MOVL DI, R9 - SHRL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - SUBL $0x08, SI - - // emitRepeat - LEAL -4(SI), SI - JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - -emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - CMPL R8, $0x0c - JAE 
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - CMPL SI, $0x00010100 - JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - CMPL SI, $0x0100ffff - JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - LEAL -16842747(SI), SI - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b - -repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - LEAL -65536(SI), SI - MOVL SI, DI - MOVW $0x001d, (CX) - MOVW SI, 2(CX) - SARL $0x10, DI - MOVB DI, 4(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -long_offset_short_repeat_as_copy_encodeBlockAsm: - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - - // emitRepeat -emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short: - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short - CMPL R8, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short - CMPL SI, $0x00010100 - JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short - CMPL SI, $0x0100ffff - JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short - LEAL -16842747(SI), SI - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short - -repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short: - LEAL -65536(SI), SI - MOVL SI, DI - MOVW $0x001d, (CX) - MOVW SI, 2(CX) - SARL $0x10, DI - MOVB DI, 4(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short: - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, 
SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -two_byte_offset_short_repeat_as_copy_encodeBlockAsm: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeBlockAsm - CMPL DI, $0x00000800 - JAE emit_copy_three_repeat_as_copy_encodeBlockAsm - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm - -emit_copy_three_repeat_as_copy_encodeBlockAsm: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, CX - -repeat_end_emit_encodeBlockAsm: - MOVL DX, 12(SP) - JMP search_loop_encodeBlockAsm - -no_repeat_found_encodeBlockAsm: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBlockAsm - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeBlockAsm - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeBlockAsm - MOVL 20(SP), DX - JMP search_loop_encodeBlockAsm - -candidate3_match_encodeBlockAsm: - ADDL $0x02, DX - JMP candidate_match_encodeBlockAsm - -candidate2_match_encodeBlockAsm: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_encodeBlockAsm: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBlockAsm - -match_extend_back_loop_encodeBlockAsm: - CMPL DX, DI - JBE match_extend_back_end_encodeBlockAsm - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBlockAsm - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBlockAsm - JMP match_extend_back_loop_encodeBlockAsm - -match_extend_back_end_encodeBlockAsm: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 5(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBlockAsm: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeBlockAsm - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeBlockAsm - CMPL R8, $0x00010000 - JB three_bytes_match_emit_encodeBlockAsm - CMPL R8, $0x01000000 - JB four_bytes_match_emit_encodeBlockAsm - MOVB $0xfc, (CX) - MOVL R8, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_match_emit_encodeBlockAsm - -four_bytes_match_emit_encodeBlockAsm: - MOVL R8, R10 - SHRL $0x10, R10 - MOVB $0xf8, (CX) - MOVW R8, 1(CX) - MOVB R10, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_match_emit_encodeBlockAsm - -three_bytes_match_emit_encodeBlockAsm: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBlockAsm - -two_bytes_match_emit_encodeBlockAsm: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeBlockAsm - JMP memmove_long_match_emit_encodeBlockAsm - -one_byte_match_emit_encodeBlockAsm: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBlockAsm: - LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP 
memmove_end_copy_match_emit_encodeBlockAsm - -emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm - -emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm - -emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBlockAsm: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeBlockAsm - -memmove_long_match_emit_encodeBlockAsm: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeBlockAsm: -match_nolit_loop_encodeBlockAsm: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeBlockAsm: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeBlockAsm - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeBlockAsm - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeBlockAsm - -matchlen_bsf_16match_nolit_encodeBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeBlockAsm - -matchlen_match8_match_nolit_encodeBlockAsm: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeBlockAsm - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeBlockAsm - -matchlen_bsf_8_match_nolit_encodeBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeBlockAsm - -matchlen_match4_match_nolit_encodeBlockAsm: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeBlockAsm - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeBlockAsm - LEAL -4(DI), DI - LEAL 4(R10), R10 - 
-matchlen_match2_match_nolit_encodeBlockAsm: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeBlockAsm - JB match_nolit_end_encodeBlockAsm - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_encodeBlockAsm - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_encodeBlockAsm - -matchlen_match1_match_nolit_encodeBlockAsm: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeBlockAsm - LEAL 1(R10), R10 - -match_nolit_end_encodeBlockAsm: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy - CMPL SI, $0x00010000 - JB two_byte_offset_match_nolit_encodeBlockAsm - CMPL R10, $0x40 - JBE four_bytes_remain_match_nolit_encodeBlockAsm - MOVB $0xff, (CX) - MOVL SI, 1(CX) - LEAL -64(R10), R10 - ADDQ $0x05, CX - CMPL R10, $0x04 - JB four_bytes_remain_match_nolit_encodeBlockAsm - - // emitRepeat -emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy: - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy - -cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm_emit_copy - CMPL R10, $0x00010100 - JB repeat_four_match_nolit_encodeBlockAsm_emit_copy - CMPL R10, $0x0100ffff - JB repeat_five_match_nolit_encodeBlockAsm_emit_copy - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy - -repeat_five_match_nolit_encodeBlockAsm_emit_copy: - LEAL -65536(R10), R10 - MOVL R10, SI - MOVW $0x001d, (CX) - MOVW R10, 2(CX) - SARL $0x10, SI - MOVB SI, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_four_match_nolit_encodeBlockAsm_emit_copy: - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_three_match_nolit_encodeBlockAsm_emit_copy: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_two_match_nolit_encodeBlockAsm_emit_copy: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -four_bytes_remain_match_nolit_encodeBlockAsm: - TESTL R10, R10 - JZ match_nolit_emitcopy_end_encodeBlockAsm - XORL DI, DI - LEAL -1(DI)(R10*4), R10 - MOVB R10, (CX) - MOVL SI, 1(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -two_byte_offset_match_nolit_encodeBlockAsm: - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBlockAsm - CMPL SI, $0x00000800 - JAE long_offset_short_match_nolit_encodeBlockAsm - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(CX) - MOVL SI, R8 - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, DI - MOVB DI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R10 - - // emitRepeat - LEAL -4(R10), R10 - JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b - -emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b: - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE 
repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b - CMPL R10, $0x00010100 - JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b - CMPL R10, $0x0100ffff - JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b - -repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b: - LEAL -65536(R10), R10 - MOVL R10, SI - MOVW $0x001d, (CX) - MOVW R10, 2(CX) - SARL $0x10, SI - MOVB SI, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b: - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -long_offset_short_match_nolit_encodeBlockAsm: - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - - // emitRepeat -emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short: - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short - CMPL R10, $0x00010100 - JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short - CMPL R10, $0x0100ffff - JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short - -repeat_five_match_nolit_encodeBlockAsm_emit_copy_short: - LEAL -65536(R10), R10 - MOVL R10, SI - MOVW $0x001d, (CX) - MOVW R10, 2(CX) - SARL $0x10, SI - MOVB SI, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_four_match_nolit_encodeBlockAsm_emit_copy_short: - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_three_match_nolit_encodeBlockAsm_emit_copy_short: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -repeat_two_match_nolit_encodeBlockAsm_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - 
-repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -two_byte_offset_short_match_nolit_encodeBlockAsm: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeBlockAsm - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_encodeBlockAsm - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm - -emit_copy_three_match_nolit_encodeBlockAsm: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeBlockAsm: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBlockAsm - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBlockAsm: - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x32, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x32, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeBlockAsm - INCL DX - JMP search_loop_encodeBlockAsm - -emit_remainder_encodeBlockAsm: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 5(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBlockAsm: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBlockAsm - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBlockAsm - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBlockAsm - CMPL DX, $0x00010000 - JB three_bytes_emit_remainder_encodeBlockAsm - CMPL DX, $0x01000000 - JB four_bytes_emit_remainder_encodeBlockAsm - MOVB $0xfc, (CX) - MOVL DX, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_emit_remainder_encodeBlockAsm - -four_bytes_emit_remainder_encodeBlockAsm: - MOVL DX, BX - SHRL $0x10, BX - MOVB $0xf8, (CX) - MOVW DX, 1(CX) - MOVB BL, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_emit_remainder_encodeBlockAsm - -three_bytes_emit_remainder_encodeBlockAsm: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBlockAsm - -two_bytes_emit_remainder_encodeBlockAsm: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBlockAsm - JMP memmove_long_emit_remainder_encodeBlockAsm - -one_byte_emit_remainder_encodeBlockAsm: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBlockAsm: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - 
MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm - -emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm - -emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm - -emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm - -emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm - -emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBlockAsm: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBlockAsm - -memmove_long_emit_remainder_encodeBlockAsm: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBlockAsm: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBlockAsm4MB(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000200, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBlockAsm4MB: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBlockAsm4MB - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBlockAsm4MB: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeBlockAsm4MB - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ 
$0x32, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x32, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeBlockAsm4MB - LEAL 1(DX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI - JZ repeat_extend_back_end_encodeBlockAsm4MB - -repeat_extend_back_loop_encodeBlockAsm4MB: - CMPL DI, R8 - JBE repeat_extend_back_end_encodeBlockAsm4MB - MOVB -1(BX)(SI*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeBlockAsm4MB - LEAL -1(DI), DI - DECL SI - JNZ repeat_extend_back_loop_encodeBlockAsm4MB - -repeat_extend_back_end_encodeBlockAsm4MB: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 4(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_encodeBlockAsm4MB - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeBlockAsm4MB: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeBlockAsm4MB - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_encodeBlockAsm4MB - CMPL SI, $0x00010000 - JB three_bytes_repeat_emit_encodeBlockAsm4MB - MOVL SI, R11 - SHRL $0x10, R11 - MOVB $0xf8, (CX) - MOVW SI, 1(CX) - MOVB R11, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_repeat_emit_encodeBlockAsm4MB - -three_bytes_repeat_emit_encodeBlockAsm4MB: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeBlockAsm4MB - -two_bytes_repeat_emit_encodeBlockAsm4MB: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeBlockAsm4MB - JMP memmove_long_repeat_emit_encodeBlockAsm4MB - -one_byte_repeat_emit_encodeBlockAsm4MB: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_repeat_emit_encodeBlockAsm4MB: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (CX) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB - -emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB - -emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB - -emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_repeat_emit_encodeBlockAsm4MB: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB - -memmove_long_repeat_emit_encodeBlockAsm4MB: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - 
MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 - JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(CX)(R13*1), R14 - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 - ADDQ $0x20, R13 - DECQ R12 - JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(CX)(R13*1) - MOVOA X5, -16(CX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 - JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeBlockAsm4MB: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL DX, R9 - LEAQ (BX)(DX*1), R10 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB: - CMPL R9, $0x10 - JB matchlen_match8_repeat_extend_encodeBlockAsm4MB - MOVQ (R10)(R12*1), R11 - MOVQ 8(R10)(R12*1), R13 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB - XORQ 8(SI)(R12*1), R13 - JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm4MB - LEAL -16(R9), R9 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB - -matchlen_bsf_16repeat_extend_encodeBlockAsm4MB: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm4MB - -matchlen_match8_repeat_extend_encodeBlockAsm4MB: - CMPL R9, $0x08 - JB matchlen_match4_repeat_extend_encodeBlockAsm4MB - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB - LEAL -8(R9), R9 - LEAL 8(R12), R12 - JMP matchlen_match4_repeat_extend_encodeBlockAsm4MB - -matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm4MB - -matchlen_match4_repeat_extend_encodeBlockAsm4MB: - CMPL R9, $0x04 - JB matchlen_match2_repeat_extend_encodeBlockAsm4MB - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 - JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB - LEAL -4(R9), R9 - LEAL 4(R12), R12 - -matchlen_match2_repeat_extend_encodeBlockAsm4MB: - CMPL R9, $0x01 - JE matchlen_match1_repeat_extend_encodeBlockAsm4MB - JB repeat_extend_forward_end_encodeBlockAsm4MB - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 - JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB - LEAL 2(R12), R12 - SUBL $0x02, R9 - JZ repeat_extend_forward_end_encodeBlockAsm4MB - -matchlen_match1_repeat_extend_encodeBlockAsm4MB: - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 - JNE repeat_extend_forward_end_encodeBlockAsm4MB - LEAL 1(R12), R12 - -repeat_extend_forward_end_encodeBlockAsm4MB: - ADDL R12, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 - JZ repeat_as_copy_encodeBlockAsm4MB - - // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_match_repeat_encodeBlockAsm4MB - CMPL R8, $0x0c - JAE 
cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB - CMPL DI, $0x00000800 - JB repeat_two_offset_match_repeat_encodeBlockAsm4MB - -cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB: - CMPL SI, $0x00000104 - JB repeat_three_match_repeat_encodeBlockAsm4MB - CMPL SI, $0x00010100 - JB repeat_four_match_repeat_encodeBlockAsm4MB - LEAL -65536(SI), SI - MOVL SI, DI - MOVW $0x001d, (CX) - MOVW SI, 2(CX) - SARL $0x10, DI - MOVB DI, 4(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_four_match_repeat_encodeBlockAsm4MB: - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_three_match_repeat_encodeBlockAsm4MB: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_two_match_repeat_encodeBlockAsm4MB: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_two_offset_match_repeat_encodeBlockAsm4MB: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_as_copy_encodeBlockAsm4MB: - // emitCopy - CMPL DI, $0x00010000 - JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB - CMPL SI, $0x40 - JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB - MOVB $0xff, (CX) - MOVL DI, 1(CX) - LEAL -64(SI), SI - ADDQ $0x05, CX - CMPL SI, $0x04 - JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB - - // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy - CMPL R8, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy - CMPL SI, $0x00010100 - JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy - LEAL -65536(SI), SI - MOVL SI, DI - MOVW $0x001d, (CX) - MOVW SI, 2(CX) - SARL $0x10, DI - MOVB DI, 4(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB: - TESTL SI, SI - JZ repeat_end_emit_encodeBlockAsm4MB - XORL R8, R8 - LEAL -1(R8)(SI*4), SI - MOVB SI, (CX) - MOVL DI, 1(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -two_byte_offset_repeat_as_copy_encodeBlockAsm4MB: - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB - CMPL DI, $0x00000800 - JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, 
R8 - MOVB R8, (CX) - ADDQ $0x02, CX - SUBL $0x08, SI - - // emitRepeat - LEAL -4(SI), SI - JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - CMPL R8, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - CMPL SI, $0x00010100 - JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b - LEAL -65536(SI), SI - MOVL SI, DI - MOVW $0x001d, (CX) - MOVW SI, 2(CX) - SARL $0x10, DI - MOVB DI, 4(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -long_offset_short_repeat_as_copy_encodeBlockAsm4MB: - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - - // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - CMPL R8, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - CMPL SI, $0x00010100 - JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short - LEAL -65536(SI), SI - MOVL SI, DI - MOVW $0x001d, (CX) - MOVW SI, 2(CX) - SARL $0x10, DI - MOVB DI, 4(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB - CMPL DI, $0x00000800 - JAE 
emit_copy_three_repeat_as_copy_encodeBlockAsm4MB - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm4MB - -emit_copy_three_repeat_as_copy_encodeBlockAsm4MB: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, CX - -repeat_end_emit_encodeBlockAsm4MB: - MOVL DX, 12(SP) - JMP search_loop_encodeBlockAsm4MB - -no_repeat_found_encodeBlockAsm4MB: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBlockAsm4MB - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeBlockAsm4MB - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeBlockAsm4MB - MOVL 20(SP), DX - JMP search_loop_encodeBlockAsm4MB - -candidate3_match_encodeBlockAsm4MB: - ADDL $0x02, DX - JMP candidate_match_encodeBlockAsm4MB - -candidate2_match_encodeBlockAsm4MB: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_encodeBlockAsm4MB: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBlockAsm4MB - -match_extend_back_loop_encodeBlockAsm4MB: - CMPL DX, DI - JBE match_extend_back_end_encodeBlockAsm4MB - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBlockAsm4MB - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBlockAsm4MB - JMP match_extend_back_loop_encodeBlockAsm4MB - -match_extend_back_end_encodeBlockAsm4MB: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 4(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBlockAsm4MB - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBlockAsm4MB: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeBlockAsm4MB - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeBlockAsm4MB - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeBlockAsm4MB - CMPL R8, $0x00010000 - JB three_bytes_match_emit_encodeBlockAsm4MB - MOVL R8, R10 - SHRL $0x10, R10 - MOVB $0xf8, (CX) - MOVW R8, 1(CX) - MOVB R10, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_match_emit_encodeBlockAsm4MB - -three_bytes_match_emit_encodeBlockAsm4MB: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBlockAsm4MB - -two_bytes_match_emit_encodeBlockAsm4MB: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeBlockAsm4MB - JMP memmove_long_match_emit_encodeBlockAsm4MB - -one_byte_match_emit_encodeBlockAsm4MB: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBlockAsm4MB: - LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_match_emit_encodeBlockAsm4MB - -emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm4MB - -emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - 
MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm4MB - -emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBlockAsm4MB: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeBlockAsm4MB - -memmove_long_match_emit_encodeBlockAsm4MB: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeBlockAsm4MB: -match_nolit_loop_encodeBlockAsm4MB: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeBlockAsm4MB: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeBlockAsm4MB - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeBlockAsm4MB - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeBlockAsm4MB - -matchlen_bsf_16match_nolit_encodeBlockAsm4MB: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeBlockAsm4MB - -matchlen_match8_match_nolit_encodeBlockAsm4MB: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeBlockAsm4MB - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeBlockAsm4MB - -matchlen_bsf_8_match_nolit_encodeBlockAsm4MB: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeBlockAsm4MB - -matchlen_match4_match_nolit_encodeBlockAsm4MB: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeBlockAsm4MB - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeBlockAsm4MB - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_encodeBlockAsm4MB: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeBlockAsm4MB - JB match_nolit_end_encodeBlockAsm4MB - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_encodeBlockAsm4MB - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ 
match_nolit_end_encodeBlockAsm4MB - -matchlen_match1_match_nolit_encodeBlockAsm4MB: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeBlockAsm4MB - LEAL 1(R10), R10 - -match_nolit_end_encodeBlockAsm4MB: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy - CMPL SI, $0x00010000 - JB two_byte_offset_match_nolit_encodeBlockAsm4MB - CMPL R10, $0x40 - JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB - MOVB $0xff, (CX) - MOVL SI, 1(CX) - LEAL -64(R10), R10 - ADDQ $0x05, CX - CMPL R10, $0x04 - JB four_bytes_remain_match_nolit_encodeBlockAsm4MB - - // emitRepeat - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy - -cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy - CMPL R10, $0x00010100 - JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy - LEAL -65536(R10), R10 - MOVL R10, SI - MOVW $0x001d, (CX) - MOVW R10, 2(CX) - SARL $0x10, SI - MOVB SI, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy: - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -four_bytes_remain_match_nolit_encodeBlockAsm4MB: - TESTL R10, R10 - JZ match_nolit_emitcopy_end_encodeBlockAsm4MB - XORL DI, DI - LEAL -1(DI)(R10*4), R10 - MOVB R10, (CX) - MOVL SI, 1(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -two_byte_offset_match_nolit_encodeBlockAsm4MB: - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB - CMPL SI, $0x00000800 - JAE long_offset_short_match_nolit_encodeBlockAsm4MB - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R10 - - // emitRepeat - LEAL -4(R10), R10 - JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - CMPL R10, $0x00010100 - JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b - LEAL -65536(R10), R10 - MOVL R10, SI - MOVW $0x001d, (CX) - MOVW R10, 2(CX) - SARL $0x10, SI - MOVB SI, 4(CX) - ADDQ $0x05, CX - JMP 
match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -long_offset_short_match_nolit_encodeBlockAsm4MB: - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - - // emitRepeat - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short - CMPL R10, $0x00010100 - JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short - LEAL -65536(R10), R10 - MOVL R10, SI - MOVW $0x001d, (CX) - MOVW R10, 2(CX) - SARL $0x10, SI - MOVB SI, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short: - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -two_byte_offset_short_match_nolit_encodeBlockAsm4MB: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeBlockAsm4MB - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_encodeBlockAsm4MB - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm4MB - -emit_copy_three_match_nolit_encodeBlockAsm4MB: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeBlockAsm4MB: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBlockAsm4MB - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBlockAsm4MB - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBlockAsm4MB: - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x32, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x32, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), 
SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeBlockAsm4MB - INCL DX - JMP search_loop_encodeBlockAsm4MB - -emit_remainder_encodeBlockAsm4MB: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 4(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBlockAsm4MB - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBlockAsm4MB: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBlockAsm4MB - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBlockAsm4MB - CMPL DX, $0x00010000 - JB three_bytes_emit_remainder_encodeBlockAsm4MB - MOVL DX, BX - SHRL $0x10, BX - MOVB $0xf8, (CX) - MOVW DX, 1(CX) - MOVB BL, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_emit_remainder_encodeBlockAsm4MB - -three_bytes_emit_remainder_encodeBlockAsm4MB: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBlockAsm4MB - -two_bytes_emit_remainder_encodeBlockAsm4MB: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBlockAsm4MB - JMP memmove_long_emit_remainder_encodeBlockAsm4MB - -one_byte_emit_remainder_encodeBlockAsm4MB: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBlockAsm4MB: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBlockAsm4MB: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB - -memmove_long_emit_remainder_encodeBlockAsm4MB: - LEAQ (CX)(SI*1), DX - 
MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBlockAsm4MB: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBlockAsm12B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000080, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBlockAsm12B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBlockAsm12B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBlockAsm12B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeBlockAsm12B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x000000cf1bbcdcbb, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x18, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - SHLQ $0x18, R11 - IMULQ R9, R11 - SHRQ $0x34, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x18, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeBlockAsm12B - LEAL 1(DX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI - JZ repeat_extend_back_end_encodeBlockAsm12B - -repeat_extend_back_loop_encodeBlockAsm12B: - CMPL DI, R8 - JBE repeat_extend_back_end_encodeBlockAsm12B - MOVB -1(BX)(SI*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeBlockAsm12B - LEAL -1(DI), DI - DECL SI - JNZ repeat_extend_back_loop_encodeBlockAsm12B - -repeat_extend_back_end_encodeBlockAsm12B: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 3(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_encodeBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeBlockAsm12B: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeBlockAsm12B - CMPL SI, $0x00000100 - JB 
two_bytes_repeat_emit_encodeBlockAsm12B - JB three_bytes_repeat_emit_encodeBlockAsm12B - -three_bytes_repeat_emit_encodeBlockAsm12B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeBlockAsm12B - -two_bytes_repeat_emit_encodeBlockAsm12B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeBlockAsm12B - JMP memmove_long_repeat_emit_encodeBlockAsm12B - -one_byte_repeat_emit_encodeBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_repeat_emit_encodeBlockAsm12B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (CX) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B - -emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B - -emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B - -emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_repeat_emit_encodeBlockAsm12B: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeBlockAsm12B - -memmove_long_repeat_emit_encodeBlockAsm12B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 - JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(CX)(R13*1), R14 - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 - ADDQ $0x20, R13 - DECQ R12 - JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(CX)(R13*1) - MOVOA X5, -16(CX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 - JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeBlockAsm12B: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL DX, R9 - LEAQ (BX)(DX*1), R10 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_repeat_extend_encodeBlockAsm12B: - CMPL R9, $0x10 - JB matchlen_match8_repeat_extend_encodeBlockAsm12B - MOVQ (R10)(R12*1), R11 - MOVQ 8(R10)(R12*1), R13 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B - 
XORQ 8(SI)(R12*1), R13 - JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm12B - LEAL -16(R9), R9 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm12B - -matchlen_bsf_16repeat_extend_encodeBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm12B - -matchlen_match8_repeat_extend_encodeBlockAsm12B: - CMPL R9, $0x08 - JB matchlen_match4_repeat_extend_encodeBlockAsm12B - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B - LEAL -8(R9), R9 - LEAL 8(R12), R12 - JMP matchlen_match4_repeat_extend_encodeBlockAsm12B - -matchlen_bsf_8_repeat_extend_encodeBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm12B - -matchlen_match4_repeat_extend_encodeBlockAsm12B: - CMPL R9, $0x04 - JB matchlen_match2_repeat_extend_encodeBlockAsm12B - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 - JNE matchlen_match2_repeat_extend_encodeBlockAsm12B - LEAL -4(R9), R9 - LEAL 4(R12), R12 - -matchlen_match2_repeat_extend_encodeBlockAsm12B: - CMPL R9, $0x01 - JE matchlen_match1_repeat_extend_encodeBlockAsm12B - JB repeat_extend_forward_end_encodeBlockAsm12B - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 - JNE matchlen_match1_repeat_extend_encodeBlockAsm12B - LEAL 2(R12), R12 - SUBL $0x02, R9 - JZ repeat_extend_forward_end_encodeBlockAsm12B - -matchlen_match1_repeat_extend_encodeBlockAsm12B: - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 - JNE repeat_extend_forward_end_encodeBlockAsm12B - LEAL 1(R12), R12 - -repeat_extend_forward_end_encodeBlockAsm12B: - ADDL R12, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 - JZ repeat_as_copy_encodeBlockAsm12B - - // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_match_repeat_encodeBlockAsm12B - CMPL R8, $0x0c - JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B - CMPL DI, $0x00000800 - JB repeat_two_offset_match_repeat_encodeBlockAsm12B - -cant_repeat_two_offset_match_repeat_encodeBlockAsm12B: - CMPL SI, $0x00000104 - JB repeat_three_match_repeat_encodeBlockAsm12B - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_three_match_repeat_encodeBlockAsm12B: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_two_match_repeat_encodeBlockAsm12B: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_two_offset_match_repeat_encodeBlockAsm12B: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_as_copy_encodeBlockAsm12B: - // emitCopy - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B - CMPL DI, $0x00000800 - JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - SUBL $0x08, SI - - // emitRepeat - LEAL -4(SI), SI - JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - CMPL R8, $0x0c - JAE 
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm12B - -long_offset_short_repeat_as_copy_encodeBlockAsm12B: - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - - // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short - CMPL R8, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm12B - -repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm12B - -two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B - CMPL DI, $0x00000800 - JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm12B - -emit_copy_three_repeat_as_copy_encodeBlockAsm12B: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, CX - -repeat_end_emit_encodeBlockAsm12B: - MOVL DX, 12(SP) - JMP search_loop_encodeBlockAsm12B - -no_repeat_found_encodeBlockAsm12B: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBlockAsm12B - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeBlockAsm12B - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeBlockAsm12B - MOVL 20(SP), DX - JMP search_loop_encodeBlockAsm12B - -candidate3_match_encodeBlockAsm12B: - ADDL $0x02, DX - JMP candidate_match_encodeBlockAsm12B - -candidate2_match_encodeBlockAsm12B: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, 
SI - -candidate_match_encodeBlockAsm12B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBlockAsm12B - -match_extend_back_loop_encodeBlockAsm12B: - CMPL DX, DI - JBE match_extend_back_end_encodeBlockAsm12B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBlockAsm12B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBlockAsm12B - JMP match_extend_back_loop_encodeBlockAsm12B - -match_extend_back_end_encodeBlockAsm12B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBlockAsm12B: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeBlockAsm12B - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeBlockAsm12B - JB three_bytes_match_emit_encodeBlockAsm12B - -three_bytes_match_emit_encodeBlockAsm12B: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBlockAsm12B - -two_bytes_match_emit_encodeBlockAsm12B: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeBlockAsm12B - JMP memmove_long_match_emit_encodeBlockAsm12B - -one_byte_match_emit_encodeBlockAsm12B: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBlockAsm12B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_match_emit_encodeBlockAsm12B - -emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm12B - -emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm12B - -emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBlockAsm12B: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeBlockAsm12B - -memmove_long_match_emit_encodeBlockAsm12B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA 
emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeBlockAsm12B: -match_nolit_loop_encodeBlockAsm12B: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeBlockAsm12B: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeBlockAsm12B - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeBlockAsm12B - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeBlockAsm12B - -matchlen_bsf_16match_nolit_encodeBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeBlockAsm12B - -matchlen_match8_match_nolit_encodeBlockAsm12B: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeBlockAsm12B - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeBlockAsm12B - -matchlen_bsf_8_match_nolit_encodeBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeBlockAsm12B - -matchlen_match4_match_nolit_encodeBlockAsm12B: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeBlockAsm12B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeBlockAsm12B - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_encodeBlockAsm12B: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeBlockAsm12B - JB match_nolit_end_encodeBlockAsm12B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_encodeBlockAsm12B - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_encodeBlockAsm12B - -matchlen_match1_match_nolit_encodeBlockAsm12B: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeBlockAsm12B - LEAL 1(R10), R10 - -match_nolit_end_encodeBlockAsm12B: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B - CMPL SI, $0x00000800 - JAE long_offset_short_match_nolit_encodeBlockAsm12B - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R10 - - // emitRepeat - LEAL -4(R10), R10 - JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: - 
CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -long_offset_short_match_nolit_encodeBlockAsm12B: - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - - // emitRepeat - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -two_byte_offset_short_match_nolit_encodeBlockAsm12B: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeBlockAsm12B - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_encodeBlockAsm12B - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm12B - -emit_copy_three_match_nolit_encodeBlockAsm12B: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeBlockAsm12B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBlockAsm12B - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBlockAsm12B: - MOVQ $0x000000cf1bbcdcbb, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x18, R8 - IMULQ R9, R8 - SHRQ $0x34, R8 - SHLQ $0x18, SI - IMULQ R9, SI - SHRQ $0x34, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeBlockAsm12B - INCL DX - JMP search_loop_encodeBlockAsm12B - -emit_remainder_encodeBlockAsm12B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - 
RET - -emit_remainder_ok_encodeBlockAsm12B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBlockAsm12B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBlockAsm12B - JB three_bytes_emit_remainder_encodeBlockAsm12B - -three_bytes_emit_remainder_encodeBlockAsm12B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBlockAsm12B - -two_bytes_emit_remainder_encodeBlockAsm12B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBlockAsm12B - JMP memmove_long_emit_remainder_encodeBlockAsm12B - -one_byte_emit_remainder_encodeBlockAsm12B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBlockAsm12B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBlockAsm12B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBlockAsm12B - -memmove_long_emit_remainder_encodeBlockAsm12B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - 
ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBlockAsm12B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBlockAsm10B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000020, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBlockAsm10B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBlockAsm10B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBlockAsm10B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeBlockAsm10B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x9e3779b1, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R9, R11 - SHRQ $0x36, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeBlockAsm10B - LEAL 1(DX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI - JZ repeat_extend_back_end_encodeBlockAsm10B - -repeat_extend_back_loop_encodeBlockAsm10B: - CMPL DI, R8 - JBE repeat_extend_back_end_encodeBlockAsm10B - MOVB -1(BX)(SI*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeBlockAsm10B - LEAL -1(DI), DI - DECL SI - JNZ repeat_extend_back_loop_encodeBlockAsm10B - -repeat_extend_back_end_encodeBlockAsm10B: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 3(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_encodeBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeBlockAsm10B: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeBlockAsm10B - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_encodeBlockAsm10B - JB three_bytes_repeat_emit_encodeBlockAsm10B - -three_bytes_repeat_emit_encodeBlockAsm10B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeBlockAsm10B - -two_bytes_repeat_emit_encodeBlockAsm10B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeBlockAsm10B - JMP memmove_long_repeat_emit_encodeBlockAsm10B - -one_byte_repeat_emit_encodeBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - 
-memmove_repeat_emit_encodeBlockAsm10B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (CX) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B - -emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B - -emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B - -emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_repeat_emit_encodeBlockAsm10B: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeBlockAsm10B - -memmove_long_repeat_emit_encodeBlockAsm10B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 - JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(CX)(R13*1), R14 - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 - ADDQ $0x20, R13 - DECQ R12 - JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(CX)(R13*1) - MOVOA X5, -16(CX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 - JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeBlockAsm10B: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL DX, R9 - LEAQ (BX)(DX*1), R10 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_repeat_extend_encodeBlockAsm10B: - CMPL R9, $0x10 - JB matchlen_match8_repeat_extend_encodeBlockAsm10B - MOVQ (R10)(R12*1), R11 - MOVQ 8(R10)(R12*1), R13 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B - XORQ 8(SI)(R12*1), R13 - JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm10B - LEAL -16(R9), R9 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm10B - -matchlen_bsf_16repeat_extend_encodeBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm10B - -matchlen_match8_repeat_extend_encodeBlockAsm10B: - CMPL R9, $0x08 - JB matchlen_match4_repeat_extend_encodeBlockAsm10B - MOVQ (R10)(R12*1), R11 
- XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B - LEAL -8(R9), R9 - LEAL 8(R12), R12 - JMP matchlen_match4_repeat_extend_encodeBlockAsm10B - -matchlen_bsf_8_repeat_extend_encodeBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm10B - -matchlen_match4_repeat_extend_encodeBlockAsm10B: - CMPL R9, $0x04 - JB matchlen_match2_repeat_extend_encodeBlockAsm10B - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 - JNE matchlen_match2_repeat_extend_encodeBlockAsm10B - LEAL -4(R9), R9 - LEAL 4(R12), R12 - -matchlen_match2_repeat_extend_encodeBlockAsm10B: - CMPL R9, $0x01 - JE matchlen_match1_repeat_extend_encodeBlockAsm10B - JB repeat_extend_forward_end_encodeBlockAsm10B - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 - JNE matchlen_match1_repeat_extend_encodeBlockAsm10B - LEAL 2(R12), R12 - SUBL $0x02, R9 - JZ repeat_extend_forward_end_encodeBlockAsm10B - -matchlen_match1_repeat_extend_encodeBlockAsm10B: - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 - JNE repeat_extend_forward_end_encodeBlockAsm10B - LEAL 1(R12), R12 - -repeat_extend_forward_end_encodeBlockAsm10B: - ADDL R12, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 - JZ repeat_as_copy_encodeBlockAsm10B - - // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_match_repeat_encodeBlockAsm10B - CMPL R8, $0x0c - JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B - CMPL DI, $0x00000800 - JB repeat_two_offset_match_repeat_encodeBlockAsm10B - -cant_repeat_two_offset_match_repeat_encodeBlockAsm10B: - CMPL SI, $0x00000104 - JB repeat_three_match_repeat_encodeBlockAsm10B - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_three_match_repeat_encodeBlockAsm10B: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_two_match_repeat_encodeBlockAsm10B: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_two_offset_match_repeat_encodeBlockAsm10B: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_as_copy_encodeBlockAsm10B: - // emitCopy - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B - CMPL DI, $0x00000800 - JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - SUBL $0x08, SI - - // emitRepeat - LEAL -4(SI), SI - JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - CMPL R8, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: - LEAL 
-4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm10B - -long_offset_short_repeat_as_copy_encodeBlockAsm10B: - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - - // emitRepeat - MOVL SI, R8 - LEAL -4(SI), SI - CMPL R8, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short - CMPL R8, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short - CMPL DI, $0x00000800 - JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm10B - -repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm10B - -two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B - CMPL DI, $0x00000800 - JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm10B - -emit_copy_three_repeat_as_copy_encodeBlockAsm10B: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, CX - -repeat_end_emit_encodeBlockAsm10B: - MOVL DX, 12(SP) - JMP search_loop_encodeBlockAsm10B - -no_repeat_found_encodeBlockAsm10B: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBlockAsm10B - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeBlockAsm10B - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeBlockAsm10B - MOVL 20(SP), DX - JMP search_loop_encodeBlockAsm10B - -candidate3_match_encodeBlockAsm10B: - ADDL $0x02, DX - JMP candidate_match_encodeBlockAsm10B - -candidate2_match_encodeBlockAsm10B: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_encodeBlockAsm10B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBlockAsm10B - -match_extend_back_loop_encodeBlockAsm10B: - CMPL DX, DI - JBE match_extend_back_end_encodeBlockAsm10B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBlockAsm10B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBlockAsm10B - JMP match_extend_back_loop_encodeBlockAsm10B - -match_extend_back_end_encodeBlockAsm10B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 
3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBlockAsm10B: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeBlockAsm10B - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeBlockAsm10B - JB three_bytes_match_emit_encodeBlockAsm10B - -three_bytes_match_emit_encodeBlockAsm10B: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBlockAsm10B - -two_bytes_match_emit_encodeBlockAsm10B: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeBlockAsm10B - JMP memmove_long_match_emit_encodeBlockAsm10B - -one_byte_match_emit_encodeBlockAsm10B: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBlockAsm10B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_match_emit_encodeBlockAsm10B - -emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm10B - -emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm10B - -emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBlockAsm10B: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeBlockAsm10B - -memmove_long_match_emit_encodeBlockAsm10B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeBlockAsm10B: -match_nolit_loop_encodeBlockAsm10B: - 
MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeBlockAsm10B: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeBlockAsm10B - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeBlockAsm10B - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeBlockAsm10B - -matchlen_bsf_16match_nolit_encodeBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeBlockAsm10B - -matchlen_match8_match_nolit_encodeBlockAsm10B: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeBlockAsm10B - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeBlockAsm10B - -matchlen_bsf_8_match_nolit_encodeBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeBlockAsm10B - -matchlen_match4_match_nolit_encodeBlockAsm10B: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeBlockAsm10B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeBlockAsm10B - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_encodeBlockAsm10B: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeBlockAsm10B - JB match_nolit_end_encodeBlockAsm10B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_encodeBlockAsm10B - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_encodeBlockAsm10B - -matchlen_match1_match_nolit_encodeBlockAsm10B: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeBlockAsm10B - LEAL 1(R10), R10 - -match_nolit_end_encodeBlockAsm10B: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B - CMPL SI, $0x00000800 - JAE long_offset_short_match_nolit_encodeBlockAsm10B - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R10 - - // emitRepeat - LEAL -4(R10), R10 - JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm10B - -repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm10B - -repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP 
match_nolit_emitcopy_end_encodeBlockAsm10B - -repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm10B - -long_offset_short_match_nolit_encodeBlockAsm10B: - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - - // emitRepeat - MOVL R10, DI - LEAL -4(R10), R10 - CMPL DI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short - CMPL SI, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm10B - -repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm10B - -repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm10B - -repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm10B - -two_byte_offset_short_match_nolit_encodeBlockAsm10B: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeBlockAsm10B - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_encodeBlockAsm10B - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm10B - -emit_copy_three_match_nolit_encodeBlockAsm10B: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeBlockAsm10B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBlockAsm10B - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBlockAsm10B: - MOVQ $0x9e3779b1, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x20, R8 - IMULQ R9, R8 - SHRQ $0x36, R8 - SHLQ $0x20, SI - IMULQ R9, SI - SHRQ $0x36, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeBlockAsm10B - INCL DX - JMP search_loop_encodeBlockAsm10B - -emit_remainder_encodeBlockAsm10B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBlockAsm10B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBlockAsm10B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBlockAsm10B - JB three_bytes_emit_remainder_encodeBlockAsm10B - -three_bytes_emit_remainder_encodeBlockAsm10B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP 
memmove_long_emit_remainder_encodeBlockAsm10B - -two_bytes_emit_remainder_encodeBlockAsm10B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBlockAsm10B - JMP memmove_long_emit_remainder_encodeBlockAsm10B - -one_byte_emit_remainder_encodeBlockAsm10B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBlockAsm10B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBlockAsm10B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBlockAsm10B - -memmove_long_emit_remainder_encodeBlockAsm10B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - 
MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBlockAsm10B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBlockAsm8B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000008, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBlockAsm8B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBlockAsm8B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBlockAsm8B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x04, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeBlockAsm8B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x9e3779b1, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x38, R10 - SHLQ $0x20, R11 - IMULQ R9, R11 - SHRQ $0x38, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x38, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeBlockAsm8B - LEAL 1(DX), DI - MOVL 12(SP), R8 - MOVL DI, SI - SUBL 16(SP), SI - JZ repeat_extend_back_end_encodeBlockAsm8B - -repeat_extend_back_loop_encodeBlockAsm8B: - CMPL DI, R8 - JBE repeat_extend_back_end_encodeBlockAsm8B - MOVB -1(BX)(SI*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeBlockAsm8B - LEAL -1(DI), DI - DECL SI - JNZ repeat_extend_back_loop_encodeBlockAsm8B - -repeat_extend_back_end_encodeBlockAsm8B: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 3(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_encodeBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeBlockAsm8B: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeBlockAsm8B - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_encodeBlockAsm8B - JB three_bytes_repeat_emit_encodeBlockAsm8B - -three_bytes_repeat_emit_encodeBlockAsm8B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeBlockAsm8B - -two_bytes_repeat_emit_encodeBlockAsm8B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeBlockAsm8B - JMP memmove_long_repeat_emit_encodeBlockAsm8B - -one_byte_repeat_emit_encodeBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_repeat_emit_encodeBlockAsm8B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (CX) - JMP 
memmove_end_copy_repeat_emit_encodeBlockAsm8B - -emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B - -emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B - -emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_repeat_emit_encodeBlockAsm8B: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeBlockAsm8B - -memmove_long_repeat_emit_encodeBlockAsm8B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R12 - SHRQ $0x05, R12 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 - JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R13*1), R11 - LEAQ -32(CX)(R13*1), R14 - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 - ADDQ $0x20, R13 - DECQ R12 - JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R13*1), X4 - MOVOU -16(R10)(R13*1), X5 - MOVOA X4, -32(CX)(R13*1) - MOVOA X5, -16(CX)(R13*1) - ADDQ $0x20, R13 - CMPQ R9, R13 - JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeBlockAsm8B: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R9 - SUBL DX, R9 - LEAQ (BX)(DX*1), R10 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_repeat_extend_encodeBlockAsm8B: - CMPL R9, $0x10 - JB matchlen_match8_repeat_extend_encodeBlockAsm8B - MOVQ (R10)(R12*1), R11 - MOVQ 8(R10)(R12*1), R13 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B - XORQ 8(SI)(R12*1), R13 - JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm8B - LEAL -16(R9), R9 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm8B - -matchlen_bsf_16repeat_extend_encodeBlockAsm8B: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm8B - -matchlen_match8_repeat_extend_encodeBlockAsm8B: - CMPL R9, $0x08 - JB matchlen_match4_repeat_extend_encodeBlockAsm8B - MOVQ (R10)(R12*1), R11 - XORQ (SI)(R12*1), R11 - JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B - LEAL -8(R9), R9 - LEAL 8(R12), R12 - JMP matchlen_match4_repeat_extend_encodeBlockAsm8B - -matchlen_bsf_8_repeat_extend_encodeBlockAsm8B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP repeat_extend_forward_end_encodeBlockAsm8B - -matchlen_match4_repeat_extend_encodeBlockAsm8B: - CMPL R9, $0x04 - JB matchlen_match2_repeat_extend_encodeBlockAsm8B - MOVL (R10)(R12*1), R11 - CMPL (SI)(R12*1), R11 - JNE 
matchlen_match2_repeat_extend_encodeBlockAsm8B - LEAL -4(R9), R9 - LEAL 4(R12), R12 - -matchlen_match2_repeat_extend_encodeBlockAsm8B: - CMPL R9, $0x01 - JE matchlen_match1_repeat_extend_encodeBlockAsm8B - JB repeat_extend_forward_end_encodeBlockAsm8B - MOVW (R10)(R12*1), R11 - CMPW (SI)(R12*1), R11 - JNE matchlen_match1_repeat_extend_encodeBlockAsm8B - LEAL 2(R12), R12 - SUBL $0x02, R9 - JZ repeat_extend_forward_end_encodeBlockAsm8B - -matchlen_match1_repeat_extend_encodeBlockAsm8B: - MOVB (R10)(R12*1), R11 - CMPB (SI)(R12*1), R11 - JNE repeat_extend_forward_end_encodeBlockAsm8B - LEAL 1(R12), R12 - -repeat_extend_forward_end_encodeBlockAsm8B: - ADDL R12, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - TESTL R8, R8 - JZ repeat_as_copy_encodeBlockAsm8B - - // emitRepeat - MOVL SI, DI - LEAL -4(SI), SI - CMPL DI, $0x08 - JBE repeat_two_match_repeat_encodeBlockAsm8B - CMPL DI, $0x0c - JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B - -cant_repeat_two_offset_match_repeat_encodeBlockAsm8B: - CMPL SI, $0x00000104 - JB repeat_three_match_repeat_encodeBlockAsm8B - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm8B - -repeat_three_match_repeat_encodeBlockAsm8B: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm8B - -repeat_two_match_repeat_encodeBlockAsm8B: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm8B - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm8B - -repeat_as_copy_encodeBlockAsm8B: - // emitCopy - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B - CMPL DI, $0x00000800 - JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - SUBL $0x08, SI - - // emitRepeat - LEAL -4(SI), SI - JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b - MOVL SI, DI - LEAL -4(SI), SI - CMPL DI, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b - CMPL DI, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b - -cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm8B - -repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm8B - -repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm8B - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm8B - -long_offset_short_repeat_as_copy_encodeBlockAsm8B: - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - - // emitRepeat - MOVL SI, DI - LEAL -4(SI), SI - CMPL DI, $0x08 - JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short - CMPL DI, $0x0c - JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short - 
-cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: - CMPL SI, $0x00000104 - JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short - LEAL -256(SI), SI - MOVW $0x0019, (CX) - MOVW SI, 2(CX) - ADDQ $0x04, CX - JMP repeat_end_emit_encodeBlockAsm8B - -repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: - LEAL -4(SI), SI - MOVW $0x0015, (CX) - MOVB SI, 2(CX) - ADDQ $0x03, CX - JMP repeat_end_emit_encodeBlockAsm8B - -repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: - SHLL $0x02, SI - ORL $0x01, SI - MOVW SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm8B - XORQ R8, R8 - LEAL 1(R8)(SI*4), SI - MOVB DI, 1(CX) - SARL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm8B - -two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeBlockAsm8B - -emit_copy_three_repeat_as_copy_encodeBlockAsm8B: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, CX - -repeat_end_emit_encodeBlockAsm8B: - MOVL DX, 12(SP) - JMP search_loop_encodeBlockAsm8B - -no_repeat_found_encodeBlockAsm8B: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBlockAsm8B - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeBlockAsm8B - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeBlockAsm8B - MOVL 20(SP), DX - JMP search_loop_encodeBlockAsm8B - -candidate3_match_encodeBlockAsm8B: - ADDL $0x02, DX - JMP candidate_match_encodeBlockAsm8B - -candidate2_match_encodeBlockAsm8B: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_encodeBlockAsm8B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBlockAsm8B - -match_extend_back_loop_encodeBlockAsm8B: - CMPL DX, DI - JBE match_extend_back_end_encodeBlockAsm8B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBlockAsm8B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBlockAsm8B - JMP match_extend_back_loop_encodeBlockAsm8B - -match_extend_back_end_encodeBlockAsm8B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBlockAsm8B: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeBlockAsm8B - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeBlockAsm8B - JB three_bytes_match_emit_encodeBlockAsm8B - -three_bytes_match_emit_encodeBlockAsm8B: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBlockAsm8B - -two_bytes_match_emit_encodeBlockAsm8B: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeBlockAsm8B - JMP memmove_long_match_emit_encodeBlockAsm8B - -one_byte_match_emit_encodeBlockAsm8B: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBlockAsm8B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8 - CMPQ R9, $0x10 - JBE 
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_match_emit_encodeBlockAsm8B - -emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm8B - -emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBlockAsm8B - -emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBlockAsm8B: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeBlockAsm8B - -memmove_long_match_emit_encodeBlockAsm8B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeBlockAsm8B: -match_nolit_loop_encodeBlockAsm8B: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeBlockAsm8B: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeBlockAsm8B - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeBlockAsm8B - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeBlockAsm8B - -matchlen_bsf_16match_nolit_encodeBlockAsm8B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeBlockAsm8B - -matchlen_match8_match_nolit_encodeBlockAsm8B: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeBlockAsm8B - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeBlockAsm8B - -matchlen_bsf_8_match_nolit_encodeBlockAsm8B: -#ifdef 
GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeBlockAsm8B - -matchlen_match4_match_nolit_encodeBlockAsm8B: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeBlockAsm8B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeBlockAsm8B - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_encodeBlockAsm8B: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeBlockAsm8B - JB match_nolit_end_encodeBlockAsm8B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_encodeBlockAsm8B - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_encodeBlockAsm8B - -matchlen_match1_match_nolit_encodeBlockAsm8B: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeBlockAsm8B - LEAL 1(R10), R10 - -match_nolit_end_encodeBlockAsm8B: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B - CMPL SI, $0x00000800 - JAE long_offset_short_match_nolit_encodeBlockAsm8B - MOVL $0x00000001, DI - LEAL 16(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R10 - - // emitRepeat - LEAL -4(R10), R10 - JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b - MOVL R10, SI - LEAL -4(R10), R10 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - -repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - -repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI - ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - -long_offset_short_match_nolit_encodeBlockAsm8B: - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - - // emitRepeat - MOVL R10, SI - LEAL -4(R10), R10 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short: - CMPL R10, $0x00000104 - JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short - LEAL -256(R10), R10 - MOVW $0x0019, (CX) - MOVW R10, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - -repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short: - LEAL -4(R10), R10 - MOVW $0x0015, (CX) - MOVB R10, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - -repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - XORQ DI, DI - LEAL 1(DI)(R10*4), R10 - MOVB SI, 1(CX) - SARL $0x08, SI - SHLL $0x05, SI 
- ORL SI, R10 - MOVB R10, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - -two_byte_offset_short_match_nolit_encodeBlockAsm8B: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeBlockAsm8B - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBlockAsm8B - -emit_copy_three_match_nolit_encodeBlockAsm8B: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeBlockAsm8B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBlockAsm8B - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBlockAsm8B: - MOVQ $0x9e3779b1, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x20, R8 - IMULQ R9, R8 - SHRQ $0x38, R8 - SHLQ $0x20, SI - IMULQ R9, SI - SHRQ $0x38, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeBlockAsm8B - INCL DX - JMP search_loop_encodeBlockAsm8B - -emit_remainder_encodeBlockAsm8B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBlockAsm8B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBlockAsm8B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBlockAsm8B - JB three_bytes_emit_remainder_encodeBlockAsm8B - -three_bytes_emit_remainder_encodeBlockAsm8B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBlockAsm8B - -two_bytes_emit_remainder_encodeBlockAsm8B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBlockAsm8B - JMP memmove_long_emit_remainder_encodeBlockAsm8B - -one_byte_emit_remainder_encodeBlockAsm8B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBlockAsm8B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16: - 
MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBlockAsm8B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBlockAsm8B - -memmove_long_emit_remainder_encodeBlockAsm8B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBlockAsm8B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00001200, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBetterBlockAsm: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBetterBlockAsm - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -6(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL $0x00000000, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBetterBlockAsm: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x07, SI - CMPL SI, $0x63 - JBE check_maxskip_ok_encodeBetterBlockAsm - LEAL 100(DX), SI - JMP check_maxskip_cont_encodeBetterBlockAsm - -check_maxskip_ok_encodeBetterBlockAsm: - LEAL 1(DX)(SI*1), SI - -check_maxskip_cont_encodeBetterBlockAsm: - CMPL SI, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x00cf1bbcdcbfa563, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x2f, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x32, R11 - MOVL (AX)(R10*4), SI - MOVL 524288(AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - MOVL DX, 524288(AX)(R11*4) - MOVQ (BX)(SI*1), R10 - MOVQ (BX)(R8*1), R11 - CMPQ R10, DI - JEQ candidate_match_encodeBetterBlockAsm - 
CMPQ R11, DI - JNE no_short_found_encodeBetterBlockAsm - MOVL R8, SI - JMP candidate_match_encodeBetterBlockAsm - -no_short_found_encodeBetterBlockAsm: - CMPL R10, DI - JEQ candidate_match_encodeBetterBlockAsm - CMPL R11, DI - JEQ candidateS_match_encodeBetterBlockAsm - MOVL 20(SP), DX - JMP search_loop_encodeBetterBlockAsm - -candidateS_match_encodeBetterBlockAsm: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x2f, R10 - MOVL (AX)(R10*4), SI - INCL DX - MOVL DX, (AX)(R10*4) - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBetterBlockAsm - DECL DX - MOVL R8, SI - -candidate_match_encodeBetterBlockAsm: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBetterBlockAsm - -match_extend_back_loop_encodeBetterBlockAsm: - CMPL DX, DI - JBE match_extend_back_end_encodeBetterBlockAsm - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBetterBlockAsm - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBetterBlockAsm - JMP match_extend_back_loop_encodeBetterBlockAsm - -match_extend_back_end_encodeBetterBlockAsm: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 5(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBetterBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBetterBlockAsm: - MOVL DX, DI - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), R10 - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_match_nolit_encodeBetterBlockAsm: - CMPL R8, $0x10 - JB matchlen_match8_match_nolit_encodeBetterBlockAsm - MOVQ (R9)(R12*1), R11 - MOVQ 8(R9)(R12*1), R13 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm - XORQ 8(R10)(R12*1), R13 - JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm - LEAL -16(R8), R8 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm - -matchlen_bsf_16match_nolit_encodeBetterBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm - -matchlen_match8_match_nolit_encodeBetterBlockAsm: - CMPL R8, $0x08 - JB matchlen_match4_match_nolit_encodeBetterBlockAsm - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm - LEAL -8(R8), R8 - LEAL 8(R12), R12 - JMP matchlen_match4_match_nolit_encodeBetterBlockAsm - -matchlen_bsf_8_match_nolit_encodeBetterBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm - -matchlen_match4_match_nolit_encodeBetterBlockAsm: - CMPL R8, $0x04 - JB matchlen_match2_match_nolit_encodeBetterBlockAsm - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 - JNE matchlen_match2_match_nolit_encodeBetterBlockAsm - LEAL -4(R8), R8 - LEAL 4(R12), R12 - -matchlen_match2_match_nolit_encodeBetterBlockAsm: - CMPL R8, $0x01 - JE matchlen_match1_match_nolit_encodeBetterBlockAsm - JB match_nolit_end_encodeBetterBlockAsm - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 - JNE matchlen_match1_match_nolit_encodeBetterBlockAsm - LEAL 2(R12), R12 - SUBL $0x02, R8 - JZ match_nolit_end_encodeBetterBlockAsm - -matchlen_match1_match_nolit_encodeBetterBlockAsm: - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 - JNE match_nolit_end_encodeBetterBlockAsm - LEAL 1(R12), R12 - -match_nolit_end_encodeBetterBlockAsm: - MOVL DX, R8 - SUBL SI, R8 - - // Check if repeat - CMPL 16(SP), R8 - JEQ 
match_is_repeat_encodeBetterBlockAsm - CMPL R12, $0x01 - JA match_length_ok_encodeBetterBlockAsm - CMPL R8, $0x0000ffff - JBE match_length_ok_encodeBetterBlockAsm - MOVL 20(SP), DX - INCL DX - JMP search_loop_encodeBetterBlockAsm - -match_length_ok_encodeBetterBlockAsm: - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_encodeBetterBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_encodeBetterBlockAsm - CMPL SI, $0x00000100 - JB two_bytes_match_emit_encodeBetterBlockAsm - CMPL SI, $0x00010000 - JB three_bytes_match_emit_encodeBetterBlockAsm - CMPL SI, $0x01000000 - JB four_bytes_match_emit_encodeBetterBlockAsm - MOVB $0xfc, (CX) - MOVL SI, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_match_emit_encodeBetterBlockAsm - -four_bytes_match_emit_encodeBetterBlockAsm: - MOVL SI, R11 - SHRL $0x10, R11 - MOVB $0xf8, (CX) - MOVW SI, 1(CX) - MOVB R11, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_match_emit_encodeBetterBlockAsm - -three_bytes_match_emit_encodeBetterBlockAsm: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBetterBlockAsm - -two_bytes_match_emit_encodeBetterBlockAsm: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_encodeBetterBlockAsm - JMP memmove_long_match_emit_encodeBetterBlockAsm - -one_byte_match_emit_encodeBetterBlockAsm: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBetterBlockAsm: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4 - CMPQ R9, $0x08 - JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm - -emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm - -emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm - -emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm - -emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBetterBlockAsm: - MOVQ SI, CX - JMP emit_literal_done_match_emit_encodeBetterBlockAsm - -memmove_long_match_emit_encodeBetterBlockAsm: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - 
JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_encodeBetterBlockAsm: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitCopy - CMPL R8, $0x00010000 - JB two_byte_offset_match_nolit_encodeBetterBlockAsm - CMPL R12, $0x40 - JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm - MOVB $0xff, (CX) - MOVL R8, 1(CX) - LEAL -64(R12), R12 - ADDQ $0x05, CX - CMPL R12, $0x04 - JB four_bytes_remain_match_nolit_encodeBetterBlockAsm - - // emitRepeat -emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy: - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy - CMPL R12, $0x00010100 - JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy - CMPL R12, $0x0100ffff - JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy - LEAL -16842747(R12), R12 - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy - -repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy: - LEAL -65536(R12), R12 - MOVL R12, R8 - MOVW $0x001d, (CX) - MOVW R12, 2(CX) - SARL $0x10, R8 - MOVB R8, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy: - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -four_bytes_remain_match_nolit_encodeBetterBlockAsm: - TESTL R12, R12 - JZ match_nolit_emitcopy_end_encodeBetterBlockAsm - XORL SI, SI - LEAL -1(SI)(R12*4), R12 - MOVB R12, (CX) - MOVL R8, 1(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -two_byte_offset_match_nolit_encodeBetterBlockAsm: - CMPL R12, $0x40 - JBE 
two_byte_offset_short_match_nolit_encodeBetterBlockAsm - CMPL R8, $0x00000800 - JAE long_offset_short_match_nolit_encodeBetterBlockAsm - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(CX) - MOVL R8, R9 - SHRL $0x08, R9 - SHLL $0x05, R9 - ORL R9, SI - MOVB SI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R12 - - // emitRepeat - LEAL -4(R12), R12 - JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - -emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - CMPL R12, $0x00010100 - JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - CMPL R12, $0x0100ffff - JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - LEAL -16842747(R12), R12 - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b - -repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - LEAL -65536(R12), R12 - MOVL R12, R8 - MOVW $0x001d, (CX) - MOVW R12, 2(CX) - SARL $0x10, R8 - MOVB R8, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -long_offset_short_match_nolit_encodeBetterBlockAsm: - MOVB $0xee, (CX) - MOVW R8, 1(CX) - LEAL -60(R12), R12 - ADDQ $0x03, CX - - // emitRepeat -emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short: - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short - CMPL R12, $0x00010100 - JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short - CMPL R12, $0x0100ffff - JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short - LEAL -16842747(R12), R12 - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short - -repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short: - LEAL -65536(R12), 
R12 - MOVL R12, R8 - MOVW $0x001d, (CX) - MOVW R12, 2(CX) - SARL $0x10, R8 - MOVB R8, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short: - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -two_byte_offset_short_match_nolit_encodeBetterBlockAsm: - MOVL R12, SI - SHLL $0x02, SI - CMPL R12, $0x0c - JAE emit_copy_three_match_nolit_encodeBetterBlockAsm - CMPL R8, $0x00000800 - JAE emit_copy_three_match_nolit_encodeBetterBlockAsm - LEAL -15(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -emit_copy_three_match_nolit_encodeBetterBlockAsm: - LEAL -2(SI), SI - MOVB SI, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -match_is_repeat_encodeBetterBlockAsm: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_repeat_encodeBetterBlockAsm - CMPL SI, $0x00000100 - JB two_bytes_match_emit_repeat_encodeBetterBlockAsm - CMPL SI, $0x00010000 - JB three_bytes_match_emit_repeat_encodeBetterBlockAsm - CMPL SI, $0x01000000 - JB four_bytes_match_emit_repeat_encodeBetterBlockAsm - MOVB $0xfc, (CX) - MOVL SI, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm - -four_bytes_match_emit_repeat_encodeBetterBlockAsm: - MOVL SI, R11 - SHRL $0x10, R11 - MOVB $0xf8, (CX) - MOVW SI, 1(CX) - MOVB R11, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm - -three_bytes_match_emit_repeat_encodeBetterBlockAsm: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm - -two_bytes_match_emit_repeat_encodeBetterBlockAsm: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_repeat_encodeBetterBlockAsm - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm - -one_byte_match_emit_repeat_encodeBetterBlockAsm: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_repeat_encodeBetterBlockAsm: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4 - CMPQ R9, $0x08 - JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64 - 
-emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm: - MOVQ SI, CX - JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm - -memmove_long_match_emit_repeat_encodeBetterBlockAsm: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_repeat_encodeBetterBlockAsm: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitRepeat -emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm: - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm - -cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm - CMPL R12, $0x00010100 - JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm - CMPL R12, $0x0100ffff - JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm - LEAL -16842747(R12), R12 - MOVL $0xfffb001d, (CX) - MOVB $0xff, 4(CX) - ADDQ $0x05, CX - JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm - -repeat_five_match_nolit_repeat_encodeBetterBlockAsm: - LEAL -65536(R12), R12 - MOVL R12, R8 - MOVW $0x001d, (CX) - MOVW R12, 2(CX) - SARL $0x10, R8 
- MOVB R8, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_four_match_nolit_repeat_encodeBetterBlockAsm: - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_three_match_nolit_repeat_encodeBetterBlockAsm: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_two_match_nolit_repeat_encodeBetterBlockAsm: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm - -repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - -match_nolit_emitcopy_end_encodeBetterBlockAsm: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBetterBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBetterBlockAsm: - MOVQ $0x00cf1bbcdcbfa563, SI - MOVQ $0x9e3779b1, R8 - LEAQ 1(DI), DI - LEAQ -2(DX), R9 - MOVQ (BX)(DI*1), R10 - MOVQ 1(BX)(DI*1), R11 - MOVQ (BX)(R9*1), R12 - MOVQ 1(BX)(R9*1), R13 - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x2f, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R12 - IMULQ SI, R12 - SHRQ $0x2f, R12 - SHLQ $0x20, R13 - IMULQ R8, R13 - SHRQ $0x32, R13 - LEAQ 1(DI), R8 - LEAQ 1(R9), R14 - MOVL DI, (AX)(R10*4) - MOVL R9, (AX)(R12*4) - MOVL R8, 524288(AX)(R11*4) - MOVL R14, 524288(AX)(R13*4) - LEAQ 1(R9)(DI*1), R8 - SHRQ $0x01, R8 - ADDQ $0x01, DI - SUBQ $0x01, R9 - -index_loop_encodeBetterBlockAsm: - CMPQ R8, R9 - JAE search_loop_encodeBetterBlockAsm - MOVQ (BX)(DI*1), R10 - MOVQ (BX)(R8*1), R11 - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x2f, R10 - SHLQ $0x08, R11 - IMULQ SI, R11 - SHRQ $0x2f, R11 - MOVL DI, (AX)(R10*4) - MOVL R8, (AX)(R11*4) - ADDQ $0x02, DI - ADDQ $0x02, R8 - JMP index_loop_encodeBetterBlockAsm - -emit_remainder_encodeBetterBlockAsm: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 5(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBetterBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBetterBlockAsm: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBetterBlockAsm - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBetterBlockAsm - CMPL DX, $0x00010000 - JB three_bytes_emit_remainder_encodeBetterBlockAsm - CMPL DX, $0x01000000 - JB four_bytes_emit_remainder_encodeBetterBlockAsm - MOVB $0xfc, (CX) - MOVL DX, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_emit_remainder_encodeBetterBlockAsm - -four_bytes_emit_remainder_encodeBetterBlockAsm: - MOVL DX, BX - SHRL $0x10, BX - MOVB $0xf8, (CX) - MOVW DX, 1(CX) - MOVB BL, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_emit_remainder_encodeBetterBlockAsm - -three_bytes_emit_remainder_encodeBetterBlockAsm: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBetterBlockAsm - -two_bytes_emit_remainder_encodeBetterBlockAsm: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBetterBlockAsm - JMP memmove_long_emit_remainder_encodeBetterBlockAsm - -one_byte_emit_remainder_encodeBetterBlockAsm: - SHLB $0x02, DL 
- MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBetterBlockAsm: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBetterBlockAsm: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm - -memmove_long_emit_remainder_encodeBetterBlockAsm: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBetterBlockAsm: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int -// Requires: BMI, SSE2 -TEXT 
·encodeBetterBlockAsm4MB(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00001200, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBetterBlockAsm4MB: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBetterBlockAsm4MB - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -6(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL $0x00000000, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBetterBlockAsm4MB: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x07, SI - CMPL SI, $0x63 - JBE check_maxskip_ok_encodeBetterBlockAsm4MB - LEAL 100(DX), SI - JMP check_maxskip_cont_encodeBetterBlockAsm4MB - -check_maxskip_ok_encodeBetterBlockAsm4MB: - LEAL 1(DX)(SI*1), SI - -check_maxskip_cont_encodeBetterBlockAsm4MB: - CMPL SI, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm4MB - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x00cf1bbcdcbfa563, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x2f, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x32, R11 - MOVL (AX)(R10*4), SI - MOVL 524288(AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - MOVL DX, 524288(AX)(R11*4) - MOVQ (BX)(SI*1), R10 - MOVQ (BX)(R8*1), R11 - CMPQ R10, DI - JEQ candidate_match_encodeBetterBlockAsm4MB - CMPQ R11, DI - JNE no_short_found_encodeBetterBlockAsm4MB - MOVL R8, SI - JMP candidate_match_encodeBetterBlockAsm4MB - -no_short_found_encodeBetterBlockAsm4MB: - CMPL R10, DI - JEQ candidate_match_encodeBetterBlockAsm4MB - CMPL R11, DI - JEQ candidateS_match_encodeBetterBlockAsm4MB - MOVL 20(SP), DX - JMP search_loop_encodeBetterBlockAsm4MB - -candidateS_match_encodeBetterBlockAsm4MB: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x08, R10 - IMULQ R9, R10 - SHRQ $0x2f, R10 - MOVL (AX)(R10*4), SI - INCL DX - MOVL DX, (AX)(R10*4) - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBetterBlockAsm4MB - DECL DX - MOVL R8, SI - -candidate_match_encodeBetterBlockAsm4MB: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBetterBlockAsm4MB - -match_extend_back_loop_encodeBetterBlockAsm4MB: - CMPL DX, DI - JBE match_extend_back_end_encodeBetterBlockAsm4MB - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBetterBlockAsm4MB - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBetterBlockAsm4MB - JMP match_extend_back_loop_encodeBetterBlockAsm4MB - -match_extend_back_end_encodeBetterBlockAsm4MB: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 4(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBetterBlockAsm4MB - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBetterBlockAsm4MB: - MOVL DX, DI - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), R10 - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB: - CMPL R8, $0x10 - JB matchlen_match8_match_nolit_encodeBetterBlockAsm4MB - MOVQ (R9)(R12*1), R11 - MOVQ 8(R9)(R12*1), R13 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB - XORQ 8(R10)(R12*1), R13 - JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB - LEAL -16(R8), R8 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB - -matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB: -#ifdef GOAMD64_v3 - TZCNTQ 
R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm4MB - -matchlen_match8_match_nolit_encodeBetterBlockAsm4MB: - CMPL R8, $0x08 - JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB - LEAL -8(R8), R8 - LEAL 8(R12), R12 - JMP matchlen_match4_match_nolit_encodeBetterBlockAsm4MB - -matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm4MB - -matchlen_match4_match_nolit_encodeBetterBlockAsm4MB: - CMPL R8, $0x04 - JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 - JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB - LEAL -4(R8), R8 - LEAL 4(R12), R12 - -matchlen_match2_match_nolit_encodeBetterBlockAsm4MB: - CMPL R8, $0x01 - JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB - JB match_nolit_end_encodeBetterBlockAsm4MB - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 - JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB - LEAL 2(R12), R12 - SUBL $0x02, R8 - JZ match_nolit_end_encodeBetterBlockAsm4MB - -matchlen_match1_match_nolit_encodeBetterBlockAsm4MB: - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 - JNE match_nolit_end_encodeBetterBlockAsm4MB - LEAL 1(R12), R12 - -match_nolit_end_encodeBetterBlockAsm4MB: - MOVL DX, R8 - SUBL SI, R8 - - // Check if repeat - CMPL 16(SP), R8 - JEQ match_is_repeat_encodeBetterBlockAsm4MB - CMPL R12, $0x01 - JA match_length_ok_encodeBetterBlockAsm4MB - CMPL R8, $0x0000ffff - JBE match_length_ok_encodeBetterBlockAsm4MB - MOVL 20(SP), DX - INCL DX - JMP search_loop_encodeBetterBlockAsm4MB - -match_length_ok_encodeBetterBlockAsm4MB: - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_encodeBetterBlockAsm4MB - CMPL SI, $0x00000100 - JB two_bytes_match_emit_encodeBetterBlockAsm4MB - CMPL SI, $0x00010000 - JB three_bytes_match_emit_encodeBetterBlockAsm4MB - MOVL SI, R11 - SHRL $0x10, R11 - MOVB $0xf8, (CX) - MOVW SI, 1(CX) - MOVB R11, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_match_emit_encodeBetterBlockAsm4MB - -three_bytes_match_emit_encodeBetterBlockAsm4MB: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBetterBlockAsm4MB - -two_bytes_match_emit_encodeBetterBlockAsm4MB: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_encodeBetterBlockAsm4MB - JMP memmove_long_match_emit_encodeBetterBlockAsm4MB - -one_byte_match_emit_encodeBetterBlockAsm4MB: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBetterBlockAsm4MB: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4 - CMPQ R9, $0x08 - JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64 - 
-emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB - -emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB - -emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB - -emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB - -emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBetterBlockAsm4MB: - MOVQ SI, CX - JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB - -memmove_long_match_emit_encodeBetterBlockAsm4MB: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_encodeBetterBlockAsm4MB: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitCopy - CMPL R8, $0x00010000 - JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB - CMPL R12, $0x40 - JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB - MOVB $0xff, (CX) - MOVL R8, 1(CX) - LEAL -64(R12), R12 - ADDQ $0x05, CX - CMPL R12, $0x04 - JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy - CMPL R12, $0x00010100 - JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy - LEAL -65536(R12), R12 - MOVL R12, R8 - MOVW $0x001d, (CX) - MOVW R12, 2(CX) - SARL $0x10, R8 - MOVB R8, 4(CX) - ADDQ $0x05, CX - JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB: - TESTL R12, R12 - JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - XORL SI, SI - LEAL -1(SI)(R12*4), R12 - MOVB R12, (CX) - MOVL R8, 1(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -two_byte_offset_match_nolit_encodeBetterBlockAsm4MB: - CMPL R12, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB - CMPL R8, $0x00000800 - JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R12 - - // emitRepeat - LEAL -4(R12), R12 - JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - CMPL R12, $0x00010100 - JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b - LEAL -65536(R12), R12 - MOVL R12, R8 - MOVW $0x001d, (CX) - MOVW R12, 2(CX) - SARL $0x10, R8 - MOVB R8, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -long_offset_short_match_nolit_encodeBetterBlockAsm4MB: - MOVB $0xee, (CX) - MOVW R8, 1(CX) - LEAL -60(R12), R12 - ADDQ $0x03, CX - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE 
repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - CMPL R12, $0x00010100 - JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short - LEAL -65536(R12), R12 - MOVL R12, R8 - MOVW $0x001d, (CX) - MOVW R12, 2(CX) - SARL $0x10, R8 - MOVB R8, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB: - MOVL R12, SI - SHLL $0x02, SI - CMPL R12, $0x0c - JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB - CMPL R8, $0x00000800 - JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB - LEAL -15(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -emit_copy_three_match_nolit_encodeBetterBlockAsm4MB: - LEAL -2(SI), SI - MOVB SI, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -match_is_repeat_encodeBetterBlockAsm4MB: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB - CMPL SI, $0x00000100 - JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB - CMPL SI, $0x00010000 - JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB - MOVL SI, R11 - SHRL $0x10, R11 - MOVB $0xf8, (CX) - MOVW SI, 1(CX) - MOVB R11, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB - -three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB - -two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB - -one_byte_match_emit_repeat_encodeBetterBlockAsm4MB: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_repeat_encodeBetterBlockAsm4MB: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4 - CMPQ R9, $0x08 - JB 
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64 - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB: - MOVQ SI, CX - JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB - -memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB - -cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB - CMPL R12, $0x00010100 - JB 
repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB - LEAL -65536(R12), R12 - MOVL R12, R8 - MOVW $0x001d, (CX) - MOVW R12, 2(CX) - SARL $0x10, R8 - MOVB R8, 4(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB: - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB - -repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - -match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm4MB - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBetterBlockAsm4MB - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBetterBlockAsm4MB: - MOVQ $0x00cf1bbcdcbfa563, SI - MOVQ $0x9e3779b1, R8 - LEAQ 1(DI), DI - LEAQ -2(DX), R9 - MOVQ (BX)(DI*1), R10 - MOVQ 1(BX)(DI*1), R11 - MOVQ (BX)(R9*1), R12 - MOVQ 1(BX)(R9*1), R13 - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x2f, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x32, R11 - SHLQ $0x08, R12 - IMULQ SI, R12 - SHRQ $0x2f, R12 - SHLQ $0x20, R13 - IMULQ R8, R13 - SHRQ $0x32, R13 - LEAQ 1(DI), R8 - LEAQ 1(R9), R14 - MOVL DI, (AX)(R10*4) - MOVL R9, (AX)(R12*4) - MOVL R8, 524288(AX)(R11*4) - MOVL R14, 524288(AX)(R13*4) - LEAQ 1(R9)(DI*1), R8 - SHRQ $0x01, R8 - ADDQ $0x01, DI - SUBQ $0x01, R9 - -index_loop_encodeBetterBlockAsm4MB: - CMPQ R8, R9 - JAE search_loop_encodeBetterBlockAsm4MB - MOVQ (BX)(DI*1), R10 - MOVQ (BX)(R8*1), R11 - SHLQ $0x08, R10 - IMULQ SI, R10 - SHRQ $0x2f, R10 - SHLQ $0x08, R11 - IMULQ SI, R11 - SHRQ $0x2f, R11 - MOVL DI, (AX)(R10*4) - MOVL R8, (AX)(R11*4) - ADDQ $0x02, DI - ADDQ $0x02, R8 - JMP index_loop_encodeBetterBlockAsm4MB - -emit_remainder_encodeBetterBlockAsm4MB: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 4(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBetterBlockAsm4MB - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBetterBlockAsm4MB: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBetterBlockAsm4MB - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBetterBlockAsm4MB - CMPL DX, $0x00010000 - JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB - MOVL DX, BX - SHRL $0x10, BX - MOVB $0xf8, (CX) - MOVW DX, 1(CX) - MOVB BL, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB - -three_bytes_emit_remainder_encodeBetterBlockAsm4MB: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB - -two_bytes_emit_remainder_encodeBetterBlockAsm4MB: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBetterBlockAsm4MB - JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB - -one_byte_emit_remainder_encodeBetterBlockAsm4MB: - SHLB $0x02, 
DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBetterBlockAsm4MB: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB - -memmove_long_emit_remainder_encodeBetterBlockAsm4MB: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func 
encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm12B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000280, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBetterBlockAsm12B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBetterBlockAsm12B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -6(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL $0x00000000, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBetterBlockAsm12B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 1(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm12B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x34, R11 - MOVL (AX)(R10*4), SI - MOVL 65536(AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - MOVL DX, 65536(AX)(R11*4) - MOVQ (BX)(SI*1), R10 - MOVQ (BX)(R8*1), R11 - CMPQ R10, DI - JEQ candidate_match_encodeBetterBlockAsm12B - CMPQ R11, DI - JNE no_short_found_encodeBetterBlockAsm12B - MOVL R8, SI - JMP candidate_match_encodeBetterBlockAsm12B - -no_short_found_encodeBetterBlockAsm12B: - CMPL R10, DI - JEQ candidate_match_encodeBetterBlockAsm12B - CMPL R11, DI - JEQ candidateS_match_encodeBetterBlockAsm12B - MOVL 20(SP), DX - JMP search_loop_encodeBetterBlockAsm12B - -candidateS_match_encodeBetterBlockAsm12B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - MOVL (AX)(R10*4), SI - INCL DX - MOVL DX, (AX)(R10*4) - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBetterBlockAsm12B - DECL DX - MOVL R8, SI - -candidate_match_encodeBetterBlockAsm12B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBetterBlockAsm12B - -match_extend_back_loop_encodeBetterBlockAsm12B: - CMPL DX, DI - JBE match_extend_back_end_encodeBetterBlockAsm12B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBetterBlockAsm12B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBetterBlockAsm12B - JMP match_extend_back_loop_encodeBetterBlockAsm12B - -match_extend_back_end_encodeBetterBlockAsm12B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBetterBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBetterBlockAsm12B: - MOVL DX, DI - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), R10 - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B: - CMPL R8, $0x10 - JB matchlen_match8_match_nolit_encodeBetterBlockAsm12B - MOVQ (R9)(R12*1), R11 - MOVQ 8(R9)(R12*1), R13 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B - XORQ 8(R10)(R12*1), R13 - JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B - LEAL -16(R8), R8 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B - -matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP 
match_nolit_end_encodeBetterBlockAsm12B - -matchlen_match8_match_nolit_encodeBetterBlockAsm12B: - CMPL R8, $0x08 - JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B - LEAL -8(R8), R8 - LEAL 8(R12), R12 - JMP matchlen_match4_match_nolit_encodeBetterBlockAsm12B - -matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm12B - -matchlen_match4_match_nolit_encodeBetterBlockAsm12B: - CMPL R8, $0x04 - JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 - JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B - LEAL -4(R8), R8 - LEAL 4(R12), R12 - -matchlen_match2_match_nolit_encodeBetterBlockAsm12B: - CMPL R8, $0x01 - JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B - JB match_nolit_end_encodeBetterBlockAsm12B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 - JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B - LEAL 2(R12), R12 - SUBL $0x02, R8 - JZ match_nolit_end_encodeBetterBlockAsm12B - -matchlen_match1_match_nolit_encodeBetterBlockAsm12B: - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 - JNE match_nolit_end_encodeBetterBlockAsm12B - LEAL 1(R12), R12 - -match_nolit_end_encodeBetterBlockAsm12B: - MOVL DX, R8 - SUBL SI, R8 - - // Check if repeat - CMPL 16(SP), R8 - JEQ match_is_repeat_encodeBetterBlockAsm12B - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_encodeBetterBlockAsm12B - CMPL SI, $0x00000100 - JB two_bytes_match_emit_encodeBetterBlockAsm12B - JB three_bytes_match_emit_encodeBetterBlockAsm12B - -three_bytes_match_emit_encodeBetterBlockAsm12B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBetterBlockAsm12B - -two_bytes_match_emit_encodeBetterBlockAsm12B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_encodeBetterBlockAsm12B - JMP memmove_long_match_emit_encodeBetterBlockAsm12B - -one_byte_match_emit_encodeBetterBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBetterBlockAsm12B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4 - CMPQ R9, $0x08 - JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP 
memmove_end_copy_match_emit_encodeBetterBlockAsm12B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBetterBlockAsm12B: - MOVQ SI, CX - JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B - -memmove_long_match_emit_encodeBetterBlockAsm12B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_encodeBetterBlockAsm12B: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitCopy - CMPL R12, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B - CMPL R8, $0x00000800 - JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R12 - - // emitRepeat - LEAL -4(R12), R12 - JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: - XORQ SI, SI - LEAL 
1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -long_offset_short_match_nolit_encodeBetterBlockAsm12B: - MOVB $0xee, (CX) - MOVW R8, 1(CX) - LEAL -60(R12), R12 - ADDQ $0x03, CX - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B: - MOVL R12, SI - SHLL $0x02, SI - CMPL R12, $0x0c - JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B - CMPL R8, $0x00000800 - JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B - LEAL -15(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -emit_copy_three_match_nolit_encodeBetterBlockAsm12B: - LEAL -2(SI), SI - MOVB SI, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -match_is_repeat_encodeBetterBlockAsm12B: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B - CMPL SI, $0x00000100 - JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B - JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B - -three_bytes_match_emit_repeat_encodeBetterBlockAsm12B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B - -two_bytes_match_emit_repeat_encodeBetterBlockAsm12B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_repeat_encodeBetterBlockAsm12B - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B - -one_byte_match_emit_repeat_encodeBetterBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_repeat_encodeBetterBlockAsm12B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4 - CMPQ R9, $0x08 - JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7 - CMPQ R9, $0x10 - JBE 
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B: - MOVQ SI, CX - JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B - -memmove_long_match_emit_repeat_encodeBetterBlockAsm12B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B - -cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - 
-repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B - -repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - -match_nolit_emitcopy_end_encodeBetterBlockAsm12B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm12B - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBetterBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBetterBlockAsm12B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - LEAQ 1(DI), DI - LEAQ -2(DX), R9 - MOVQ (BX)(DI*1), R10 - MOVQ 1(BX)(DI*1), R11 - MOVQ (BX)(R9*1), R12 - MOVQ 1(BX)(R9*1), R13 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x32, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x34, R11 - SHLQ $0x10, R12 - IMULQ SI, R12 - SHRQ $0x32, R12 - SHLQ $0x20, R13 - IMULQ R8, R13 - SHRQ $0x34, R13 - LEAQ 1(DI), R8 - LEAQ 1(R9), R14 - MOVL DI, (AX)(R10*4) - MOVL R9, (AX)(R12*4) - MOVL R8, 65536(AX)(R11*4) - MOVL R14, 65536(AX)(R13*4) - LEAQ 1(R9)(DI*1), R8 - SHRQ $0x01, R8 - ADDQ $0x01, DI - SUBQ $0x01, R9 - -index_loop_encodeBetterBlockAsm12B: - CMPQ R8, R9 - JAE search_loop_encodeBetterBlockAsm12B - MOVQ (BX)(DI*1), R10 - MOVQ (BX)(R8*1), R11 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R11 - IMULQ SI, R11 - SHRQ $0x32, R11 - MOVL DI, (AX)(R10*4) - MOVL R8, (AX)(R11*4) - ADDQ $0x02, DI - ADDQ $0x02, R8 - JMP index_loop_encodeBetterBlockAsm12B - -emit_remainder_encodeBetterBlockAsm12B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBetterBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBetterBlockAsm12B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBetterBlockAsm12B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBetterBlockAsm12B - JB three_bytes_emit_remainder_encodeBetterBlockAsm12B - -three_bytes_emit_remainder_encodeBetterBlockAsm12B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B - -two_bytes_emit_remainder_encodeBetterBlockAsm12B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBetterBlockAsm12B - JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B - -one_byte_emit_remainder_encodeBetterBlockAsm12B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBetterBlockAsm12B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE 
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B - -memmove_long_emit_remainder_encodeBetterBlockAsm12B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBetterBlockAsm12B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm10B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x000000a0, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBetterBlockAsm10B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBetterBlockAsm10B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -6(DX), 
BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL $0x00000000, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBetterBlockAsm10B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 1(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm10B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x36, R11 - MOVL (AX)(R10*4), SI - MOVL 16384(AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - MOVL DX, 16384(AX)(R11*4) - MOVQ (BX)(SI*1), R10 - MOVQ (BX)(R8*1), R11 - CMPQ R10, DI - JEQ candidate_match_encodeBetterBlockAsm10B - CMPQ R11, DI - JNE no_short_found_encodeBetterBlockAsm10B - MOVL R8, SI - JMP candidate_match_encodeBetterBlockAsm10B - -no_short_found_encodeBetterBlockAsm10B: - CMPL R10, DI - JEQ candidate_match_encodeBetterBlockAsm10B - CMPL R11, DI - JEQ candidateS_match_encodeBetterBlockAsm10B - MOVL 20(SP), DX - JMP search_loop_encodeBetterBlockAsm10B - -candidateS_match_encodeBetterBlockAsm10B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - MOVL (AX)(R10*4), SI - INCL DX - MOVL DX, (AX)(R10*4) - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBetterBlockAsm10B - DECL DX - MOVL R8, SI - -candidate_match_encodeBetterBlockAsm10B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBetterBlockAsm10B - -match_extend_back_loop_encodeBetterBlockAsm10B: - CMPL DX, DI - JBE match_extend_back_end_encodeBetterBlockAsm10B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBetterBlockAsm10B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBetterBlockAsm10B - JMP match_extend_back_loop_encodeBetterBlockAsm10B - -match_extend_back_end_encodeBetterBlockAsm10B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBetterBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBetterBlockAsm10B: - MOVL DX, DI - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), R10 - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B: - CMPL R8, $0x10 - JB matchlen_match8_match_nolit_encodeBetterBlockAsm10B - MOVQ (R9)(R12*1), R11 - MOVQ 8(R9)(R12*1), R13 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B - XORQ 8(R10)(R12*1), R13 - JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B - LEAL -16(R8), R8 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B - -matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm10B - -matchlen_match8_match_nolit_encodeBetterBlockAsm10B: - CMPL R8, $0x08 - JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B - LEAL -8(R8), R8 - LEAL 8(R12), R12 - JMP matchlen_match4_match_nolit_encodeBetterBlockAsm10B - -matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm10B - 
-matchlen_match4_match_nolit_encodeBetterBlockAsm10B: - CMPL R8, $0x04 - JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 - JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B - LEAL -4(R8), R8 - LEAL 4(R12), R12 - -matchlen_match2_match_nolit_encodeBetterBlockAsm10B: - CMPL R8, $0x01 - JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B - JB match_nolit_end_encodeBetterBlockAsm10B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 - JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B - LEAL 2(R12), R12 - SUBL $0x02, R8 - JZ match_nolit_end_encodeBetterBlockAsm10B - -matchlen_match1_match_nolit_encodeBetterBlockAsm10B: - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 - JNE match_nolit_end_encodeBetterBlockAsm10B - LEAL 1(R12), R12 - -match_nolit_end_encodeBetterBlockAsm10B: - MOVL DX, R8 - SUBL SI, R8 - - // Check if repeat - CMPL 16(SP), R8 - JEQ match_is_repeat_encodeBetterBlockAsm10B - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_encodeBetterBlockAsm10B - CMPL SI, $0x00000100 - JB two_bytes_match_emit_encodeBetterBlockAsm10B - JB three_bytes_match_emit_encodeBetterBlockAsm10B - -three_bytes_match_emit_encodeBetterBlockAsm10B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBetterBlockAsm10B - -two_bytes_match_emit_encodeBetterBlockAsm10B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_encodeBetterBlockAsm10B - JMP memmove_long_match_emit_encodeBetterBlockAsm10B - -one_byte_match_emit_encodeBetterBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBetterBlockAsm10B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4 - CMPQ R9, $0x08 - JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBetterBlockAsm10B: - MOVQ SI, 
CX - JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B - -memmove_long_match_emit_encodeBetterBlockAsm10B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_encodeBetterBlockAsm10B: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitCopy - CMPL R12, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B - CMPL R8, $0x00000800 - JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R12 - - // emitRepeat - LEAL -4(R12), R12 - JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -long_offset_short_match_nolit_encodeBetterBlockAsm10B: - MOVB $0xee, (CX) - MOVW R8, 1(CX) - LEAL -60(R12), R12 - ADDQ $0x03, CX - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short - CMPL R8, $0x00000800 - JB 
repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B: - MOVL R12, SI - SHLL $0x02, SI - CMPL R12, $0x0c - JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B - CMPL R8, $0x00000800 - JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B - LEAL -15(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -emit_copy_three_match_nolit_encodeBetterBlockAsm10B: - LEAL -2(SI), SI - MOVB SI, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -match_is_repeat_encodeBetterBlockAsm10B: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B - CMPL SI, $0x00000100 - JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B - JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B - -three_bytes_match_emit_repeat_encodeBetterBlockAsm10B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B - -two_bytes_match_emit_repeat_encodeBetterBlockAsm10B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_repeat_encodeBetterBlockAsm10B - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B - -one_byte_match_emit_repeat_encodeBetterBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_repeat_encodeBetterBlockAsm10B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4 - CMPQ R9, $0x08 - JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP 
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B: - MOVQ SI, CX - JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B - -memmove_long_match_emit_repeat_encodeBetterBlockAsm10B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B - CMPL R8, $0x00000800 - JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B - -cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B - -repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - -match_nolit_emitcopy_end_encodeBetterBlockAsm10B: - CMPL DX, 8(SP) - JAE 
emit_remainder_encodeBetterBlockAsm10B - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBetterBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBetterBlockAsm10B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - LEAQ 1(DI), DI - LEAQ -2(DX), R9 - MOVQ (BX)(DI*1), R10 - MOVQ 1(BX)(DI*1), R11 - MOVQ (BX)(R9*1), R12 - MOVQ 1(BX)(R9*1), R13 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x10, R12 - IMULQ SI, R12 - SHRQ $0x34, R12 - SHLQ $0x20, R13 - IMULQ R8, R13 - SHRQ $0x36, R13 - LEAQ 1(DI), R8 - LEAQ 1(R9), R14 - MOVL DI, (AX)(R10*4) - MOVL R9, (AX)(R12*4) - MOVL R8, 16384(AX)(R11*4) - MOVL R14, 16384(AX)(R13*4) - LEAQ 1(R9)(DI*1), R8 - SHRQ $0x01, R8 - ADDQ $0x01, DI - SUBQ $0x01, R9 - -index_loop_encodeBetterBlockAsm10B: - CMPQ R8, R9 - JAE search_loop_encodeBetterBlockAsm10B - MOVQ (BX)(DI*1), R10 - MOVQ (BX)(R8*1), R11 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x34, R10 - SHLQ $0x10, R11 - IMULQ SI, R11 - SHRQ $0x34, R11 - MOVL DI, (AX)(R10*4) - MOVL R8, (AX)(R11*4) - ADDQ $0x02, DI - ADDQ $0x02, R8 - JMP index_loop_encodeBetterBlockAsm10B - -emit_remainder_encodeBetterBlockAsm10B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBetterBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBetterBlockAsm10B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBetterBlockAsm10B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBetterBlockAsm10B - JB three_bytes_emit_remainder_encodeBetterBlockAsm10B - -three_bytes_emit_remainder_encodeBetterBlockAsm10B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B - -two_bytes_emit_remainder_encodeBetterBlockAsm10B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBetterBlockAsm10B - JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B - -one_byte_emit_remainder_encodeBetterBlockAsm10B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBetterBlockAsm10B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL 
SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B - -memmove_long_emit_remainder_encodeBetterBlockAsm10B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBetterBlockAsm10B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeBetterBlockAsm8B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000028, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeBetterBlockAsm8B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeBetterBlockAsm8B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -6(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL $0x00000000, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeBetterBlockAsm8B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x04, SI - LEAL 1(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm8B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x38, R11 - MOVL (AX)(R10*4), SI - MOVL 4096(AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - MOVL DX, 4096(AX)(R11*4) - MOVQ (BX)(SI*1), R10 - MOVQ (BX)(R8*1), 
R11 - CMPQ R10, DI - JEQ candidate_match_encodeBetterBlockAsm8B - CMPQ R11, DI - JNE no_short_found_encodeBetterBlockAsm8B - MOVL R8, SI - JMP candidate_match_encodeBetterBlockAsm8B - -no_short_found_encodeBetterBlockAsm8B: - CMPL R10, DI - JEQ candidate_match_encodeBetterBlockAsm8B - CMPL R11, DI - JEQ candidateS_match_encodeBetterBlockAsm8B - MOVL 20(SP), DX - JMP search_loop_encodeBetterBlockAsm8B - -candidateS_match_encodeBetterBlockAsm8B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - MOVL (AX)(R10*4), SI - INCL DX - MOVL DX, (AX)(R10*4) - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeBetterBlockAsm8B - DECL DX - MOVL R8, SI - -candidate_match_encodeBetterBlockAsm8B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeBetterBlockAsm8B - -match_extend_back_loop_encodeBetterBlockAsm8B: - CMPL DX, DI - JBE match_extend_back_end_encodeBetterBlockAsm8B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeBetterBlockAsm8B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeBetterBlockAsm8B - JMP match_extend_back_loop_encodeBetterBlockAsm8B - -match_extend_back_end_encodeBetterBlockAsm8B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeBetterBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeBetterBlockAsm8B: - MOVL DX, DI - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), R10 - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B: - CMPL R8, $0x10 - JB matchlen_match8_match_nolit_encodeBetterBlockAsm8B - MOVQ (R9)(R12*1), R11 - MOVQ 8(R9)(R12*1), R13 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B - XORQ 8(R10)(R12*1), R13 - JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B - LEAL -16(R8), R8 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B - -matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm8B - -matchlen_match8_match_nolit_encodeBetterBlockAsm8B: - CMPL R8, $0x08 - JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B - LEAL -8(R8), R8 - LEAL 8(R12), R12 - JMP matchlen_match4_match_nolit_encodeBetterBlockAsm8B - -matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP match_nolit_end_encodeBetterBlockAsm8B - -matchlen_match4_match_nolit_encodeBetterBlockAsm8B: - CMPL R8, $0x04 - JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 - JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B - LEAL -4(R8), R8 - LEAL 4(R12), R12 - -matchlen_match2_match_nolit_encodeBetterBlockAsm8B: - CMPL R8, $0x01 - JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B - JB match_nolit_end_encodeBetterBlockAsm8B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 - JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B - LEAL 2(R12), R12 - SUBL $0x02, R8 - JZ match_nolit_end_encodeBetterBlockAsm8B - -matchlen_match1_match_nolit_encodeBetterBlockAsm8B: - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 - JNE 
match_nolit_end_encodeBetterBlockAsm8B - LEAL 1(R12), R12 - -match_nolit_end_encodeBetterBlockAsm8B: - MOVL DX, R8 - SUBL SI, R8 - - // Check if repeat - CMPL 16(SP), R8 - JEQ match_is_repeat_encodeBetterBlockAsm8B - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_encodeBetterBlockAsm8B - CMPL SI, $0x00000100 - JB two_bytes_match_emit_encodeBetterBlockAsm8B - JB three_bytes_match_emit_encodeBetterBlockAsm8B - -three_bytes_match_emit_encodeBetterBlockAsm8B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeBetterBlockAsm8B - -two_bytes_match_emit_encodeBetterBlockAsm8B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_encodeBetterBlockAsm8B - JMP memmove_long_match_emit_encodeBetterBlockAsm8B - -one_byte_match_emit_encodeBetterBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeBetterBlockAsm8B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x04 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4 - CMPQ R9, $0x08 - JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4: - MOVL (R10), R11 - MOVL R11, (CX) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7: - MOVL (R10), R11 - MOVL -4(R10)(R9*1), R10 - MOVL R11, (CX) - MOVL R10, -4(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B - -emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeBetterBlockAsm8B: - MOVQ SI, CX - JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B - -memmove_long_match_emit_encodeBetterBlockAsm8B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA 
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_encodeBetterBlockAsm8B: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitCopy - CMPL R12, $0x40 - JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B - CMPL R8, $0x00000800 - JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - SUBL $0x08, R12 - - // emitRepeat - LEAL -4(R12), R12 - JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -long_offset_short_match_nolit_encodeBetterBlockAsm8B: - MOVB $0xee, (CX) - MOVW R8, 1(CX) - LEAL -60(R12), R12 - ADDQ $0x03, CX - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short - -cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B: - MOVL R12, SI - SHLL $0x02, SI - CMPL R12, $0x0c - JAE 
emit_copy_three_match_nolit_encodeBetterBlockAsm8B - LEAL -15(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -emit_copy_three_match_nolit_encodeBetterBlockAsm8B: - LEAL -2(SI), SI - MOVB SI, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -match_is_repeat_encodeBetterBlockAsm8B: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c - JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B - CMPL SI, $0x00000100 - JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B - JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B - -three_bytes_match_emit_repeat_encodeBetterBlockAsm8B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B - -two_bytes_match_emit_repeat_encodeBetterBlockAsm8B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_repeat_encodeBetterBlockAsm8B - JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B - -one_byte_match_emit_repeat_encodeBetterBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_repeat_encodeBetterBlockAsm8B: - LEAQ (CX)(R8*1), SI - - // genMemMoveShort - CMPQ R8, $0x04 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4 - CMPQ R8, $0x08 - JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7 - CMPQ R8, $0x10 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16 - CMPQ R8, $0x20 - JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64 - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4: - MOVL (R9), R10 - MOVL R10, (CX) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7: - MOVL (R9), R10 - MOVL -4(R9)(R8*1), R9 - MOVL R10, (CX) - MOVL R9, -4(CX)(R8*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (CX) - MOVQ R9, -8(CX)(R8*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R8*1) - JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B - -emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - -memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B: - MOVQ SI, CX - JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B - -memmove_long_match_emit_repeat_encodeBetterBlockAsm8B: - LEAQ (CX)(R8*1), SI - - // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R13 - SUBQ R10, R13 - DECQ R11 - JA 
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R9)(R13*1), R10 - LEAQ -32(CX)(R13*1), R14 - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R10 - ADDQ $0x20, R13 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R9)(R13*1), X4 - MOVOU -16(R9)(R13*1), X5 - MOVOA X4, -32(CX)(R13*1) - MOVOA X5, -16(CX)(R13*1) - ADDQ $0x20, R13 - CMPQ R8, R13 - JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - MOVQ SI, CX - -emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitRepeat - MOVL R12, SI - LEAL -4(R12), R12 - CMPL SI, $0x08 - JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B - CMPL SI, $0x0c - JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B - -cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B: - CMPL R12, $0x00000104 - JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B - LEAL -256(R12), R12 - MOVW $0x0019, (CX) - MOVW R12, 2(CX) - ADDQ $0x04, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B: - LEAL -4(R12), R12 - MOVW $0x0015, (CX) - MOVB R12, 2(CX) - ADDQ $0x03, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - -repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B: - SHLL $0x02, R12 - ORL $0x01, R12 - MOVW R12, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B - XORQ SI, SI - LEAL 1(SI)(R12*4), R12 - MOVB R8, 1(CX) - SARL $0x08, R8 - SHLL $0x05, R8 - ORL R8, R12 - MOVB R12, (CX) - ADDQ $0x02, CX - -match_nolit_emitcopy_end_encodeBetterBlockAsm8B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeBetterBlockAsm8B - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeBetterBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeBetterBlockAsm8B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - LEAQ 1(DI), DI - LEAQ -2(DX), R9 - MOVQ (BX)(DI*1), R10 - MOVQ 1(BX)(DI*1), R11 - MOVQ (BX)(R9*1), R12 - MOVQ 1(BX)(R9*1), R13 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 - SHLQ $0x10, R12 - IMULQ SI, R12 - SHRQ $0x36, R12 - SHLQ $0x20, R13 - IMULQ R8, R13 - SHRQ $0x38, R13 - LEAQ 1(DI), R8 - LEAQ 1(R9), R14 - MOVL DI, (AX)(R10*4) - MOVL R9, (AX)(R12*4) - MOVL R8, 4096(AX)(R11*4) - MOVL R14, 4096(AX)(R13*4) - LEAQ 1(R9)(DI*1), R8 - SHRQ $0x01, R8 - ADDQ $0x01, DI - SUBQ $0x01, R9 - -index_loop_encodeBetterBlockAsm8B: - CMPQ R8, R9 - JAE search_loop_encodeBetterBlockAsm8B - MOVQ (BX)(DI*1), R10 - MOVQ (BX)(R8*1), R11 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x36, R10 - SHLQ $0x10, R11 - IMULQ SI, R11 - SHRQ $0x36, R11 - MOVL DI, (AX)(R10*4) - MOVL R8, (AX)(R11*4) - ADDQ $0x02, DI - ADDQ $0x02, R8 - JMP index_loop_encodeBetterBlockAsm8B - -emit_remainder_encodeBetterBlockAsm8B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeBetterBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeBetterBlockAsm8B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ 
emit_literal_done_emit_remainder_encodeBetterBlockAsm8B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeBetterBlockAsm8B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeBetterBlockAsm8B - JB three_bytes_emit_remainder_encodeBetterBlockAsm8B - -three_bytes_emit_remainder_encodeBetterBlockAsm8B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B - -two_bytes_emit_remainder_encodeBetterBlockAsm8B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeBetterBlockAsm8B - JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B - -one_byte_emit_remainder_encodeBetterBlockAsm8B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeBetterBlockAsm8B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B - -memmove_long_emit_remainder_encodeBetterBlockAsm8B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back: - MOVOU (SI), X4 
- MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeBetterBlockAsm8B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeSnappyBlockAsm(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000200, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeSnappyBlockAsm: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeSnappyBlockAsm - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeSnappyBlockAsm: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeSnappyBlockAsm - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x32, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm - LEAL 1(DX), DI - MOVL 12(SP), SI - MOVL DI, R8 - SUBL 16(SP), R8 - JZ repeat_extend_back_end_encodeSnappyBlockAsm - -repeat_extend_back_loop_encodeSnappyBlockAsm: - CMPL DI, SI - JBE repeat_extend_back_end_encodeSnappyBlockAsm - MOVB -1(BX)(R8*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeSnappyBlockAsm - LEAL -1(DI), DI - DECL R8 - JNZ repeat_extend_back_loop_encodeSnappyBlockAsm - -repeat_extend_back_end_encodeSnappyBlockAsm: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 5(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_encodeSnappyBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeSnappyBlockAsm: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeSnappyBlockAsm - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_encodeSnappyBlockAsm - CMPL SI, $0x00010000 - JB three_bytes_repeat_emit_encodeSnappyBlockAsm - CMPL SI, $0x01000000 - JB four_bytes_repeat_emit_encodeSnappyBlockAsm - MOVB $0xfc, (CX) - MOVL SI, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm - -four_bytes_repeat_emit_encodeSnappyBlockAsm: - MOVL SI, R10 - SHRL $0x10, R10 - MOVB $0xf8, (CX) - MOVW 
SI, 1(CX) - MOVB R10, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm - -three_bytes_repeat_emit_encodeSnappyBlockAsm: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm - -two_bytes_repeat_emit_encodeSnappyBlockAsm: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeSnappyBlockAsm - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm - -one_byte_repeat_emit_encodeSnappyBlockAsm: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_repeat_emit_encodeSnappyBlockAsm: - LEAQ (CX)(R8*1), SI - - // genMemMoveShort - CMPQ R8, $0x08 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8 - CMPQ R8, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16 - CMPQ R8, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (CX) - MOVQ R9, -8(CX)(R8*1) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R8*1) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - -memmove_end_copy_repeat_emit_encodeSnappyBlockAsm: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm - -memmove_long_repeat_emit_encodeSnappyBlockAsm: - LEAQ (CX)(R8*1), SI - - // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU -16(R9)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 - JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeSnappyBlockAsm: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R11, R11 - -matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm: - CMPL R8, $0x10 - JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm - MOVQ (R9)(R11*1), R10 - MOVQ 8(R9)(R11*1), R12 - 
XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm - XORQ 8(SI)(R11*1), R12 - JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm - LEAL -16(R8), R8 - LEAL 16(R11), R11 - JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm - -matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R12, R12 - -#else - BSFQ R12, R12 - -#endif - SARQ $0x03, R12 - LEAL 8(R11)(R12*1), R11 - JMP repeat_extend_forward_end_encodeSnappyBlockAsm - -matchlen_match8_repeat_extend_encodeSnappyBlockAsm: - CMPL R8, $0x08 - JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm - LEAL -8(R8), R8 - LEAL 8(R11), R11 - JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm - -matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R10, R10 - -#else - BSFQ R10, R10 - -#endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 - JMP repeat_extend_forward_end_encodeSnappyBlockAsm - -matchlen_match4_repeat_extend_encodeSnappyBlockAsm: - CMPL R8, $0x04 - JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 - JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm - LEAL -4(R8), R8 - LEAL 4(R11), R11 - -matchlen_match2_repeat_extend_encodeSnappyBlockAsm: - CMPL R8, $0x01 - JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm - JB repeat_extend_forward_end_encodeSnappyBlockAsm - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 - JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm - LEAL 2(R11), R11 - SUBL $0x02, R8 - JZ repeat_extend_forward_end_encodeSnappyBlockAsm - -matchlen_match1_repeat_extend_encodeSnappyBlockAsm: - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 - JNE repeat_extend_forward_end_encodeSnappyBlockAsm - LEAL 1(R11), R11 - -repeat_extend_forward_end_encodeSnappyBlockAsm: - ADDL R11, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - - // emitCopy - CMPL DI, $0x00010000 - JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm - -four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm: - CMPL SI, $0x40 - JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm - MOVB $0xff, (CX) - MOVL DI, 1(CX) - LEAL -64(SI), SI - ADDQ $0x05, CX - CMPL SI, $0x04 - JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm - JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm - -four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm: - TESTL SI, SI - JZ repeat_end_emit_encodeSnappyBlockAsm - XORL R8, R8 - LEAL -1(R8)(SI*4), SI - MOVB SI, (CX) - MOVL DI, 1(CX) - ADDQ $0x05, CX - JMP repeat_end_emit_encodeSnappyBlockAsm - -two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm: - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm - -two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm - CMPL DI, $0x00000800 - JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeSnappyBlockAsm - -emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, CX - -repeat_end_emit_encodeSnappyBlockAsm: - MOVL DX, 12(SP) - JMP search_loop_encodeSnappyBlockAsm - 
-no_repeat_found_encodeSnappyBlockAsm: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeSnappyBlockAsm - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeSnappyBlockAsm - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeSnappyBlockAsm - MOVL 20(SP), DX - JMP search_loop_encodeSnappyBlockAsm - -candidate3_match_encodeSnappyBlockAsm: - ADDL $0x02, DX - JMP candidate_match_encodeSnappyBlockAsm - -candidate2_match_encodeSnappyBlockAsm: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_encodeSnappyBlockAsm: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeSnappyBlockAsm - -match_extend_back_loop_encodeSnappyBlockAsm: - CMPL DX, DI - JBE match_extend_back_end_encodeSnappyBlockAsm - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeSnappyBlockAsm - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeSnappyBlockAsm - JMP match_extend_back_loop_encodeSnappyBlockAsm - -match_extend_back_end_encodeSnappyBlockAsm: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 5(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeSnappyBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeSnappyBlockAsm: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeSnappyBlockAsm - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeSnappyBlockAsm - CMPL R8, $0x00010000 - JB three_bytes_match_emit_encodeSnappyBlockAsm - CMPL R8, $0x01000000 - JB four_bytes_match_emit_encodeSnappyBlockAsm - MOVB $0xfc, (CX) - MOVL R8, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_match_emit_encodeSnappyBlockAsm - -four_bytes_match_emit_encodeSnappyBlockAsm: - MOVL R8, R10 - SHRL $0x10, R10 - MOVB $0xf8, (CX) - MOVW R8, 1(CX) - MOVB R10, 3(CX) - ADDQ $0x04, CX - JMP memmove_long_match_emit_encodeSnappyBlockAsm - -three_bytes_match_emit_encodeSnappyBlockAsm: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeSnappyBlockAsm - -two_bytes_match_emit_encodeSnappyBlockAsm: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeSnappyBlockAsm - JMP memmove_long_match_emit_encodeSnappyBlockAsm - -one_byte_match_emit_encodeSnappyBlockAsm: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeSnappyBlockAsm: - LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP 
memmove_end_copy_match_emit_encodeSnappyBlockAsm - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeSnappyBlockAsm: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeSnappyBlockAsm - -memmove_long_match_emit_encodeSnappyBlockAsm: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeSnappyBlockAsm: -match_nolit_loop_encodeSnappyBlockAsm: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeSnappyBlockAsm - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm - -matchlen_bsf_16match_nolit_encodeSnappyBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeSnappyBlockAsm - -matchlen_match8_match_nolit_encodeSnappyBlockAsm: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeSnappyBlockAsm - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm - -matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeSnappyBlockAsm - -matchlen_match4_match_nolit_encodeSnappyBlockAsm: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeSnappyBlockAsm - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_encodeSnappyBlockAsm: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeSnappyBlockAsm - JB match_nolit_end_encodeSnappyBlockAsm - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE 
matchlen_match1_match_nolit_encodeSnappyBlockAsm - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_encodeSnappyBlockAsm - -matchlen_match1_match_nolit_encodeSnappyBlockAsm: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeSnappyBlockAsm - LEAL 1(R10), R10 - -match_nolit_end_encodeSnappyBlockAsm: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy - CMPL SI, $0x00010000 - JB two_byte_offset_match_nolit_encodeSnappyBlockAsm - -four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm: - CMPL R10, $0x40 - JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm - MOVB $0xff, (CX) - MOVL SI, 1(CX) - LEAL -64(R10), R10 - ADDQ $0x05, CX - CMPL R10, $0x04 - JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm - JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm - -four_bytes_remain_match_nolit_encodeSnappyBlockAsm: - TESTL R10, R10 - JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm - XORL DI, DI - LEAL -1(DI)(R10*4), R10 - MOVB R10, (CX) - MOVL SI, 1(CX) - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm - -two_byte_offset_match_nolit_encodeSnappyBlockAsm: - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm - -two_byte_offset_short_match_nolit_encodeSnappyBlockAsm: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm - -emit_copy_three_match_nolit_encodeSnappyBlockAsm: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeSnappyBlockAsm: - CMPL DX, 8(SP) - JAE emit_remainder_encodeSnappyBlockAsm - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeSnappyBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeSnappyBlockAsm: - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x32, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x32, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeSnappyBlockAsm - INCL DX - JMP search_loop_encodeSnappyBlockAsm - -emit_remainder_encodeSnappyBlockAsm: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 5(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeSnappyBlockAsm - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeSnappyBlockAsm: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeSnappyBlockAsm - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeSnappyBlockAsm - CMPL DX, $0x00010000 - JB three_bytes_emit_remainder_encodeSnappyBlockAsm - CMPL DX, $0x01000000 - JB four_bytes_emit_remainder_encodeSnappyBlockAsm - MOVB $0xfc, (CX) - MOVL DX, 1(CX) - ADDQ $0x05, CX - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm - -four_bytes_emit_remainder_encodeSnappyBlockAsm: - MOVL DX, BX - SHRL $0x10, BX - MOVB $0xf8, (CX) - MOVW DX, 1(CX) - MOVB BL, 3(CX) - ADDQ $0x04, CX 
- JMP memmove_long_emit_remainder_encodeSnappyBlockAsm - -three_bytes_emit_remainder_encodeSnappyBlockAsm: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm - -two_bytes_emit_remainder_encodeSnappyBlockAsm: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeSnappyBlockAsm - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm - -one_byte_emit_remainder_encodeSnappyBlockAsm: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeSnappyBlockAsm: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeSnappyBlockAsm: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm - -memmove_long_emit_remainder_encodeSnappyBlockAsm: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - 
MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeSnappyBlockAsm: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeSnappyBlockAsm64K(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000200, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeSnappyBlockAsm64K: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeSnappyBlockAsm64K - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeSnappyBlockAsm64K: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x06, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeSnappyBlockAsm64K - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x32, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x32, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm64K - LEAL 1(DX), DI - MOVL 12(SP), SI - MOVL DI, R8 - SUBL 16(SP), R8 - JZ repeat_extend_back_end_encodeSnappyBlockAsm64K - -repeat_extend_back_loop_encodeSnappyBlockAsm64K: - CMPL DI, SI - JBE repeat_extend_back_end_encodeSnappyBlockAsm64K - MOVB -1(BX)(R8*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeSnappyBlockAsm64K - LEAL -1(DI), DI - DECL R8 - JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K - -repeat_extend_back_end_encodeSnappyBlockAsm64K: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 3(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_encodeSnappyBlockAsm64K - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeSnappyBlockAsm64K: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeSnappyBlockAsm64K - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K - JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K - -three_bytes_repeat_emit_encodeSnappyBlockAsm64K: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K - -two_bytes_repeat_emit_encodeSnappyBlockAsm64K: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeSnappyBlockAsm64K - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K - -one_byte_repeat_emit_encodeSnappyBlockAsm64K: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_repeat_emit_encodeSnappyBlockAsm64K: - LEAQ (CX)(R8*1), SI - - // genMemMoveShort - CMPQ R8, $0x08 - JBE 
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8 - CMPQ R8, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 - CMPQ R8, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (CX) - MOVQ R9, -8(CX)(R8*1) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R8*1) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - -memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K - -memmove_long_repeat_emit_encodeSnappyBlockAsm64K: - LEAQ (CX)(R8*1), SI - - // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU -16(R9)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 - JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R11, R11 - -matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K: - CMPL R8, $0x10 - JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K - MOVQ (R9)(R11*1), R10 - MOVQ 8(R9)(R11*1), R12 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K - XORQ 8(SI)(R11*1), R12 - JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K - LEAL -16(R8), R8 - LEAL 16(R11), R11 - JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K - -matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K: -#ifdef GOAMD64_v3 - TZCNTQ R12, R12 - -#else - BSFQ R12, R12 - -#endif - SARQ $0x03, R12 - LEAL 8(R11)(R12*1), R11 - JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K - -matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K: - CMPL R8, $0x08 - JB 
matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K - LEAL -8(R8), R8 - LEAL 8(R11), R11 - JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K - -matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K: -#ifdef GOAMD64_v3 - TZCNTQ R10, R10 - -#else - BSFQ R10, R10 - -#endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 - JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K - -matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K: - CMPL R8, $0x04 - JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 - JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K - LEAL -4(R8), R8 - LEAL 4(R11), R11 - -matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K: - CMPL R8, $0x01 - JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K - JB repeat_extend_forward_end_encodeSnappyBlockAsm64K - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 - JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K - LEAL 2(R11), R11 - SUBL $0x02, R8 - JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K - -matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K: - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 - JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K - LEAL 1(R11), R11 - -repeat_extend_forward_end_encodeSnappyBlockAsm64K: - ADDL R11, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - - // emitCopy -two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K: - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K - -two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K - CMPL DI, $0x00000800 - JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeSnappyBlockAsm64K - -emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, CX - -repeat_end_emit_encodeSnappyBlockAsm64K: - MOVL DX, 12(SP) - JMP search_loop_encodeSnappyBlockAsm64K - -no_repeat_found_encodeSnappyBlockAsm64K: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeSnappyBlockAsm64K - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeSnappyBlockAsm64K - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeSnappyBlockAsm64K - MOVL 20(SP), DX - JMP search_loop_encodeSnappyBlockAsm64K - -candidate3_match_encodeSnappyBlockAsm64K: - ADDL $0x02, DX - JMP candidate_match_encodeSnappyBlockAsm64K - -candidate2_match_encodeSnappyBlockAsm64K: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_encodeSnappyBlockAsm64K: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeSnappyBlockAsm64K - -match_extend_back_loop_encodeSnappyBlockAsm64K: - CMPL DX, DI - JBE match_extend_back_end_encodeSnappyBlockAsm64K - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeSnappyBlockAsm64K - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeSnappyBlockAsm64K - JMP match_extend_back_loop_encodeSnappyBlockAsm64K - -match_extend_back_end_encodeSnappyBlockAsm64K: - MOVL DX, DI - 
SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeSnappyBlockAsm64K - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeSnappyBlockAsm64K: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeSnappyBlockAsm64K - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeSnappyBlockAsm64K - JB three_bytes_match_emit_encodeSnappyBlockAsm64K - -three_bytes_match_emit_encodeSnappyBlockAsm64K: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeSnappyBlockAsm64K - -two_bytes_match_emit_encodeSnappyBlockAsm64K: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeSnappyBlockAsm64K - JMP memmove_long_match_emit_encodeSnappyBlockAsm64K - -one_byte_match_emit_encodeSnappyBlockAsm64K: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeSnappyBlockAsm64K: - LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeSnappyBlockAsm64K: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K - -memmove_long_match_emit_encodeSnappyBlockAsm64K: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE 
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeSnappyBlockAsm64K: -match_nolit_loop_encodeSnappyBlockAsm64K: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeSnappyBlockAsm64K - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K - -matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeSnappyBlockAsm64K - -matchlen_match8_match_nolit_encodeSnappyBlockAsm64K: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm64K - -matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeSnappyBlockAsm64K - -matchlen_match4_match_nolit_encodeSnappyBlockAsm64K: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_encodeSnappyBlockAsm64K: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K - JB match_nolit_end_encodeSnappyBlockAsm64K - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_encodeSnappyBlockAsm64K - -matchlen_match1_match_nolit_encodeSnappyBlockAsm64K: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeSnappyBlockAsm64K - LEAL 1(R10), R10 - -match_nolit_end_encodeSnappyBlockAsm64K: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy -two_byte_offset_match_nolit_encodeSnappyBlockAsm64K: - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K - -two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K - -emit_copy_three_match_nolit_encodeSnappyBlockAsm64K: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeSnappyBlockAsm64K: - CMPL DX, 8(SP) - JAE emit_remainder_encodeSnappyBlockAsm64K - MOVQ 
-2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeSnappyBlockAsm64K - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeSnappyBlockAsm64K: - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x32, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x32, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeSnappyBlockAsm64K - INCL DX - JMP search_loop_encodeSnappyBlockAsm64K - -emit_remainder_encodeSnappyBlockAsm64K: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeSnappyBlockAsm64K - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeSnappyBlockAsm64K: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeSnappyBlockAsm64K - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeSnappyBlockAsm64K - JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K - -three_bytes_emit_remainder_encodeSnappyBlockAsm64K: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K - -two_bytes_emit_remainder_encodeSnappyBlockAsm64K: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeSnappyBlockAsm64K - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K - -one_byte_emit_remainder_encodeSnappyBlockAsm64K: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeSnappyBlockAsm64K: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K - 
-emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K - -memmove_long_emit_remainder_encodeSnappyBlockAsm64K: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeSnappyBlockAsm12B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000080, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeSnappyBlockAsm12B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeSnappyBlockAsm12B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeSnappyBlockAsm12B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeSnappyBlockAsm12B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x000000cf1bbcdcbb, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x18, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - SHLQ $0x18, R11 - IMULQ R9, R11 - SHRQ $0x34, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x18, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm12B - LEAL 1(DX), DI - MOVL 12(SP), SI - MOVL DI, R8 - SUBL 16(SP), R8 - JZ repeat_extend_back_end_encodeSnappyBlockAsm12B - -repeat_extend_back_loop_encodeSnappyBlockAsm12B: - CMPL DI, SI - JBE repeat_extend_back_end_encodeSnappyBlockAsm12B - MOVB -1(BX)(R8*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeSnappyBlockAsm12B - LEAL 
-1(DI), DI - DECL R8 - JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B - -repeat_extend_back_end_encodeSnappyBlockAsm12B: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 3(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_encodeSnappyBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeSnappyBlockAsm12B: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeSnappyBlockAsm12B - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B - JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B - -three_bytes_repeat_emit_encodeSnappyBlockAsm12B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B - -two_bytes_repeat_emit_encodeSnappyBlockAsm12B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeSnappyBlockAsm12B - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B - -one_byte_repeat_emit_encodeSnappyBlockAsm12B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_repeat_emit_encodeSnappyBlockAsm12B: - LEAQ (CX)(R8*1), SI - - // genMemMoveShort - CMPQ R8, $0x08 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8 - CMPQ R8, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 - CMPQ R8, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (CX) - MOVQ R9, -8(CX)(R8*1) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R8*1) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - -memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B - -memmove_long_repeat_emit_encodeSnappyBlockAsm12B: - LEAQ (CX)(R8*1), SI - - // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU -16(R9)(R12*1), 
X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 - JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R11, R11 - -matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B: - CMPL R8, $0x10 - JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B - MOVQ (R9)(R11*1), R10 - MOVQ 8(R9)(R11*1), R12 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B - XORQ 8(SI)(R11*1), R12 - JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B - LEAL -16(R8), R8 - LEAL 16(R11), R11 - JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B - -matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R12, R12 - -#else - BSFQ R12, R12 - -#endif - SARQ $0x03, R12 - LEAL 8(R11)(R12*1), R11 - JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B - -matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B: - CMPL R8, $0x08 - JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B - LEAL -8(R8), R8 - LEAL 8(R11), R11 - JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B - -matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R10, R10 - -#else - BSFQ R10, R10 - -#endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 - JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B - -matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B: - CMPL R8, $0x04 - JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 - JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B - LEAL -4(R8), R8 - LEAL 4(R11), R11 - -matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B: - CMPL R8, $0x01 - JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B - JB repeat_extend_forward_end_encodeSnappyBlockAsm12B - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 - JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B - LEAL 2(R11), R11 - SUBL $0x02, R8 - JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B - -matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B: - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 - JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B - LEAL 1(R11), R11 - -repeat_extend_forward_end_encodeSnappyBlockAsm12B: - ADDL R11, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - - // emitCopy -two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B: - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B - -two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B - CMPL DI, $0x00000800 - JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeSnappyBlockAsm12B - -emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, 
CX - -repeat_end_emit_encodeSnappyBlockAsm12B: - MOVL DX, 12(SP) - JMP search_loop_encodeSnappyBlockAsm12B - -no_repeat_found_encodeSnappyBlockAsm12B: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeSnappyBlockAsm12B - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeSnappyBlockAsm12B - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeSnappyBlockAsm12B - MOVL 20(SP), DX - JMP search_loop_encodeSnappyBlockAsm12B - -candidate3_match_encodeSnappyBlockAsm12B: - ADDL $0x02, DX - JMP candidate_match_encodeSnappyBlockAsm12B - -candidate2_match_encodeSnappyBlockAsm12B: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_encodeSnappyBlockAsm12B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeSnappyBlockAsm12B - -match_extend_back_loop_encodeSnappyBlockAsm12B: - CMPL DX, DI - JBE match_extend_back_end_encodeSnappyBlockAsm12B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeSnappyBlockAsm12B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeSnappyBlockAsm12B - JMP match_extend_back_loop_encodeSnappyBlockAsm12B - -match_extend_back_end_encodeSnappyBlockAsm12B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeSnappyBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeSnappyBlockAsm12B: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeSnappyBlockAsm12B - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeSnappyBlockAsm12B - JB three_bytes_match_emit_encodeSnappyBlockAsm12B - -three_bytes_match_emit_encodeSnappyBlockAsm12B: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeSnappyBlockAsm12B - -two_bytes_match_emit_encodeSnappyBlockAsm12B: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeSnappyBlockAsm12B - JMP memmove_long_match_emit_encodeSnappyBlockAsm12B - -one_byte_match_emit_encodeSnappyBlockAsm12B: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeSnappyBlockAsm12B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU 
-16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeSnappyBlockAsm12B: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B - -memmove_long_match_emit_encodeSnappyBlockAsm12B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeSnappyBlockAsm12B: -match_nolit_loop_encodeSnappyBlockAsm12B: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeSnappyBlockAsm12B - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B - -matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeSnappyBlockAsm12B - -matchlen_match8_match_nolit_encodeSnappyBlockAsm12B: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm12B - -matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeSnappyBlockAsm12B - -matchlen_match4_match_nolit_encodeSnappyBlockAsm12B: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_encodeSnappyBlockAsm12B: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B - JB match_nolit_end_encodeSnappyBlockAsm12B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_encodeSnappyBlockAsm12B - 
-matchlen_match1_match_nolit_encodeSnappyBlockAsm12B: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeSnappyBlockAsm12B - LEAL 1(R10), R10 - -match_nolit_end_encodeSnappyBlockAsm12B: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy -two_byte_offset_match_nolit_encodeSnappyBlockAsm12B: - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B - -two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B - -emit_copy_three_match_nolit_encodeSnappyBlockAsm12B: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeSnappyBlockAsm12B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeSnappyBlockAsm12B - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeSnappyBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeSnappyBlockAsm12B: - MOVQ $0x000000cf1bbcdcbb, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x18, R8 - IMULQ R9, R8 - SHRQ $0x34, R8 - SHLQ $0x18, SI - IMULQ R9, SI - SHRQ $0x34, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeSnappyBlockAsm12B - INCL DX - JMP search_loop_encodeSnappyBlockAsm12B - -emit_remainder_encodeSnappyBlockAsm12B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeSnappyBlockAsm12B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeSnappyBlockAsm12B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeSnappyBlockAsm12B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeSnappyBlockAsm12B - JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B - -three_bytes_emit_remainder_encodeSnappyBlockAsm12B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B - -two_bytes_emit_remainder_encodeSnappyBlockAsm12B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeSnappyBlockAsm12B - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B - -one_byte_emit_remainder_encodeSnappyBlockAsm12B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeSnappyBlockAsm12B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32 - JMP 
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B - -memmove_long_emit_remainder_encodeSnappyBlockAsm12B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeSnappyBlockAsm10B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000020, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeSnappyBlockAsm10B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeSnappyBlockAsm10B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), 
BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeSnappyBlockAsm10B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeSnappyBlockAsm10B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x9e3779b1, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R9, R11 - SHRQ $0x36, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_encodeSnappyBlockAsm10B - LEAL 1(DX), DI - MOVL 12(SP), SI - MOVL DI, R8 - SUBL 16(SP), R8 - JZ repeat_extend_back_end_encodeSnappyBlockAsm10B - -repeat_extend_back_loop_encodeSnappyBlockAsm10B: - CMPL DI, SI - JBE repeat_extend_back_end_encodeSnappyBlockAsm10B - MOVB -1(BX)(R8*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_encodeSnappyBlockAsm10B - LEAL -1(DI), DI - DECL R8 - JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B - -repeat_extend_back_end_encodeSnappyBlockAsm10B: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 3(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_encodeSnappyBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -repeat_dst_size_check_encodeSnappyBlockAsm10B: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_encodeSnappyBlockAsm10B - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B - JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B - -three_bytes_repeat_emit_encodeSnappyBlockAsm10B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B - -two_bytes_repeat_emit_encodeSnappyBlockAsm10B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_encodeSnappyBlockAsm10B - JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B - -one_byte_repeat_emit_encodeSnappyBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_repeat_emit_encodeSnappyBlockAsm10B: - LEAQ (CX)(R8*1), SI - - // genMemMoveShort - CMPQ R8, $0x08 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8 - CMPQ R8, $0x10 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 - CMPQ R8, $0x20 - JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8: - MOVQ (R9), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: - MOVQ (R9), R10 - MOVQ -8(R9)(R8*1), R9 - MOVQ R10, (CX) - MOVQ R9, -8(CX)(R8*1) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: - MOVOU (R9), X0 - MOVOU -16(R9)(R8*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R8*1) - JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B - -emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: - MOVOU (R9), X0 
- MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - -memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B: - MOVQ SI, CX - JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B - -memmove_long_repeat_emit_encodeSnappyBlockAsm10B: - LEAQ (CX)(R8*1), SI - - // genMemMoveLong - MOVOU (R9), X0 - MOVOU 16(R9), X1 - MOVOU -32(R9)(R8*1), X2 - MOVOU -16(R9)(R8*1), X3 - MOVQ R8, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R9)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back - -emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R9)(R12*1), X4 - MOVOU -16(R9)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R8, R12 - JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R8*1) - MOVOU X3, -16(CX)(R8*1) - MOVQ SI, CX - -emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R11, R11 - -matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B: - CMPL R8, $0x10 - JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B - MOVQ (R9)(R11*1), R10 - MOVQ 8(R9)(R11*1), R12 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B - XORQ 8(SI)(R11*1), R12 - JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B - LEAL -16(R8), R8 - LEAL 16(R11), R11 - JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B - -matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R12, R12 - -#else - BSFQ R12, R12 - -#endif - SARQ $0x03, R12 - LEAL 8(R11)(R12*1), R11 - JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B - -matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B: - CMPL R8, $0x08 - JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B - LEAL -8(R8), R8 - LEAL 8(R11), R11 - JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B - -matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R10, R10 - -#else - BSFQ R10, R10 - -#endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 - JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B - -matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B: - CMPL R8, $0x04 - JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 - JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B - LEAL -4(R8), R8 - LEAL 4(R11), R11 - -matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B: - CMPL R8, $0x01 - JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B - JB repeat_extend_forward_end_encodeSnappyBlockAsm10B - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 - JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B - LEAL 2(R11), R11 - SUBL $0x02, R8 - 
JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B - -matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B: - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 - JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B - LEAL 1(R11), R11 - -repeat_extend_forward_end_encodeSnappyBlockAsm10B: - ADDL R11, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - - // emitCopy -two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B: - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B - MOVB $0xee, (CX) - MOVW DI, 1(CX) - LEAL -60(SI), SI - ADDQ $0x03, CX - JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B - -two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B - CMPL DI, $0x00000800 - JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B - LEAL -15(R8), R8 - MOVB DI, 1(CX) - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, R8 - MOVB R8, (CX) - ADDQ $0x02, CX - JMP repeat_end_emit_encodeSnappyBlockAsm10B - -emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B: - LEAL -2(R8), R8 - MOVB R8, (CX) - MOVW DI, 1(CX) - ADDQ $0x03, CX - -repeat_end_emit_encodeSnappyBlockAsm10B: - MOVL DX, 12(SP) - JMP search_loop_encodeSnappyBlockAsm10B - -no_repeat_found_encodeSnappyBlockAsm10B: - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeSnappyBlockAsm10B - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_encodeSnappyBlockAsm10B - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_encodeSnappyBlockAsm10B - MOVL 20(SP), DX - JMP search_loop_encodeSnappyBlockAsm10B - -candidate3_match_encodeSnappyBlockAsm10B: - ADDL $0x02, DX - JMP candidate_match_encodeSnappyBlockAsm10B - -candidate2_match_encodeSnappyBlockAsm10B: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_encodeSnappyBlockAsm10B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeSnappyBlockAsm10B - -match_extend_back_loop_encodeSnappyBlockAsm10B: - CMPL DX, DI - JBE match_extend_back_end_encodeSnappyBlockAsm10B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeSnappyBlockAsm10B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeSnappyBlockAsm10B - JMP match_extend_back_loop_encodeSnappyBlockAsm10B - -match_extend_back_end_encodeSnappyBlockAsm10B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeSnappyBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeSnappyBlockAsm10B: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), R8 - CMPL R8, $0x3c - JB one_byte_match_emit_encodeSnappyBlockAsm10B - CMPL R8, $0x00000100 - JB two_bytes_match_emit_encodeSnappyBlockAsm10B - JB three_bytes_match_emit_encodeSnappyBlockAsm10B - -three_bytes_match_emit_encodeSnappyBlockAsm10B: - MOVB $0xf4, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeSnappyBlockAsm10B - -two_bytes_match_emit_encodeSnappyBlockAsm10B: - MOVB $0xf0, (CX) - MOVB R8, 1(CX) - ADDQ $0x02, CX - CMPL R8, $0x40 - JB memmove_match_emit_encodeSnappyBlockAsm10B - JMP memmove_long_match_emit_encodeSnappyBlockAsm10B - -one_byte_match_emit_encodeSnappyBlockAsm10B: - SHLB $0x02, R8 - MOVB R8, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeSnappyBlockAsm10B: - 
LEAQ (CX)(R9*1), R8 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8: - MOVQ (DI), R10 - MOVQ R10, (CX) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: - MOVQ (DI), R10 - MOVQ -8(DI)(R9*1), DI - MOVQ R10, (CX) - MOVQ DI, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: - MOVOU (DI), X0 - MOVOU -16(DI)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B - -emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeSnappyBlockAsm10B: - MOVQ R8, CX - JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B - -memmove_long_match_emit_encodeSnappyBlockAsm10B: - LEAQ (CX)(R9*1), R8 - - // genMemMoveLong - MOVOU (DI), X0 - MOVOU 16(DI), X1 - MOVOU -32(DI)(R9*1), X2 - MOVOU -16(DI)(R9*1), X3 - MOVQ R9, R11 - SHRQ $0x05, R11 - MOVQ CX, R10 - ANDL $0x0000001f, R10 - MOVQ $0x00000040, R12 - SUBQ R10, R12 - DECQ R11 - JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(DI)(R12*1), R10 - LEAQ -32(CX)(R12*1), R13 - -emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: - MOVOU (R10), X4 - MOVOU 16(R10), X5 - MOVOA X4, (R13) - MOVOA X5, 16(R13) - ADDQ $0x20, R13 - ADDQ $0x20, R10 - ADDQ $0x20, R12 - DECQ R11 - JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(DI)(R12*1), X4 - MOVOU -16(DI)(R12*1), X5 - MOVOA X4, -32(CX)(R12*1) - MOVOA X5, -16(CX)(R12*1) - ADDQ $0x20, R12 - CMPQ R9, R12 - JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ R8, CX - -emit_literal_done_match_emit_encodeSnappyBlockAsm10B: -match_nolit_loop_encodeSnappyBlockAsm10B: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_encodeSnappyBlockAsm10B - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B - -matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_encodeSnappyBlockAsm10B - 
-matchlen_match8_match_nolit_encodeSnappyBlockAsm10B: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm10B - -matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_encodeSnappyBlockAsm10B - -matchlen_match4_match_nolit_encodeSnappyBlockAsm10B: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_encodeSnappyBlockAsm10B: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B - JB match_nolit_end_encodeSnappyBlockAsm10B - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_encodeSnappyBlockAsm10B - -matchlen_match1_match_nolit_encodeSnappyBlockAsm10B: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_encodeSnappyBlockAsm10B - LEAL 1(R10), R10 - -match_nolit_end_encodeSnappyBlockAsm10B: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy -two_byte_offset_match_nolit_encodeSnappyBlockAsm10B: - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B - MOVB $0xee, (CX) - MOVW SI, 1(CX) - LEAL -60(R10), R10 - ADDQ $0x03, CX - JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B - -two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B - LEAL -15(DI), DI - MOVB SI, 1(CX) - SHRL $0x08, SI - SHLL $0x05, SI - ORL SI, DI - MOVB DI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B - -emit_copy_three_match_nolit_encodeSnappyBlockAsm10B: - LEAL -2(DI), DI - MOVB DI, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeSnappyBlockAsm10B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeSnappyBlockAsm10B - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeSnappyBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeSnappyBlockAsm10B: - MOVQ $0x9e3779b1, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x20, R8 - IMULQ R9, R8 - SHRQ $0x36, R8 - SHLQ $0x20, SI - IMULQ R9, SI - SHRQ $0x36, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_encodeSnappyBlockAsm10B - INCL DX - JMP search_loop_encodeSnappyBlockAsm10B - -emit_remainder_encodeSnappyBlockAsm10B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeSnappyBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeSnappyBlockAsm10B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeSnappyBlockAsm10B - CMPL DX, $0x00000100 - JB 
two_bytes_emit_remainder_encodeSnappyBlockAsm10B - JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B - -three_bytes_emit_remainder_encodeSnappyBlockAsm10B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B - -two_bytes_emit_remainder_encodeSnappyBlockAsm10B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeSnappyBlockAsm10B - JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B - -one_byte_emit_remainder_encodeSnappyBlockAsm10B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeSnappyBlockAsm10B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B - -memmove_long_emit_remainder_encodeSnappyBlockAsm10B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back - 
[vendored generated-code removal, continued: deleted avo-generated amd64 assembly (Requires: BMI, SSE2) covering the remainder of encodeSnappyBlockAsm10B, then encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int, encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int, encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int, encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int, and the beginning of encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int]
-no_short_found_encodeSnappyBetterBlockAsm10B: - CMPL R10, DI - JEQ candidate_match_encodeSnappyBetterBlockAsm10B - CMPL R11, DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm10B - MOVL 20(SP), DX - JMP search_loop_encodeSnappyBetterBlockAsm10B - -candidateS_match_encodeSnappyBetterBlockAsm10B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x34, R10 - MOVL (AX)(R10*4), SI - INCL DX - MOVL DX, (AX)(R10*4) - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeSnappyBetterBlockAsm10B - DECL DX - MOVL R8, SI - -candidate_match_encodeSnappyBetterBlockAsm10B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B - -match_extend_back_loop_encodeSnappyBetterBlockAsm10B: - CMPL DX, DI - JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B - JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B - -match_extend_back_end_encodeSnappyBetterBlockAsm10B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeSnappyBetterBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeSnappyBetterBlockAsm10B: - MOVL DX, DI - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), R10 - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R8, $0x10 - JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B - MOVQ (R9)(R12*1), R11 - MOVQ 8(R9)(R12*1), R13 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B - XORQ 8(R10)(R12*1), R13 - JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B - LEAL -16(R8), R8 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B - -matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP match_nolit_end_encodeSnappyBetterBlockAsm10B - -matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R8, $0x08 - JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B - LEAL -8(R8), R8 - LEAL 8(R12), R12 - JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B - -matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP match_nolit_end_encodeSnappyBetterBlockAsm10B - -matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R8, $0x04 - JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 - JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B - LEAL -4(R8), R8 - LEAL 4(R12), R12 - -matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R8, $0x01 - JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B - JB match_nolit_end_encodeSnappyBetterBlockAsm10B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 - JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B - LEAL 2(R12), R12 - SUBL $0x02, R8 - JZ match_nolit_end_encodeSnappyBetterBlockAsm10B - -matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B: - 
MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 - JNE match_nolit_end_encodeSnappyBetterBlockAsm10B - LEAL 1(R12), R12 - -match_nolit_end_encodeSnappyBetterBlockAsm10B: - MOVL DX, R8 - SUBL SI, R8 - - // Check if repeat - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B - CMPL SI, $0x00000100 - JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B - JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B - -three_bytes_match_emit_encodeSnappyBetterBlockAsm10B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B - -two_bytes_match_emit_encodeSnappyBetterBlockAsm10B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_encodeSnappyBetterBlockAsm10B - JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B - -one_byte_match_emit_encodeSnappyBetterBlockAsm10B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeSnappyBetterBlockAsm10B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (CX) - JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B - -emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B - -emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B - -emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B: - MOVQ SI, CX - JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B - -memmove_long_match_emit_encodeSnappyBetterBlockAsm10B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back - 
-emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitCopy -two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B: - CMPL R12, $0x40 - JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B - MOVB $0xee, (CX) - MOVW R8, 1(CX) - LEAL -60(R12), R12 - ADDQ $0x03, CX - JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B - -two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B: - MOVL R12, SI - SHLL $0x02, SI - CMPL R12, $0x0c - JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B - CMPL R8, $0x00000800 - JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B - LEAL -15(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B - -emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B: - LEAL -2(SI), SI - MOVB SI, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeSnappyBetterBlockAsm10B - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - LEAQ 1(DI), DI - LEAQ -2(DX), R9 - MOVQ (BX)(DI*1), R10 - MOVQ 1(BX)(DI*1), R11 - MOVQ (BX)(R9*1), R12 - MOVQ 1(BX)(R9*1), R13 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x34, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x36, R11 - SHLQ $0x10, R12 - IMULQ SI, R12 - SHRQ $0x34, R12 - SHLQ $0x20, R13 - IMULQ R8, R13 - SHRQ $0x36, R13 - LEAQ 1(DI), R8 - LEAQ 1(R9), R14 - MOVL DI, (AX)(R10*4) - MOVL R9, (AX)(R12*4) - MOVL R8, 16384(AX)(R11*4) - MOVL R14, 16384(AX)(R13*4) - LEAQ 1(R9)(DI*1), R8 - SHRQ $0x01, R8 - ADDQ $0x01, DI - SUBQ $0x01, R9 - -index_loop_encodeSnappyBetterBlockAsm10B: - CMPQ R8, R9 - JAE search_loop_encodeSnappyBetterBlockAsm10B - MOVQ (BX)(DI*1), R10 - MOVQ (BX)(R8*1), R11 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x34, R10 - SHLQ $0x10, R11 - IMULQ SI, R11 - SHRQ $0x34, R11 - MOVL DI, (AX)(R10*4) - MOVL R8, (AX)(R11*4) - ADDQ $0x02, DI - ADDQ $0x02, R8 - JMP index_loop_encodeSnappyBetterBlockAsm10B - -emit_remainder_encodeSnappyBetterBlockAsm10B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeSnappyBetterBlockAsm10B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B - JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B - -three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP 
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B - -two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B - JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B - -one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeSnappyBetterBlockAsm10B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B - -memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back - 
-emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int -// Requires: BMI, SSE2 -TEXT ·encodeSnappyBetterBlockAsm8B(SB), $24-64 - MOVQ tmp+48(FP), AX - MOVQ dst_base+0(FP), CX - MOVQ $0x00000028, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_encodeSnappyBetterBlockAsm8B: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_encodeSnappyBetterBlockAsm8B - MOVL $0x00000000, 12(SP) - MOVQ src_len+32(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL $0x00000000, 16(SP) - MOVQ src_base+24(FP), BX - -search_loop_encodeSnappyBetterBlockAsm8B: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x04, SI - LEAL 1(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_encodeSnappyBetterBlockAsm8B - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ $0x9e3779b1, SI - MOVQ DI, R10 - MOVQ DI, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ SI, R11 - SHRQ $0x38, R11 - MOVL (AX)(R10*4), SI - MOVL 4096(AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - MOVL DX, 4096(AX)(R11*4) - MOVQ (BX)(SI*1), R10 - MOVQ (BX)(R8*1), R11 - CMPQ R10, DI - JEQ candidate_match_encodeSnappyBetterBlockAsm8B - CMPQ R11, DI - JNE no_short_found_encodeSnappyBetterBlockAsm8B - MOVL R8, SI - JMP candidate_match_encodeSnappyBetterBlockAsm8B - -no_short_found_encodeSnappyBetterBlockAsm8B: - CMPL R10, DI - JEQ candidate_match_encodeSnappyBetterBlockAsm8B - CMPL R11, DI - JEQ candidateS_match_encodeSnappyBetterBlockAsm8B - MOVL 20(SP), DX - JMP search_loop_encodeSnappyBetterBlockAsm8B - -candidateS_match_encodeSnappyBetterBlockAsm8B: - SHRQ $0x08, DI - MOVQ DI, R10 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x36, R10 - MOVL (AX)(R10*4), SI - INCL DX - MOVL DX, (AX)(R10*4) - CMPL (BX)(SI*1), DI - JEQ candidate_match_encodeSnappyBetterBlockAsm8B - DECL DX - MOVL R8, SI - -candidate_match_encodeSnappyBetterBlockAsm8B: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B - -match_extend_back_loop_encodeSnappyBetterBlockAsm8B: - CMPL DX, DI - JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B - JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B - -match_extend_back_end_encodeSnappyBetterBlockAsm8B: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_encodeSnappyBetterBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -match_dst_size_check_encodeSnappyBetterBlockAsm8B: - MOVL DX, DI - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+32(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), 
R10 - - // matchLen - XORL R12, R12 - -matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R8, $0x10 - JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B - MOVQ (R9)(R12*1), R11 - MOVQ 8(R9)(R12*1), R13 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B - XORQ 8(R10)(R12*1), R13 - JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B - LEAL -16(R8), R8 - LEAL 16(R12), R12 - JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B - -matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B: -#ifdef GOAMD64_v3 - TZCNTQ R13, R13 - -#else - BSFQ R13, R13 - -#endif - SARQ $0x03, R13 - LEAL 8(R12)(R13*1), R12 - JMP match_nolit_end_encodeSnappyBetterBlockAsm8B - -matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R8, $0x08 - JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B - MOVQ (R9)(R12*1), R11 - XORQ (R10)(R12*1), R11 - JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B - LEAL -8(R8), R8 - LEAL 8(R12), R12 - JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B - -matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL (R12)(R11*1), R12 - JMP match_nolit_end_encodeSnappyBetterBlockAsm8B - -matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R8, $0x04 - JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B - MOVL (R9)(R12*1), R11 - CMPL (R10)(R12*1), R11 - JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B - LEAL -4(R8), R8 - LEAL 4(R12), R12 - -matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R8, $0x01 - JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B - JB match_nolit_end_encodeSnappyBetterBlockAsm8B - MOVW (R9)(R12*1), R11 - CMPW (R10)(R12*1), R11 - JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B - LEAL 2(R12), R12 - SUBL $0x02, R8 - JZ match_nolit_end_encodeSnappyBetterBlockAsm8B - -matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B: - MOVB (R9)(R12*1), R11 - CMPB (R10)(R12*1), R11 - JNE match_nolit_end_encodeSnappyBetterBlockAsm8B - LEAL 1(R12), R12 - -match_nolit_end_encodeSnappyBetterBlockAsm8B: - MOVL DX, R8 - SUBL SI, R8 - - // Check if repeat - MOVL R8, 16(SP) - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R10 - SUBL SI, R9 - LEAL -1(R9), SI - CMPL SI, $0x3c - JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B - CMPL SI, $0x00000100 - JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B - JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B - -three_bytes_match_emit_encodeSnappyBetterBlockAsm8B: - MOVB $0xf4, (CX) - MOVW SI, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B - -two_bytes_match_emit_encodeSnappyBetterBlockAsm8B: - MOVB $0xf0, (CX) - MOVB SI, 1(CX) - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_match_emit_encodeSnappyBetterBlockAsm8B - JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B - -one_byte_match_emit_encodeSnappyBetterBlockAsm8B: - SHLB $0x02, SI - MOVB SI, (CX) - ADDQ $0x01, CX - -memmove_match_emit_encodeSnappyBetterBlockAsm8B: - LEAQ (CX)(R9*1), SI - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 - CMPQ R9, $0x20 - JBE 
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 - JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 - -emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8: - MOVQ (R10), R11 - MOVQ R11, (CX) - JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B - -emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: - MOVQ (R10), R11 - MOVQ -8(R10)(R9*1), R10 - MOVQ R11, (CX) - MOVQ R10, -8(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B - -emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: - MOVOU (R10), X0 - MOVOU -16(R10)(R9*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(R9*1) - JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B - -emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - -memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B: - MOVQ SI, CX - JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B - -memmove_long_match_emit_encodeSnappyBetterBlockAsm8B: - LEAQ (CX)(R9*1), SI - - // genMemMoveLong - MOVOU (R10), X0 - MOVOU 16(R10), X1 - MOVOU -32(R10)(R9*1), X2 - MOVOU -16(R10)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ CX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R14 - SUBQ R11, R14 - DECQ R13 - JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(R10)(R14*1), R11 - LEAQ -32(CX)(R14*1), R15 - -emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R11 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back - -emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(R10)(R14*1), X4 - MOVOU -16(R10)(R14*1), X5 - MOVOA X4, -32(CX)(R14*1) - MOVOA X5, -16(CX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(R9*1) - MOVOU X3, -16(CX)(R9*1) - MOVQ SI, CX - -emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B: - ADDL R12, DX - ADDL $0x04, R12 - MOVL DX, 12(SP) - - // emitCopy -two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B: - CMPL R12, $0x40 - JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B - MOVB $0xee, (CX) - MOVW R8, 1(CX) - LEAL -60(R12), R12 - ADDQ $0x03, CX - JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B - -two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B: - MOVL R12, SI - SHLL $0x02, SI - CMPL R12, $0x0c - JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B - LEAL -15(SI), SI - MOVB R8, 1(CX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, SI - MOVB SI, (CX) - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B - -emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B: - LEAL -2(SI), SI - MOVB SI, (CX) - MOVW R8, 1(CX) - ADDQ $0x03, CX - -match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: - CMPL DX, 8(SP) - JAE emit_remainder_encodeSnappyBetterBlockAsm8B - CMPQ CX, (SP) - JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - 
-match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: - MOVQ $0x0000cf1bbcdcbf9b, SI - MOVQ $0x9e3779b1, R8 - LEAQ 1(DI), DI - LEAQ -2(DX), R9 - MOVQ (BX)(DI*1), R10 - MOVQ 1(BX)(DI*1), R11 - MOVQ (BX)(R9*1), R12 - MOVQ 1(BX)(R9*1), R13 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x36, R10 - SHLQ $0x20, R11 - IMULQ R8, R11 - SHRQ $0x38, R11 - SHLQ $0x10, R12 - IMULQ SI, R12 - SHRQ $0x36, R12 - SHLQ $0x20, R13 - IMULQ R8, R13 - SHRQ $0x38, R13 - LEAQ 1(DI), R8 - LEAQ 1(R9), R14 - MOVL DI, (AX)(R10*4) - MOVL R9, (AX)(R12*4) - MOVL R8, 4096(AX)(R11*4) - MOVL R14, 4096(AX)(R13*4) - LEAQ 1(R9)(DI*1), R8 - SHRQ $0x01, R8 - ADDQ $0x01, DI - SUBQ $0x01, R9 - -index_loop_encodeSnappyBetterBlockAsm8B: - CMPQ R8, R9 - JAE search_loop_encodeSnappyBetterBlockAsm8B - MOVQ (BX)(DI*1), R10 - MOVQ (BX)(R8*1), R11 - SHLQ $0x10, R10 - IMULQ SI, R10 - SHRQ $0x36, R10 - SHLQ $0x10, R11 - IMULQ SI, R11 - SHRQ $0x36, R11 - MOVL DI, (AX)(R10*4) - MOVL R8, (AX)(R11*4) - ADDQ $0x02, DI - ADDQ $0x02, R8 - JMP index_loop_encodeSnappyBetterBlockAsm8B - -emit_remainder_encodeSnappyBetterBlockAsm8B: - MOVQ src_len+32(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B - MOVQ $0x00000000, ret+56(FP) - RET - -emit_remainder_ok_encodeSnappyBetterBlockAsm8B: - MOVQ src_len+32(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), DX - CMPL DX, $0x3c - JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B - CMPL DX, $0x00000100 - JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B - JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B - -three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: - MOVB $0xf4, (CX) - MOVW DX, 1(CX) - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B - -two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: - MOVB $0xf0, (CX) - MOVB DL, 1(CX) - ADDQ $0x02, CX - CMPL DX, $0x40 - JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B - JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B - -one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B: - SHLB $0x02, DL - MOVB DL, (CX) - ADDQ $0x01, CX - -memmove_emit_remainder_encodeSnappyBetterBlockAsm8B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveShort - CMPQ BX, $0x03 - JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2 - JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3 - CMPQ BX, $0x08 - JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7 - CMPQ BX, $0x10 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 - CMPQ BX, $0x20 - JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 - JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2: - MOVB (AX), SI - MOVB -1(AX)(BX*1), AL - MOVB SI, (CX) - MOVB AL, -1(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3: - MOVW (AX), SI - MOVB 2(AX), AL - MOVW SI, (CX) - MOVB AL, 2(CX) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7: - MOVL (AX), SI - MOVL -4(AX)(BX*1), AX - MOVL 
SI, (CX) - MOVL AX, -4(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: - MOVQ (AX), SI - MOVQ -8(AX)(BX*1), AX - MOVQ SI, (CX) - MOVQ AX, -8(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: - MOVOU (AX), X0 - MOVOU -16(AX)(BX*1), X1 - MOVOU X0, (CX) - MOVOU X1, -16(CX)(BX*1) - JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B - -emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - -memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B: - MOVQ DX, CX - JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B - -memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B: - LEAQ (CX)(SI*1), DX - MOVL SI, BX - - // genMemMoveLong - MOVOU (AX), X0 - MOVOU 16(AX), X1 - MOVOU -32(AX)(BX*1), X2 - MOVOU -16(AX)(BX*1), X3 - MOVQ BX, DI - SHRQ $0x05, DI - MOVQ CX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 - LEAQ -32(AX)(R8*1), SI - LEAQ -32(CX)(R8*1), R9 - -emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back - -emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: - MOVOU -32(AX)(R8*1), X4 - MOVOU -16(AX)(R8*1), X5 - MOVOA X4, -32(CX)(R8*1) - MOVOA X5, -16(CX)(R8*1) - ADDQ $0x20, R8 - CMPQ BX, R8 - JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 - MOVOU X0, (CX) - MOVOU X1, 16(CX) - MOVOU X2, -32(CX)(BX*1) - MOVOU X3, -16(CX)(BX*1) - MOVQ DX, CX - -emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B: - MOVQ dst_base+0(FP), AX - SUBQ AX, CX - MOVQ CX, ret+56(FP) - RET - -// func calcBlockSize(src []byte, tmp *[32768]byte) int -// Requires: BMI, SSE2 -TEXT ·calcBlockSize(SB), $24-40 - MOVQ tmp+24(FP), AX - XORQ CX, CX - MOVQ $0x00000100, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_calcBlockSize: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_calcBlockSize - MOVL $0x00000000, 12(SP) - MOVQ src_len+8(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+0(FP), BX - -search_loop_calcBlockSize: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x05, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_calcBlockSize - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x10, R10 - IMULQ R9, R10 - SHRQ $0x33, R10 - SHLQ $0x10, R11 - IMULQ R9, R11 - SHRQ $0x33, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x10, R10 
- IMULQ R9, R10 - SHRQ $0x33, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_calcBlockSize - LEAL 1(DX), DI - MOVL 12(SP), SI - MOVL DI, R8 - SUBL 16(SP), R8 - JZ repeat_extend_back_end_calcBlockSize - -repeat_extend_back_loop_calcBlockSize: - CMPL DI, SI - JBE repeat_extend_back_end_calcBlockSize - MOVB -1(BX)(R8*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_calcBlockSize - LEAL -1(DI), DI - DECL R8 - JNZ repeat_extend_back_loop_calcBlockSize - -repeat_extend_back_end_calcBlockSize: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 5(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_calcBlockSize - MOVQ $0x00000000, ret+32(FP) - RET - -repeat_dst_size_check_calcBlockSize: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_calcBlockSize - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_calcBlockSize - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_calcBlockSize - CMPL SI, $0x00010000 - JB three_bytes_repeat_emit_calcBlockSize - CMPL SI, $0x01000000 - JB four_bytes_repeat_emit_calcBlockSize - ADDQ $0x05, CX - JMP memmove_long_repeat_emit_calcBlockSize - -four_bytes_repeat_emit_calcBlockSize: - ADDQ $0x04, CX - JMP memmove_long_repeat_emit_calcBlockSize - -three_bytes_repeat_emit_calcBlockSize: - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_calcBlockSize - -two_bytes_repeat_emit_calcBlockSize: - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_calcBlockSize - JMP memmove_long_repeat_emit_calcBlockSize - -one_byte_repeat_emit_calcBlockSize: - ADDQ $0x01, CX - -memmove_repeat_emit_calcBlockSize: - LEAQ (CX)(R8*1), CX - JMP emit_literal_done_repeat_emit_calcBlockSize - -memmove_long_repeat_emit_calcBlockSize: - LEAQ (CX)(R8*1), CX - -emit_literal_done_repeat_emit_calcBlockSize: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+8(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R11, R11 - -matchlen_loopback_16_repeat_extend_calcBlockSize: - CMPL R8, $0x10 - JB matchlen_match8_repeat_extend_calcBlockSize - MOVQ (R9)(R11*1), R10 - MOVQ 8(R9)(R11*1), R12 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_calcBlockSize - XORQ 8(SI)(R11*1), R12 - JNZ matchlen_bsf_16repeat_extend_calcBlockSize - LEAL -16(R8), R8 - LEAL 16(R11), R11 - JMP matchlen_loopback_16_repeat_extend_calcBlockSize - -matchlen_bsf_16repeat_extend_calcBlockSize: -#ifdef GOAMD64_v3 - TZCNTQ R12, R12 - -#else - BSFQ R12, R12 - -#endif - SARQ $0x03, R12 - LEAL 8(R11)(R12*1), R11 - JMP repeat_extend_forward_end_calcBlockSize - -matchlen_match8_repeat_extend_calcBlockSize: - CMPL R8, $0x08 - JB matchlen_match4_repeat_extend_calcBlockSize - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_calcBlockSize - LEAL -8(R8), R8 - LEAL 8(R11), R11 - JMP matchlen_match4_repeat_extend_calcBlockSize - -matchlen_bsf_8_repeat_extend_calcBlockSize: -#ifdef GOAMD64_v3 - TZCNTQ R10, R10 - -#else - BSFQ R10, R10 - -#endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 - JMP repeat_extend_forward_end_calcBlockSize - -matchlen_match4_repeat_extend_calcBlockSize: - CMPL R8, $0x04 - JB matchlen_match2_repeat_extend_calcBlockSize - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 - JNE matchlen_match2_repeat_extend_calcBlockSize - LEAL -4(R8), R8 - LEAL 4(R11), R11 - -matchlen_match2_repeat_extend_calcBlockSize: - CMPL R8, $0x01 - JE 
matchlen_match1_repeat_extend_calcBlockSize - JB repeat_extend_forward_end_calcBlockSize - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 - JNE matchlen_match1_repeat_extend_calcBlockSize - LEAL 2(R11), R11 - SUBL $0x02, R8 - JZ repeat_extend_forward_end_calcBlockSize - -matchlen_match1_repeat_extend_calcBlockSize: - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 - JNE repeat_extend_forward_end_calcBlockSize - LEAL 1(R11), R11 - -repeat_extend_forward_end_calcBlockSize: - ADDL R11, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - - // emitCopy - CMPL DI, $0x00010000 - JB two_byte_offset_repeat_as_copy_calcBlockSize - -four_bytes_loop_back_repeat_as_copy_calcBlockSize: - CMPL SI, $0x40 - JBE four_bytes_remain_repeat_as_copy_calcBlockSize - LEAL -64(SI), SI - ADDQ $0x05, CX - CMPL SI, $0x04 - JB four_bytes_remain_repeat_as_copy_calcBlockSize - JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize - -four_bytes_remain_repeat_as_copy_calcBlockSize: - TESTL SI, SI - JZ repeat_end_emit_calcBlockSize - XORL SI, SI - ADDQ $0x05, CX - JMP repeat_end_emit_calcBlockSize - -two_byte_offset_repeat_as_copy_calcBlockSize: - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_calcBlockSize - LEAL -60(SI), SI - ADDQ $0x03, CX - JMP two_byte_offset_repeat_as_copy_calcBlockSize - -two_byte_offset_short_repeat_as_copy_calcBlockSize: - MOVL SI, R8 - SHLL $0x02, R8 - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_calcBlockSize - CMPL DI, $0x00000800 - JAE emit_copy_three_repeat_as_copy_calcBlockSize - ADDQ $0x02, CX - JMP repeat_end_emit_calcBlockSize - -emit_copy_three_repeat_as_copy_calcBlockSize: - ADDQ $0x03, CX - -repeat_end_emit_calcBlockSize: - MOVL DX, 12(SP) - JMP search_loop_calcBlockSize - -no_repeat_found_calcBlockSize: - CMPL (BX)(SI*1), DI - JEQ candidate_match_calcBlockSize - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_calcBlockSize - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_calcBlockSize - MOVL 20(SP), DX - JMP search_loop_calcBlockSize - -candidate3_match_calcBlockSize: - ADDL $0x02, DX - JMP candidate_match_calcBlockSize - -candidate2_match_calcBlockSize: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_calcBlockSize: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_calcBlockSize - -match_extend_back_loop_calcBlockSize: - CMPL DX, DI - JBE match_extend_back_end_calcBlockSize - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_calcBlockSize - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_calcBlockSize - JMP match_extend_back_loop_calcBlockSize - -match_extend_back_end_calcBlockSize: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 5(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_calcBlockSize - MOVQ $0x00000000, ret+32(FP) - RET - -match_dst_size_check_calcBlockSize: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_calcBlockSize - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), DI - CMPL DI, $0x3c - JB one_byte_match_emit_calcBlockSize - CMPL DI, $0x00000100 - JB two_bytes_match_emit_calcBlockSize - CMPL DI, $0x00010000 - JB three_bytes_match_emit_calcBlockSize - CMPL DI, $0x01000000 - JB four_bytes_match_emit_calcBlockSize - ADDQ $0x05, CX - JMP memmove_long_match_emit_calcBlockSize - -four_bytes_match_emit_calcBlockSize: - ADDQ $0x04, CX - JMP memmove_long_match_emit_calcBlockSize - -three_bytes_match_emit_calcBlockSize: - ADDQ $0x03, CX - JMP 
memmove_long_match_emit_calcBlockSize - -two_bytes_match_emit_calcBlockSize: - ADDQ $0x02, CX - CMPL DI, $0x40 - JB memmove_match_emit_calcBlockSize - JMP memmove_long_match_emit_calcBlockSize - -one_byte_match_emit_calcBlockSize: - ADDQ $0x01, CX - -memmove_match_emit_calcBlockSize: - LEAQ (CX)(R9*1), CX - JMP emit_literal_done_match_emit_calcBlockSize - -memmove_long_match_emit_calcBlockSize: - LEAQ (CX)(R9*1), CX - -emit_literal_done_match_emit_calcBlockSize: -match_nolit_loop_calcBlockSize: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+8(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_calcBlockSize: - CMPL DI, $0x10 - JB matchlen_match8_match_nolit_calcBlockSize - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_calcBlockSize - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_calcBlockSize - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_calcBlockSize - -matchlen_bsf_16match_nolit_calcBlockSize: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_calcBlockSize - -matchlen_match8_match_nolit_calcBlockSize: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_calcBlockSize - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_calcBlockSize - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_calcBlockSize - -matchlen_bsf_8_match_nolit_calcBlockSize: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_calcBlockSize - -matchlen_match4_match_nolit_calcBlockSize: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_calcBlockSize - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_calcBlockSize - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_calcBlockSize: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_calcBlockSize - JB match_nolit_end_calcBlockSize - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_calcBlockSize - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_calcBlockSize - -matchlen_match1_match_nolit_calcBlockSize: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_calcBlockSize - LEAL 1(R10), R10 - -match_nolit_end_calcBlockSize: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy - CMPL SI, $0x00010000 - JB two_byte_offset_match_nolit_calcBlockSize - -four_bytes_loop_back_match_nolit_calcBlockSize: - CMPL R10, $0x40 - JBE four_bytes_remain_match_nolit_calcBlockSize - LEAL -64(R10), R10 - ADDQ $0x05, CX - CMPL R10, $0x04 - JB four_bytes_remain_match_nolit_calcBlockSize - JMP four_bytes_loop_back_match_nolit_calcBlockSize - -four_bytes_remain_match_nolit_calcBlockSize: - TESTL R10, R10 - JZ match_nolit_emitcopy_end_calcBlockSize - XORL SI, SI - ADDQ $0x05, CX - JMP match_nolit_emitcopy_end_calcBlockSize - -two_byte_offset_match_nolit_calcBlockSize: - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_calcBlockSize - LEAL -60(R10), R10 - ADDQ $0x03, CX - JMP two_byte_offset_match_nolit_calcBlockSize - -two_byte_offset_short_match_nolit_calcBlockSize: - MOVL R10, DI - SHLL $0x02, DI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_calcBlockSize - CMPL SI, $0x00000800 - JAE emit_copy_three_match_nolit_calcBlockSize - ADDQ $0x02, 
CX - JMP match_nolit_emitcopy_end_calcBlockSize - -emit_copy_three_match_nolit_calcBlockSize: - ADDQ $0x03, CX - -match_nolit_emitcopy_end_calcBlockSize: - CMPL DX, 8(SP) - JAE emit_remainder_calcBlockSize - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_calcBlockSize - MOVQ $0x00000000, ret+32(FP) - RET - -match_nolit_dst_ok_calcBlockSize: - MOVQ $0x0000cf1bbcdcbf9b, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x10, R8 - IMULQ R9, R8 - SHRQ $0x33, R8 - SHLQ $0x10, SI - IMULQ R9, SI - SHRQ $0x33, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_calcBlockSize - INCL DX - JMP search_loop_calcBlockSize - -emit_remainder_calcBlockSize: - MOVQ src_len+8(FP), AX - SUBL 12(SP), AX - LEAQ 5(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_calcBlockSize - MOVQ $0x00000000, ret+32(FP) - RET - -emit_remainder_ok_calcBlockSize: - MOVQ src_len+8(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_calcBlockSize - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), AX - CMPL AX, $0x3c - JB one_byte_emit_remainder_calcBlockSize - CMPL AX, $0x00000100 - JB two_bytes_emit_remainder_calcBlockSize - CMPL AX, $0x00010000 - JB three_bytes_emit_remainder_calcBlockSize - CMPL AX, $0x01000000 - JB four_bytes_emit_remainder_calcBlockSize - ADDQ $0x05, CX - JMP memmove_long_emit_remainder_calcBlockSize - -four_bytes_emit_remainder_calcBlockSize: - ADDQ $0x04, CX - JMP memmove_long_emit_remainder_calcBlockSize - -three_bytes_emit_remainder_calcBlockSize: - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_calcBlockSize - -two_bytes_emit_remainder_calcBlockSize: - ADDQ $0x02, CX - CMPL AX, $0x40 - JB memmove_emit_remainder_calcBlockSize - JMP memmove_long_emit_remainder_calcBlockSize - -one_byte_emit_remainder_calcBlockSize: - ADDQ $0x01, CX - -memmove_emit_remainder_calcBlockSize: - LEAQ (CX)(SI*1), AX - MOVQ AX, CX - JMP emit_literal_done_emit_remainder_calcBlockSize - -memmove_long_emit_remainder_calcBlockSize: - LEAQ (CX)(SI*1), AX - MOVQ AX, CX - -emit_literal_done_emit_remainder_calcBlockSize: - MOVQ CX, ret+32(FP) - RET - -// func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int -// Requires: BMI, SSE2 -TEXT ·calcBlockSizeSmall(SB), $24-40 - MOVQ tmp+24(FP), AX - XORQ CX, CX - MOVQ $0x00000010, DX - MOVQ AX, BX - PXOR X0, X0 - -zero_loop_calcBlockSizeSmall: - MOVOU X0, (BX) - MOVOU X0, 16(BX) - MOVOU X0, 32(BX) - MOVOU X0, 48(BX) - MOVOU X0, 64(BX) - MOVOU X0, 80(BX) - MOVOU X0, 96(BX) - MOVOU X0, 112(BX) - ADDQ $0x80, BX - DECQ DX - JNZ zero_loop_calcBlockSizeSmall - MOVL $0x00000000, 12(SP) - MOVQ src_len+8(FP), DX - LEAQ -9(DX), BX - LEAQ -8(DX), SI - MOVL SI, 8(SP) - SHRQ $0x05, DX - SUBL DX, BX - LEAQ (CX)(BX*1), BX - MOVQ BX, (SP) - MOVL $0x00000001, DX - MOVL DX, 16(SP) - MOVQ src_base+0(FP), BX - -search_loop_calcBlockSizeSmall: - MOVL DX, SI - SUBL 12(SP), SI - SHRL $0x04, SI - LEAL 4(DX)(SI*1), SI - CMPL SI, 8(SP) - JAE emit_remainder_calcBlockSizeSmall - MOVQ (BX)(DX*1), DI - MOVL SI, 20(SP) - MOVQ $0x9e3779b1, R9 - MOVQ DI, R10 - MOVQ DI, R11 - SHRQ $0x08, R11 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x37, R10 - SHLQ $0x20, R11 - IMULQ R9, R11 - SHRQ $0x37, R11 - MOVL (AX)(R10*4), SI - MOVL (AX)(R11*4), R8 - MOVL DX, (AX)(R10*4) - LEAL 1(DX), R10 - MOVL R10, (AX)(R11*4) - MOVQ DI, R10 - SHRQ $0x10, R10 - SHLQ $0x20, R10 - IMULQ R9, R10 - SHRQ $0x37, R10 - MOVL DX, R9 - SUBL 16(SP), R9 - MOVL 1(BX)(R9*1), R11 - MOVQ 
DI, R9 - SHRQ $0x08, R9 - CMPL R9, R11 - JNE no_repeat_found_calcBlockSizeSmall - LEAL 1(DX), DI - MOVL 12(SP), SI - MOVL DI, R8 - SUBL 16(SP), R8 - JZ repeat_extend_back_end_calcBlockSizeSmall - -repeat_extend_back_loop_calcBlockSizeSmall: - CMPL DI, SI - JBE repeat_extend_back_end_calcBlockSizeSmall - MOVB -1(BX)(R8*1), R9 - MOVB -1(BX)(DI*1), R10 - CMPB R9, R10 - JNE repeat_extend_back_end_calcBlockSizeSmall - LEAL -1(DI), DI - DECL R8 - JNZ repeat_extend_back_loop_calcBlockSizeSmall - -repeat_extend_back_end_calcBlockSizeSmall: - MOVL DI, SI - SUBL 12(SP), SI - LEAQ 3(CX)(SI*1), SI - CMPQ SI, (SP) - JB repeat_dst_size_check_calcBlockSizeSmall - MOVQ $0x00000000, ret+32(FP) - RET - -repeat_dst_size_check_calcBlockSizeSmall: - MOVL 12(SP), SI - CMPL SI, DI - JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall - MOVL DI, R8 - MOVL DI, 12(SP) - LEAQ (BX)(SI*1), R9 - SUBL SI, R8 - LEAL -1(R8), SI - CMPL SI, $0x3c - JB one_byte_repeat_emit_calcBlockSizeSmall - CMPL SI, $0x00000100 - JB two_bytes_repeat_emit_calcBlockSizeSmall - JB three_bytes_repeat_emit_calcBlockSizeSmall - -three_bytes_repeat_emit_calcBlockSizeSmall: - ADDQ $0x03, CX - JMP memmove_long_repeat_emit_calcBlockSizeSmall - -two_bytes_repeat_emit_calcBlockSizeSmall: - ADDQ $0x02, CX - CMPL SI, $0x40 - JB memmove_repeat_emit_calcBlockSizeSmall - JMP memmove_long_repeat_emit_calcBlockSizeSmall - -one_byte_repeat_emit_calcBlockSizeSmall: - ADDQ $0x01, CX - -memmove_repeat_emit_calcBlockSizeSmall: - LEAQ (CX)(R8*1), CX - JMP emit_literal_done_repeat_emit_calcBlockSizeSmall - -memmove_long_repeat_emit_calcBlockSizeSmall: - LEAQ (CX)(R8*1), CX - -emit_literal_done_repeat_emit_calcBlockSizeSmall: - ADDL $0x05, DX - MOVL DX, SI - SUBL 16(SP), SI - MOVQ src_len+8(FP), R8 - SUBL DX, R8 - LEAQ (BX)(DX*1), R9 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R11, R11 - -matchlen_loopback_16_repeat_extend_calcBlockSizeSmall: - CMPL R8, $0x10 - JB matchlen_match8_repeat_extend_calcBlockSizeSmall - MOVQ (R9)(R11*1), R10 - MOVQ 8(R9)(R11*1), R12 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall - XORQ 8(SI)(R11*1), R12 - JNZ matchlen_bsf_16repeat_extend_calcBlockSizeSmall - LEAL -16(R8), R8 - LEAL 16(R11), R11 - JMP matchlen_loopback_16_repeat_extend_calcBlockSizeSmall - -matchlen_bsf_16repeat_extend_calcBlockSizeSmall: -#ifdef GOAMD64_v3 - TZCNTQ R12, R12 - -#else - BSFQ R12, R12 - -#endif - SARQ $0x03, R12 - LEAL 8(R11)(R12*1), R11 - JMP repeat_extend_forward_end_calcBlockSizeSmall - -matchlen_match8_repeat_extend_calcBlockSizeSmall: - CMPL R8, $0x08 - JB matchlen_match4_repeat_extend_calcBlockSizeSmall - MOVQ (R9)(R11*1), R10 - XORQ (SI)(R11*1), R10 - JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall - LEAL -8(R8), R8 - LEAL 8(R11), R11 - JMP matchlen_match4_repeat_extend_calcBlockSizeSmall - -matchlen_bsf_8_repeat_extend_calcBlockSizeSmall: -#ifdef GOAMD64_v3 - TZCNTQ R10, R10 - -#else - BSFQ R10, R10 - -#endif - SARQ $0x03, R10 - LEAL (R11)(R10*1), R11 - JMP repeat_extend_forward_end_calcBlockSizeSmall - -matchlen_match4_repeat_extend_calcBlockSizeSmall: - CMPL R8, $0x04 - JB matchlen_match2_repeat_extend_calcBlockSizeSmall - MOVL (R9)(R11*1), R10 - CMPL (SI)(R11*1), R10 - JNE matchlen_match2_repeat_extend_calcBlockSizeSmall - LEAL -4(R8), R8 - LEAL 4(R11), R11 - -matchlen_match2_repeat_extend_calcBlockSizeSmall: - CMPL R8, $0x01 - JE matchlen_match1_repeat_extend_calcBlockSizeSmall - JB repeat_extend_forward_end_calcBlockSizeSmall - MOVW (R9)(R11*1), R10 - CMPW (SI)(R11*1), R10 - JNE 
matchlen_match1_repeat_extend_calcBlockSizeSmall - LEAL 2(R11), R11 - SUBL $0x02, R8 - JZ repeat_extend_forward_end_calcBlockSizeSmall - -matchlen_match1_repeat_extend_calcBlockSizeSmall: - MOVB (R9)(R11*1), R10 - CMPB (SI)(R11*1), R10 - JNE repeat_extend_forward_end_calcBlockSizeSmall - LEAL 1(R11), R11 - -repeat_extend_forward_end_calcBlockSizeSmall: - ADDL R11, DX - MOVL DX, SI - SUBL DI, SI - MOVL 16(SP), DI - - // emitCopy -two_byte_offset_repeat_as_copy_calcBlockSizeSmall: - CMPL SI, $0x40 - JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall - LEAL -60(SI), SI - ADDQ $0x03, CX - JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall - -two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall: - MOVL SI, DI - SHLL $0x02, DI - CMPL SI, $0x0c - JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall - ADDQ $0x02, CX - JMP repeat_end_emit_calcBlockSizeSmall - -emit_copy_three_repeat_as_copy_calcBlockSizeSmall: - ADDQ $0x03, CX - -repeat_end_emit_calcBlockSizeSmall: - MOVL DX, 12(SP) - JMP search_loop_calcBlockSizeSmall - -no_repeat_found_calcBlockSizeSmall: - CMPL (BX)(SI*1), DI - JEQ candidate_match_calcBlockSizeSmall - SHRQ $0x08, DI - MOVL (AX)(R10*4), SI - LEAL 2(DX), R9 - CMPL (BX)(R8*1), DI - JEQ candidate2_match_calcBlockSizeSmall - MOVL R9, (AX)(R10*4) - SHRQ $0x08, DI - CMPL (BX)(SI*1), DI - JEQ candidate3_match_calcBlockSizeSmall - MOVL 20(SP), DX - JMP search_loop_calcBlockSizeSmall - -candidate3_match_calcBlockSizeSmall: - ADDL $0x02, DX - JMP candidate_match_calcBlockSizeSmall - -candidate2_match_calcBlockSizeSmall: - MOVL R9, (AX)(R10*4) - INCL DX - MOVL R8, SI - -candidate_match_calcBlockSizeSmall: - MOVL 12(SP), DI - TESTL SI, SI - JZ match_extend_back_end_calcBlockSizeSmall - -match_extend_back_loop_calcBlockSizeSmall: - CMPL DX, DI - JBE match_extend_back_end_calcBlockSizeSmall - MOVB -1(BX)(SI*1), R8 - MOVB -1(BX)(DX*1), R9 - CMPB R8, R9 - JNE match_extend_back_end_calcBlockSizeSmall - LEAL -1(DX), DX - DECL SI - JZ match_extend_back_end_calcBlockSizeSmall - JMP match_extend_back_loop_calcBlockSizeSmall - -match_extend_back_end_calcBlockSizeSmall: - MOVL DX, DI - SUBL 12(SP), DI - LEAQ 3(CX)(DI*1), DI - CMPQ DI, (SP) - JB match_dst_size_check_calcBlockSizeSmall - MOVQ $0x00000000, ret+32(FP) - RET - -match_dst_size_check_calcBlockSizeSmall: - MOVL DX, DI - MOVL 12(SP), R8 - CMPL R8, DI - JEQ emit_literal_done_match_emit_calcBlockSizeSmall - MOVL DI, R9 - MOVL DI, 12(SP) - LEAQ (BX)(R8*1), DI - SUBL R8, R9 - LEAL -1(R9), DI - CMPL DI, $0x3c - JB one_byte_match_emit_calcBlockSizeSmall - CMPL DI, $0x00000100 - JB two_bytes_match_emit_calcBlockSizeSmall - JB three_bytes_match_emit_calcBlockSizeSmall - -three_bytes_match_emit_calcBlockSizeSmall: - ADDQ $0x03, CX - JMP memmove_long_match_emit_calcBlockSizeSmall - -two_bytes_match_emit_calcBlockSizeSmall: - ADDQ $0x02, CX - CMPL DI, $0x40 - JB memmove_match_emit_calcBlockSizeSmall - JMP memmove_long_match_emit_calcBlockSizeSmall - -one_byte_match_emit_calcBlockSizeSmall: - ADDQ $0x01, CX - -memmove_match_emit_calcBlockSizeSmall: - LEAQ (CX)(R9*1), CX - JMP emit_literal_done_match_emit_calcBlockSizeSmall - -memmove_long_match_emit_calcBlockSizeSmall: - LEAQ (CX)(R9*1), CX - -emit_literal_done_match_emit_calcBlockSizeSmall: -match_nolit_loop_calcBlockSizeSmall: - MOVL DX, DI - SUBL SI, DI - MOVL DI, 16(SP) - ADDL $0x04, DX - ADDL $0x04, SI - MOVQ src_len+8(FP), DI - SUBL DX, DI - LEAQ (BX)(DX*1), R8 - LEAQ (BX)(SI*1), SI - - // matchLen - XORL R10, R10 - -matchlen_loopback_16_match_nolit_calcBlockSizeSmall: - CMPL DI, 
$0x10 - JB matchlen_match8_match_nolit_calcBlockSizeSmall - MOVQ (R8)(R10*1), R9 - MOVQ 8(R8)(R10*1), R11 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall - XORQ 8(SI)(R10*1), R11 - JNZ matchlen_bsf_16match_nolit_calcBlockSizeSmall - LEAL -16(DI), DI - LEAL 16(R10), R10 - JMP matchlen_loopback_16_match_nolit_calcBlockSizeSmall - -matchlen_bsf_16match_nolit_calcBlockSizeSmall: -#ifdef GOAMD64_v3 - TZCNTQ R11, R11 - -#else - BSFQ R11, R11 - -#endif - SARQ $0x03, R11 - LEAL 8(R10)(R11*1), R10 - JMP match_nolit_end_calcBlockSizeSmall - -matchlen_match8_match_nolit_calcBlockSizeSmall: - CMPL DI, $0x08 - JB matchlen_match4_match_nolit_calcBlockSizeSmall - MOVQ (R8)(R10*1), R9 - XORQ (SI)(R10*1), R9 - JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall - LEAL -8(DI), DI - LEAL 8(R10), R10 - JMP matchlen_match4_match_nolit_calcBlockSizeSmall - -matchlen_bsf_8_match_nolit_calcBlockSizeSmall: -#ifdef GOAMD64_v3 - TZCNTQ R9, R9 - -#else - BSFQ R9, R9 - -#endif - SARQ $0x03, R9 - LEAL (R10)(R9*1), R10 - JMP match_nolit_end_calcBlockSizeSmall - -matchlen_match4_match_nolit_calcBlockSizeSmall: - CMPL DI, $0x04 - JB matchlen_match2_match_nolit_calcBlockSizeSmall - MOVL (R8)(R10*1), R9 - CMPL (SI)(R10*1), R9 - JNE matchlen_match2_match_nolit_calcBlockSizeSmall - LEAL -4(DI), DI - LEAL 4(R10), R10 - -matchlen_match2_match_nolit_calcBlockSizeSmall: - CMPL DI, $0x01 - JE matchlen_match1_match_nolit_calcBlockSizeSmall - JB match_nolit_end_calcBlockSizeSmall - MOVW (R8)(R10*1), R9 - CMPW (SI)(R10*1), R9 - JNE matchlen_match1_match_nolit_calcBlockSizeSmall - LEAL 2(R10), R10 - SUBL $0x02, DI - JZ match_nolit_end_calcBlockSizeSmall - -matchlen_match1_match_nolit_calcBlockSizeSmall: - MOVB (R8)(R10*1), R9 - CMPB (SI)(R10*1), R9 - JNE match_nolit_end_calcBlockSizeSmall - LEAL 1(R10), R10 - -match_nolit_end_calcBlockSizeSmall: - ADDL R10, DX - MOVL 16(SP), SI - ADDL $0x04, R10 - MOVL DX, 12(SP) - - // emitCopy -two_byte_offset_match_nolit_calcBlockSizeSmall: - CMPL R10, $0x40 - JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall - LEAL -60(R10), R10 - ADDQ $0x03, CX - JMP two_byte_offset_match_nolit_calcBlockSizeSmall - -two_byte_offset_short_match_nolit_calcBlockSizeSmall: - MOVL R10, SI - SHLL $0x02, SI - CMPL R10, $0x0c - JAE emit_copy_three_match_nolit_calcBlockSizeSmall - ADDQ $0x02, CX - JMP match_nolit_emitcopy_end_calcBlockSizeSmall - -emit_copy_three_match_nolit_calcBlockSizeSmall: - ADDQ $0x03, CX - -match_nolit_emitcopy_end_calcBlockSizeSmall: - CMPL DX, 8(SP) - JAE emit_remainder_calcBlockSizeSmall - MOVQ -2(BX)(DX*1), DI - CMPQ CX, (SP) - JB match_nolit_dst_ok_calcBlockSizeSmall - MOVQ $0x00000000, ret+32(FP) - RET - -match_nolit_dst_ok_calcBlockSizeSmall: - MOVQ $0x9e3779b1, R9 - MOVQ DI, R8 - SHRQ $0x10, DI - MOVQ DI, SI - SHLQ $0x20, R8 - IMULQ R9, R8 - SHRQ $0x37, R8 - SHLQ $0x20, SI - IMULQ R9, SI - SHRQ $0x37, SI - LEAL -2(DX), R9 - LEAQ (AX)(SI*4), R10 - MOVL (R10), SI - MOVL R9, (AX)(R8*4) - MOVL DX, (R10) - CMPL (BX)(SI*1), DI - JEQ match_nolit_loop_calcBlockSizeSmall - INCL DX - JMP search_loop_calcBlockSizeSmall - -emit_remainder_calcBlockSizeSmall: - MOVQ src_len+8(FP), AX - SUBL 12(SP), AX - LEAQ 3(CX)(AX*1), AX - CMPQ AX, (SP) - JB emit_remainder_ok_calcBlockSizeSmall - MOVQ $0x00000000, ret+32(FP) - RET - -emit_remainder_ok_calcBlockSizeSmall: - MOVQ src_len+8(FP), AX - MOVL 12(SP), DX - CMPL DX, AX - JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall - MOVL AX, SI - MOVL AX, 12(SP) - LEAQ (BX)(DX*1), AX - SUBL DX, SI - LEAL -1(SI), AX - CMPL AX, $0x3c - 
JB one_byte_emit_remainder_calcBlockSizeSmall - CMPL AX, $0x00000100 - JB two_bytes_emit_remainder_calcBlockSizeSmall - JB three_bytes_emit_remainder_calcBlockSizeSmall - -three_bytes_emit_remainder_calcBlockSizeSmall: - ADDQ $0x03, CX - JMP memmove_long_emit_remainder_calcBlockSizeSmall - -two_bytes_emit_remainder_calcBlockSizeSmall: - ADDQ $0x02, CX - CMPL AX, $0x40 - JB memmove_emit_remainder_calcBlockSizeSmall - JMP memmove_long_emit_remainder_calcBlockSizeSmall - -one_byte_emit_remainder_calcBlockSizeSmall: - ADDQ $0x01, CX - -memmove_emit_remainder_calcBlockSizeSmall: - LEAQ (CX)(SI*1), AX - MOVQ AX, CX - JMP emit_literal_done_emit_remainder_calcBlockSizeSmall - -memmove_long_emit_remainder_calcBlockSizeSmall: - LEAQ (CX)(SI*1), AX - MOVQ AX, CX - -emit_literal_done_emit_remainder_calcBlockSizeSmall: - MOVQ CX, ret+32(FP) - RET - -// func emitLiteral(dst []byte, lit []byte) int -// Requires: SSE2 -TEXT ·emitLiteral(SB), NOSPLIT, $0-56 - MOVQ lit_len+32(FP), DX - MOVQ dst_base+0(FP), AX - MOVQ lit_base+24(FP), CX - TESTQ DX, DX - JZ emit_literal_end_standalone_skip - MOVL DX, BX - LEAL -1(DX), SI - CMPL SI, $0x3c - JB one_byte_standalone - CMPL SI, $0x00000100 - JB two_bytes_standalone - CMPL SI, $0x00010000 - JB three_bytes_standalone - CMPL SI, $0x01000000 - JB four_bytes_standalone - MOVB $0xfc, (AX) - MOVL SI, 1(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP memmove_long_standalone - -four_bytes_standalone: - MOVL SI, DI - SHRL $0x10, DI - MOVB $0xf8, (AX) - MOVW SI, 1(AX) - MOVB DI, 3(AX) - ADDQ $0x04, BX - ADDQ $0x04, AX - JMP memmove_long_standalone - -three_bytes_standalone: - MOVB $0xf4, (AX) - MOVW SI, 1(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX - JMP memmove_long_standalone - -two_bytes_standalone: - MOVB $0xf0, (AX) - MOVB SI, 1(AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - CMPL SI, $0x40 - JB memmove_standalone - JMP memmove_long_standalone - -one_byte_standalone: - SHLB $0x02, SI - MOVB SI, (AX) - ADDQ $0x01, BX - ADDQ $0x01, AX - -memmove_standalone: - // genMemMoveShort - CMPQ DX, $0x03 - JB emit_lit_memmove_standalone_memmove_move_1or2 - JE emit_lit_memmove_standalone_memmove_move_3 - CMPQ DX, $0x08 - JB emit_lit_memmove_standalone_memmove_move_4through7 - CMPQ DX, $0x10 - JBE emit_lit_memmove_standalone_memmove_move_8through16 - CMPQ DX, $0x20 - JBE emit_lit_memmove_standalone_memmove_move_17through32 - JMP emit_lit_memmove_standalone_memmove_move_33through64 - -emit_lit_memmove_standalone_memmove_move_1or2: - MOVB (CX), SI - MOVB -1(CX)(DX*1), CL - MOVB SI, (AX) - MOVB CL, -1(AX)(DX*1) - JMP emit_literal_end_standalone - -emit_lit_memmove_standalone_memmove_move_3: - MOVW (CX), SI - MOVB 2(CX), CL - MOVW SI, (AX) - MOVB CL, 2(AX) - JMP emit_literal_end_standalone - -emit_lit_memmove_standalone_memmove_move_4through7: - MOVL (CX), SI - MOVL -4(CX)(DX*1), CX - MOVL SI, (AX) - MOVL CX, -4(AX)(DX*1) - JMP emit_literal_end_standalone - -emit_lit_memmove_standalone_memmove_move_8through16: - MOVQ (CX), SI - MOVQ -8(CX)(DX*1), CX - MOVQ SI, (AX) - MOVQ CX, -8(AX)(DX*1) - JMP emit_literal_end_standalone - -emit_lit_memmove_standalone_memmove_move_17through32: - MOVOU (CX), X0 - MOVOU -16(CX)(DX*1), X1 - MOVOU X0, (AX) - MOVOU X1, -16(AX)(DX*1) - JMP emit_literal_end_standalone - -emit_lit_memmove_standalone_memmove_move_33through64: - MOVOU (CX), X0 - MOVOU 16(CX), X1 - MOVOU -32(CX)(DX*1), X2 - MOVOU -16(CX)(DX*1), X3 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(DX*1) - MOVOU X3, -16(AX)(DX*1) - JMP emit_literal_end_standalone - JMP emit_literal_end_standalone - 
-memmove_long_standalone: - // genMemMoveLong - MOVOU (CX), X0 - MOVOU 16(CX), X1 - MOVOU -32(CX)(DX*1), X2 - MOVOU -16(CX)(DX*1), X3 - MOVQ DX, DI - SHRQ $0x05, DI - MOVQ AX, SI - ANDL $0x0000001f, SI - MOVQ $0x00000040, R8 - SUBQ SI, R8 - DECQ DI - JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 - LEAQ -32(CX)(R8*1), SI - LEAQ -32(AX)(R8*1), R9 - -emit_lit_memmove_long_standalonelarge_big_loop_back: - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOA X4, (R9) - MOVOA X5, 16(R9) - ADDQ $0x20, R9 - ADDQ $0x20, SI - ADDQ $0x20, R8 - DECQ DI - JNA emit_lit_memmove_long_standalonelarge_big_loop_back - -emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: - MOVOU -32(CX)(R8*1), X4 - MOVOU -16(CX)(R8*1), X5 - MOVOA X4, -32(AX)(R8*1) - MOVOA X5, -16(AX)(R8*1) - ADDQ $0x20, R8 - CMPQ DX, R8 - JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(DX*1) - MOVOU X3, -16(AX)(DX*1) - JMP emit_literal_end_standalone - JMP emit_literal_end_standalone - -emit_literal_end_standalone_skip: - XORQ BX, BX - -emit_literal_end_standalone: - MOVQ BX, ret+48(FP) - RET - -// func emitRepeat(dst []byte, offset int, length int) int -TEXT ·emitRepeat(SB), NOSPLIT, $0-48 - XORQ BX, BX - MOVQ dst_base+0(FP), AX - MOVQ offset+24(FP), CX - MOVQ length+32(FP), DX - - // emitRepeat -emit_repeat_again_standalone: - MOVL DX, SI - LEAL -4(DX), DX - CMPL SI, $0x08 - JBE repeat_two_standalone - CMPL SI, $0x0c - JAE cant_repeat_two_offset_standalone - CMPL CX, $0x00000800 - JB repeat_two_offset_standalone - -cant_repeat_two_offset_standalone: - CMPL DX, $0x00000104 - JB repeat_three_standalone - CMPL DX, $0x00010100 - JB repeat_four_standalone - CMPL DX, $0x0100ffff - JB repeat_five_standalone - LEAL -16842747(DX), DX - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - ADDQ $0x05, BX - JMP emit_repeat_again_standalone - -repeat_five_standalone: - LEAL -65536(DX), DX - MOVL DX, CX - MOVW $0x001d, (AX) - MOVW DX, 2(AX) - SARL $0x10, CX - MOVB CL, 4(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP gen_emit_repeat_end - -repeat_four_standalone: - LEAL -256(DX), DX - MOVW $0x0019, (AX) - MOVW DX, 2(AX) - ADDQ $0x04, BX - ADDQ $0x04, AX - JMP gen_emit_repeat_end - -repeat_three_standalone: - LEAL -4(DX), DX - MOVW $0x0015, (AX) - MOVB DL, 2(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX - JMP gen_emit_repeat_end - -repeat_two_standalone: - SHLL $0x02, DX - ORL $0x01, DX - MOVW DX, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_repeat_end - -repeat_two_offset_standalone: - XORQ SI, SI - LEAL 1(SI)(DX*4), DX - MOVB CL, 1(AX) - SARL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - -gen_emit_repeat_end: - MOVQ BX, ret+40(FP) - RET - -// func emitCopy(dst []byte, offset int, length int) int -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - XORQ BX, BX - MOVQ dst_base+0(FP), AX - MOVQ offset+24(FP), CX - MOVQ length+32(FP), DX - - // emitCopy - CMPL CX, $0x00010000 - JB two_byte_offset_standalone - CMPL DX, $0x40 - JBE four_bytes_remain_standalone - MOVB $0xff, (AX) - MOVL CX, 1(AX) - LEAL -64(DX), DX - ADDQ $0x05, BX - ADDQ $0x05, AX - CMPL DX, $0x04 - JB four_bytes_remain_standalone - - // emitRepeat -emit_repeat_again_standalone_emit_copy: - MOVL DX, SI - LEAL -4(DX), DX - CMPL SI, $0x08 - JBE repeat_two_standalone_emit_copy - CMPL SI, $0x0c - JAE cant_repeat_two_offset_standalone_emit_copy - CMPL CX, $0x00000800 - JB repeat_two_offset_standalone_emit_copy - -cant_repeat_two_offset_standalone_emit_copy: - CMPL DX, $0x00000104 - 
JB repeat_three_standalone_emit_copy - CMPL DX, $0x00010100 - JB repeat_four_standalone_emit_copy - CMPL DX, $0x0100ffff - JB repeat_five_standalone_emit_copy - LEAL -16842747(DX), DX - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - ADDQ $0x05, BX - JMP emit_repeat_again_standalone_emit_copy - -repeat_five_standalone_emit_copy: - LEAL -65536(DX), DX - MOVL DX, CX - MOVW $0x001d, (AX) - MOVW DX, 2(AX) - SARL $0x10, CX - MOVB CL, 4(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP gen_emit_copy_end - -repeat_four_standalone_emit_copy: - LEAL -256(DX), DX - MOVW $0x0019, (AX) - MOVW DX, 2(AX) - ADDQ $0x04, BX - ADDQ $0x04, AX - JMP gen_emit_copy_end - -repeat_three_standalone_emit_copy: - LEAL -4(DX), DX - MOVW $0x0015, (AX) - MOVB DL, 2(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX - JMP gen_emit_copy_end - -repeat_two_standalone_emit_copy: - SHLL $0x02, DX - ORL $0x01, DX - MOVW DX, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - -repeat_two_offset_standalone_emit_copy: - XORQ SI, SI - LEAL 1(SI)(DX*4), DX - MOVB CL, 1(AX) - SARL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - -four_bytes_remain_standalone: - TESTL DX, DX - JZ gen_emit_copy_end - XORL SI, SI - LEAL -1(SI)(DX*4), DX - MOVB DL, (AX) - MOVL CX, 1(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP gen_emit_copy_end - -two_byte_offset_standalone: - CMPL DX, $0x40 - JBE two_byte_offset_short_standalone - CMPL CX, $0x00000800 - JAE long_offset_short_standalone - MOVL $0x00000001, SI - LEAL 16(SI), SI - MOVB CL, 1(AX) - MOVL CX, DI - SHRL $0x08, DI - SHLL $0x05, DI - ORL DI, SI - MOVB SI, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - SUBL $0x08, DX - - // emitRepeat - LEAL -4(DX), DX - JMP cant_repeat_two_offset_standalone_emit_copy_short_2b - -emit_repeat_again_standalone_emit_copy_short_2b: - MOVL DX, SI - LEAL -4(DX), DX - CMPL SI, $0x08 - JBE repeat_two_standalone_emit_copy_short_2b - CMPL SI, $0x0c - JAE cant_repeat_two_offset_standalone_emit_copy_short_2b - CMPL CX, $0x00000800 - JB repeat_two_offset_standalone_emit_copy_short_2b - -cant_repeat_two_offset_standalone_emit_copy_short_2b: - CMPL DX, $0x00000104 - JB repeat_three_standalone_emit_copy_short_2b - CMPL DX, $0x00010100 - JB repeat_four_standalone_emit_copy_short_2b - CMPL DX, $0x0100ffff - JB repeat_five_standalone_emit_copy_short_2b - LEAL -16842747(DX), DX - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - ADDQ $0x05, BX - JMP emit_repeat_again_standalone_emit_copy_short_2b - -repeat_five_standalone_emit_copy_short_2b: - LEAL -65536(DX), DX - MOVL DX, CX - MOVW $0x001d, (AX) - MOVW DX, 2(AX) - SARL $0x10, CX - MOVB CL, 4(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP gen_emit_copy_end - -repeat_four_standalone_emit_copy_short_2b: - LEAL -256(DX), DX - MOVW $0x0019, (AX) - MOVW DX, 2(AX) - ADDQ $0x04, BX - ADDQ $0x04, AX - JMP gen_emit_copy_end - -repeat_three_standalone_emit_copy_short_2b: - LEAL -4(DX), DX - MOVW $0x0015, (AX) - MOVB DL, 2(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX - JMP gen_emit_copy_end - -repeat_two_standalone_emit_copy_short_2b: - SHLL $0x02, DX - ORL $0x01, DX - MOVW DX, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - -repeat_two_offset_standalone_emit_copy_short_2b: - XORQ SI, SI - LEAL 1(SI)(DX*4), DX - MOVB CL, 1(AX) - SARL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - -long_offset_short_standalone: - MOVB $0xee, (AX) - MOVW CX, 1(AX) - LEAL -60(DX), DX - ADDQ $0x03, AX - ADDQ 
$0x03, BX - - // emitRepeat -emit_repeat_again_standalone_emit_copy_short: - MOVL DX, SI - LEAL -4(DX), DX - CMPL SI, $0x08 - JBE repeat_two_standalone_emit_copy_short - CMPL SI, $0x0c - JAE cant_repeat_two_offset_standalone_emit_copy_short - CMPL CX, $0x00000800 - JB repeat_two_offset_standalone_emit_copy_short - -cant_repeat_two_offset_standalone_emit_copy_short: - CMPL DX, $0x00000104 - JB repeat_three_standalone_emit_copy_short - CMPL DX, $0x00010100 - JB repeat_four_standalone_emit_copy_short - CMPL DX, $0x0100ffff - JB repeat_five_standalone_emit_copy_short - LEAL -16842747(DX), DX - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - ADDQ $0x05, BX - JMP emit_repeat_again_standalone_emit_copy_short - -repeat_five_standalone_emit_copy_short: - LEAL -65536(DX), DX - MOVL DX, CX - MOVW $0x001d, (AX) - MOVW DX, 2(AX) - SARL $0x10, CX - MOVB CL, 4(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP gen_emit_copy_end - -repeat_four_standalone_emit_copy_short: - LEAL -256(DX), DX - MOVW $0x0019, (AX) - MOVW DX, 2(AX) - ADDQ $0x04, BX - ADDQ $0x04, AX - JMP gen_emit_copy_end - -repeat_three_standalone_emit_copy_short: - LEAL -4(DX), DX - MOVW $0x0015, (AX) - MOVB DL, 2(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX - JMP gen_emit_copy_end - -repeat_two_standalone_emit_copy_short: - SHLL $0x02, DX - ORL $0x01, DX - MOVW DX, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - -repeat_two_offset_standalone_emit_copy_short: - XORQ SI, SI - LEAL 1(SI)(DX*4), DX - MOVB CL, 1(AX) - SARL $0x08, CX - SHLL $0x05, CX - ORL CX, DX - MOVB DL, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - -two_byte_offset_short_standalone: - MOVL DX, SI - SHLL $0x02, SI - CMPL DX, $0x0c - JAE emit_copy_three_standalone - CMPL CX, $0x00000800 - JAE emit_copy_three_standalone - LEAL -15(SI), SI - MOVB CL, 1(AX) - SHRL $0x08, CX - SHLL $0x05, CX - ORL CX, SI - MOVB SI, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end - -emit_copy_three_standalone: - LEAL -2(SI), SI - MOVB SI, (AX) - MOVW CX, 1(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX - -gen_emit_copy_end: - MOVQ BX, ret+40(FP) - RET - -// func emitCopyNoRepeat(dst []byte, offset int, length int) int -TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 - XORQ BX, BX - MOVQ dst_base+0(FP), AX - MOVQ offset+24(FP), CX - MOVQ length+32(FP), DX - - // emitCopy - CMPL CX, $0x00010000 - JB two_byte_offset_standalone_snappy - -four_bytes_loop_back_standalone_snappy: - CMPL DX, $0x40 - JBE four_bytes_remain_standalone_snappy - MOVB $0xff, (AX) - MOVL CX, 1(AX) - LEAL -64(DX), DX - ADDQ $0x05, BX - ADDQ $0x05, AX - CMPL DX, $0x04 - JB four_bytes_remain_standalone_snappy - JMP four_bytes_loop_back_standalone_snappy - -four_bytes_remain_standalone_snappy: - TESTL DX, DX - JZ gen_emit_copy_end_snappy - XORL SI, SI - LEAL -1(SI)(DX*4), DX - MOVB DL, (AX) - MOVL CX, 1(AX) - ADDQ $0x05, BX - ADDQ $0x05, AX - JMP gen_emit_copy_end_snappy - -two_byte_offset_standalone_snappy: - CMPL DX, $0x40 - JBE two_byte_offset_short_standalone_snappy - MOVB $0xee, (AX) - MOVW CX, 1(AX) - LEAL -60(DX), DX - ADDQ $0x03, AX - ADDQ $0x03, BX - JMP two_byte_offset_standalone_snappy - -two_byte_offset_short_standalone_snappy: - MOVL DX, SI - SHLL $0x02, SI - CMPL DX, $0x0c - JAE emit_copy_three_standalone_snappy - CMPL CX, $0x00000800 - JAE emit_copy_three_standalone_snappy - LEAL -15(SI), SI - MOVB CL, 1(AX) - SHRL $0x08, CX - SHLL $0x05, CX - ORL CX, SI - MOVB SI, (AX) - ADDQ $0x02, BX - ADDQ $0x02, AX - JMP gen_emit_copy_end_snappy - -emit_copy_three_standalone_snappy: - LEAL 
-2(SI), SI - MOVB SI, (AX) - MOVW CX, 1(AX) - ADDQ $0x03, BX - ADDQ $0x03, AX - -gen_emit_copy_end_snappy: - MOVQ BX, ret+40(FP) - RET - -// func matchLen(a []byte, b []byte) int -// Requires: BMI -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - -matchlen_loopback_16_standalone: - CMPL DX, $0x10 - JB matchlen_match8_standalone - MOVQ (AX)(SI*1), BX - MOVQ 8(AX)(SI*1), DI - XORQ (CX)(SI*1), BX - JNZ matchlen_bsf_8_standalone - XORQ 8(CX)(SI*1), DI - JNZ matchlen_bsf_16standalone - LEAL -16(DX), DX - LEAL 16(SI), SI - JMP matchlen_loopback_16_standalone - -matchlen_bsf_16standalone: -#ifdef GOAMD64_v3 - TZCNTQ DI, DI - -#else - BSFQ DI, DI - -#endif - SARQ $0x03, DI - LEAL 8(SI)(DI*1), SI - JMP gen_match_len_end - -matchlen_match8_standalone: - CMPL DX, $0x08 - JB matchlen_match4_standalone - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JNZ matchlen_bsf_8_standalone - LEAL -8(DX), DX - LEAL 8(SI), SI - JMP matchlen_match4_standalone - -matchlen_bsf_8_standalone: -#ifdef GOAMD64_v3 - TZCNTQ BX, BX - -#else - BSFQ BX, BX - -#endif - SARQ $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x01 - JE matchlen_match1_standalone - JB gen_match_len_end - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL 2(SI), SI - SUBL $0x02, DX - JZ gen_match_len_end - -matchlen_match1_standalone: - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - LEAL 1(SI), SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET - -// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) -// Requires: SSE2 -TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64 - XORQ SI, SI - MOVQ dst_base+0(FP), AX - MOVQ dst_len+8(FP), CX - MOVQ src_base+24(FP), DX - MOVQ src_len+32(FP), BX - LEAQ (DX)(BX*1), BX - LEAQ -8(AX)(CX*1), CX - XORQ DI, DI - -lz4_s2_loop: - CMPQ DX, BX - JAE lz4_s2_corrupt - CMPQ AX, CX - JAE lz4_s2_dstfull - MOVBQZX (DX), R8 - MOVQ R8, R9 - MOVQ R8, R10 - SHRQ $0x04, R9 - ANDQ $0x0f, R10 - CMPQ R8, $0xf0 - JB lz4_s2_ll_end - -lz4_s2_ll_loop: - INCQ DX - CMPQ DX, BX - JAE lz4_s2_corrupt - MOVBQZX (DX), R8 - ADDQ R8, R9 - CMPQ R8, $0xff - JEQ lz4_s2_ll_loop - -lz4_s2_ll_end: - LEAQ (DX)(R9*1), R8 - ADDQ $0x04, R10 - CMPQ R8, BX - JAE lz4_s2_corrupt - INCQ DX - INCQ R8 - TESTQ R9, R9 - JZ lz4_s2_lits_done - LEAQ (AX)(R9*1), R11 - CMPQ R11, CX - JAE lz4_s2_dstfull - ADDQ R9, SI - LEAL -1(R9), R11 - CMPL R11, $0x3c - JB one_byte_lz4_s2 - CMPL R11, $0x00000100 - JB two_bytes_lz4_s2 - CMPL R11, $0x00010000 - JB three_bytes_lz4_s2 - CMPL R11, $0x01000000 - JB four_bytes_lz4_s2 - MOVB $0xfc, (AX) - MOVL R11, 1(AX) - ADDQ $0x05, AX - JMP memmove_long_lz4_s2 - -four_bytes_lz4_s2: - MOVL R11, R12 - SHRL $0x10, R12 - MOVB $0xf8, (AX) - MOVW R11, 1(AX) - MOVB R12, 3(AX) - ADDQ $0x04, AX - JMP memmove_long_lz4_s2 - -three_bytes_lz4_s2: - MOVB $0xf4, (AX) - MOVW R11, 1(AX) - ADDQ $0x03, AX - JMP memmove_long_lz4_s2 - -two_bytes_lz4_s2: - MOVB $0xf0, (AX) - MOVB R11, 1(AX) - ADDQ $0x02, AX - CMPL R11, $0x40 - JB memmove_lz4_s2 - JMP memmove_long_lz4_s2 - -one_byte_lz4_s2: - SHLB $0x02, R11 - MOVB R11, (AX) - ADDQ $0x01, AX - -memmove_lz4_s2: - LEAQ (AX)(R9*1), R11 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_lz4_s2_memmove_move_8 - 
CMPQ R9, $0x10 - JBE emit_lit_memmove_lz4_s2_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_lz4_s2_memmove_move_17through32 - JMP emit_lit_memmove_lz4_s2_memmove_move_33through64 - -emit_lit_memmove_lz4_s2_memmove_move_8: - MOVQ (DX), R12 - MOVQ R12, (AX) - JMP memmove_end_copy_lz4_s2 - -emit_lit_memmove_lz4_s2_memmove_move_8through16: - MOVQ (DX), R12 - MOVQ -8(DX)(R9*1), DX - MOVQ R12, (AX) - MOVQ DX, -8(AX)(R9*1) - JMP memmove_end_copy_lz4_s2 - -emit_lit_memmove_lz4_s2_memmove_move_17through32: - MOVOU (DX), X0 - MOVOU -16(DX)(R9*1), X1 - MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) - JMP memmove_end_copy_lz4_s2 - -emit_lit_memmove_lz4_s2_memmove_move_33through64: - MOVOU (DX), X0 - MOVOU 16(DX), X1 - MOVOU -32(DX)(R9*1), X2 - MOVOU -16(DX)(R9*1), X3 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - -memmove_end_copy_lz4_s2: - MOVQ R11, AX - JMP lz4_s2_lits_emit_done - -memmove_long_lz4_s2: - LEAQ (AX)(R9*1), R11 - - // genMemMoveLong - MOVOU (DX), X0 - MOVOU 16(DX), X1 - MOVOU -32(DX)(R9*1), X2 - MOVOU -16(DX)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R12 - ANDL $0x0000001f, R12 - MOVQ $0x00000040, R14 - SUBQ R12, R14 - DECQ R13 - JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 - LEAQ -32(DX)(R14*1), R12 - LEAQ -32(AX)(R14*1), R15 - -emit_lit_memmove_long_lz4_s2large_big_loop_back: - MOVOU (R12), X4 - MOVOU 16(R12), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R12 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_lz4_s2large_big_loop_back - -emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32: - MOVOU -32(DX)(R14*1), X4 - MOVOU -16(DX)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R11, AX - -lz4_s2_lits_emit_done: - MOVQ R8, DX - -lz4_s2_lits_done: - CMPQ DX, BX - JNE lz4_s2_match - CMPQ R10, $0x04 - JEQ lz4_s2_done - JMP lz4_s2_corrupt - -lz4_s2_match: - LEAQ 2(DX), R8 - CMPQ R8, BX - JAE lz4_s2_corrupt - MOVWQZX (DX), R9 - MOVQ R8, DX - TESTQ R9, R9 - JZ lz4_s2_corrupt - CMPQ R9, SI - JA lz4_s2_corrupt - CMPQ R10, $0x13 - JNE lz4_s2_ml_done - -lz4_s2_ml_loop: - MOVBQZX (DX), R8 - INCQ DX - ADDQ R8, R10 - CMPQ DX, BX - JAE lz4_s2_corrupt - CMPQ R8, $0xff - JEQ lz4_s2_ml_loop - -lz4_s2_ml_done: - ADDQ R10, SI - CMPQ R9, DI - JNE lz4_s2_docopy - - // emitRepeat -emit_repeat_again_lz4_s2: - MOVL R10, R8 - LEAL -4(R10), R10 - CMPL R8, $0x08 - JBE repeat_two_lz4_s2 - CMPL R8, $0x0c - JAE cant_repeat_two_offset_lz4_s2 - CMPL R9, $0x00000800 - JB repeat_two_offset_lz4_s2 - -cant_repeat_two_offset_lz4_s2: - CMPL R10, $0x00000104 - JB repeat_three_lz4_s2 - CMPL R10, $0x00010100 - JB repeat_four_lz4_s2 - CMPL R10, $0x0100ffff - JB repeat_five_lz4_s2 - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - JMP emit_repeat_again_lz4_s2 - -repeat_five_lz4_s2: - LEAL -65536(R10), R10 - MOVL R10, R9 - MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, R9 - MOVB R9, 4(AX) - ADDQ $0x05, AX - JMP lz4_s2_loop - -repeat_four_lz4_s2: - LEAL -256(R10), R10 - MOVW $0x0019, (AX) - MOVW R10, 2(AX) - ADDQ $0x04, AX - JMP lz4_s2_loop - -repeat_three_lz4_s2: - LEAL -4(R10), R10 - MOVW $0x0015, (AX) - MOVB R10, 2(AX) - ADDQ $0x03, AX - JMP lz4_s2_loop - -repeat_two_lz4_s2: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) - ADDQ $0x02, AX - JMP lz4_s2_loop - 
-repeat_two_offset_lz4_s2: - XORQ R8, R8 - LEAL 1(R8)(R10*4), R10 - MOVB R9, 1(AX) - SARL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R10 - MOVB R10, (AX) - ADDQ $0x02, AX - JMP lz4_s2_loop - -lz4_s2_docopy: - MOVQ R9, DI - - // emitCopy - CMPL R10, $0x40 - JBE two_byte_offset_short_lz4_s2 - CMPL R9, $0x00000800 - JAE long_offset_short_lz4_s2 - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB R9, 1(AX) - MOVL R9, R11 - SHRL $0x08, R11 - SHLL $0x05, R11 - ORL R11, R8 - MOVB R8, (AX) - ADDQ $0x02, AX - SUBL $0x08, R10 - - // emitRepeat - LEAL -4(R10), R10 - JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b - -emit_repeat_again_lz4_s2_emit_copy_short_2b: - MOVL R10, R8 - LEAL -4(R10), R10 - CMPL R8, $0x08 - JBE repeat_two_lz4_s2_emit_copy_short_2b - CMPL R8, $0x0c - JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b - CMPL R9, $0x00000800 - JB repeat_two_offset_lz4_s2_emit_copy_short_2b - -cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: - CMPL R10, $0x00000104 - JB repeat_three_lz4_s2_emit_copy_short_2b - CMPL R10, $0x00010100 - JB repeat_four_lz4_s2_emit_copy_short_2b - CMPL R10, $0x0100ffff - JB repeat_five_lz4_s2_emit_copy_short_2b - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - JMP emit_repeat_again_lz4_s2_emit_copy_short_2b - -repeat_five_lz4_s2_emit_copy_short_2b: - LEAL -65536(R10), R10 - MOVL R10, R9 - MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, R9 - MOVB R9, 4(AX) - ADDQ $0x05, AX - JMP lz4_s2_loop - -repeat_four_lz4_s2_emit_copy_short_2b: - LEAL -256(R10), R10 - MOVW $0x0019, (AX) - MOVW R10, 2(AX) - ADDQ $0x04, AX - JMP lz4_s2_loop - -repeat_three_lz4_s2_emit_copy_short_2b: - LEAL -4(R10), R10 - MOVW $0x0015, (AX) - MOVB R10, 2(AX) - ADDQ $0x03, AX - JMP lz4_s2_loop - -repeat_two_lz4_s2_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) - ADDQ $0x02, AX - JMP lz4_s2_loop - -repeat_two_offset_lz4_s2_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(R10*4), R10 - MOVB R9, 1(AX) - SARL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R10 - MOVB R10, (AX) - ADDQ $0x02, AX - JMP lz4_s2_loop - -long_offset_short_lz4_s2: - MOVB $0xee, (AX) - MOVW R9, 1(AX) - LEAL -60(R10), R10 - ADDQ $0x03, AX - - // emitRepeat -emit_repeat_again_lz4_s2_emit_copy_short: - MOVL R10, R8 - LEAL -4(R10), R10 - CMPL R8, $0x08 - JBE repeat_two_lz4_s2_emit_copy_short - CMPL R8, $0x0c - JAE cant_repeat_two_offset_lz4_s2_emit_copy_short - CMPL R9, $0x00000800 - JB repeat_two_offset_lz4_s2_emit_copy_short - -cant_repeat_two_offset_lz4_s2_emit_copy_short: - CMPL R10, $0x00000104 - JB repeat_three_lz4_s2_emit_copy_short - CMPL R10, $0x00010100 - JB repeat_four_lz4_s2_emit_copy_short - CMPL R10, $0x0100ffff - JB repeat_five_lz4_s2_emit_copy_short - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - JMP emit_repeat_again_lz4_s2_emit_copy_short - -repeat_five_lz4_s2_emit_copy_short: - LEAL -65536(R10), R10 - MOVL R10, R9 - MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, R9 - MOVB R9, 4(AX) - ADDQ $0x05, AX - JMP lz4_s2_loop - -repeat_four_lz4_s2_emit_copy_short: - LEAL -256(R10), R10 - MOVW $0x0019, (AX) - MOVW R10, 2(AX) - ADDQ $0x04, AX - JMP lz4_s2_loop - -repeat_three_lz4_s2_emit_copy_short: - LEAL -4(R10), R10 - MOVW $0x0015, (AX) - MOVB R10, 2(AX) - ADDQ $0x03, AX - JMP lz4_s2_loop - -repeat_two_lz4_s2_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) - ADDQ $0x02, AX - JMP lz4_s2_loop - -repeat_two_offset_lz4_s2_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(R10*4), R10 - MOVB R9, 1(AX) - SARL $0x08, R9 - 
SHLL $0x05, R9 - ORL R9, R10 - MOVB R10, (AX) - ADDQ $0x02, AX - JMP lz4_s2_loop - -two_byte_offset_short_lz4_s2: - MOVL R10, R8 - SHLL $0x02, R8 - CMPL R10, $0x0c - JAE emit_copy_three_lz4_s2 - CMPL R9, $0x00000800 - JAE emit_copy_three_lz4_s2 - LEAL -15(R8), R8 - MOVB R9, 1(AX) - SHRL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R8 - MOVB R8, (AX) - ADDQ $0x02, AX - JMP lz4_s2_loop - -emit_copy_three_lz4_s2: - LEAL -2(R8), R8 - MOVB R8, (AX) - MOVW R9, 1(AX) - ADDQ $0x03, AX - JMP lz4_s2_loop - -lz4_s2_done: - MOVQ dst_base+0(FP), CX - SUBQ CX, AX - MOVQ SI, uncompressed+48(FP) - MOVQ AX, dstUsed+56(FP) - RET - -lz4_s2_corrupt: - XORQ AX, AX - LEAQ -1(AX), SI - MOVQ SI, uncompressed+48(FP) - RET - -lz4_s2_dstfull: - XORQ AX, AX - LEAQ -2(AX), SI - MOVQ SI, uncompressed+48(FP) - RET - -// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) -// Requires: SSE2 -TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64 - XORQ SI, SI - MOVQ dst_base+0(FP), AX - MOVQ dst_len+8(FP), CX - MOVQ src_base+24(FP), DX - MOVQ src_len+32(FP), BX - LEAQ (DX)(BX*1), BX - LEAQ -8(AX)(CX*1), CX - XORQ DI, DI - -lz4s_s2_loop: - CMPQ DX, BX - JAE lz4s_s2_corrupt - CMPQ AX, CX - JAE lz4s_s2_dstfull - MOVBQZX (DX), R8 - MOVQ R8, R9 - MOVQ R8, R10 - SHRQ $0x04, R9 - ANDQ $0x0f, R10 - CMPQ R8, $0xf0 - JB lz4s_s2_ll_end - -lz4s_s2_ll_loop: - INCQ DX - CMPQ DX, BX - JAE lz4s_s2_corrupt - MOVBQZX (DX), R8 - ADDQ R8, R9 - CMPQ R8, $0xff - JEQ lz4s_s2_ll_loop - -lz4s_s2_ll_end: - LEAQ (DX)(R9*1), R8 - ADDQ $0x03, R10 - CMPQ R8, BX - JAE lz4s_s2_corrupt - INCQ DX - INCQ R8 - TESTQ R9, R9 - JZ lz4s_s2_lits_done - LEAQ (AX)(R9*1), R11 - CMPQ R11, CX - JAE lz4s_s2_dstfull - ADDQ R9, SI - LEAL -1(R9), R11 - CMPL R11, $0x3c - JB one_byte_lz4s_s2 - CMPL R11, $0x00000100 - JB two_bytes_lz4s_s2 - CMPL R11, $0x00010000 - JB three_bytes_lz4s_s2 - CMPL R11, $0x01000000 - JB four_bytes_lz4s_s2 - MOVB $0xfc, (AX) - MOVL R11, 1(AX) - ADDQ $0x05, AX - JMP memmove_long_lz4s_s2 - -four_bytes_lz4s_s2: - MOVL R11, R12 - SHRL $0x10, R12 - MOVB $0xf8, (AX) - MOVW R11, 1(AX) - MOVB R12, 3(AX) - ADDQ $0x04, AX - JMP memmove_long_lz4s_s2 - -three_bytes_lz4s_s2: - MOVB $0xf4, (AX) - MOVW R11, 1(AX) - ADDQ $0x03, AX - JMP memmove_long_lz4s_s2 - -two_bytes_lz4s_s2: - MOVB $0xf0, (AX) - MOVB R11, 1(AX) - ADDQ $0x02, AX - CMPL R11, $0x40 - JB memmove_lz4s_s2 - JMP memmove_long_lz4s_s2 - -one_byte_lz4s_s2: - SHLB $0x02, R11 - MOVB R11, (AX) - ADDQ $0x01, AX - -memmove_lz4s_s2: - LEAQ (AX)(R9*1), R11 - - // genMemMoveShort - CMPQ R9, $0x08 - JBE emit_lit_memmove_lz4s_s2_memmove_move_8 - CMPQ R9, $0x10 - JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16 - CMPQ R9, $0x20 - JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32 - JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64 - -emit_lit_memmove_lz4s_s2_memmove_move_8: - MOVQ (DX), R12 - MOVQ R12, (AX) - JMP memmove_end_copy_lz4s_s2 - -emit_lit_memmove_lz4s_s2_memmove_move_8through16: - MOVQ (DX), R12 - MOVQ -8(DX)(R9*1), DX - MOVQ R12, (AX) - MOVQ DX, -8(AX)(R9*1) - JMP memmove_end_copy_lz4s_s2 - -emit_lit_memmove_lz4s_s2_memmove_move_17through32: - MOVOU (DX), X0 - MOVOU -16(DX)(R9*1), X1 - MOVOU X0, (AX) - MOVOU X1, -16(AX)(R9*1) - JMP memmove_end_copy_lz4s_s2 - -emit_lit_memmove_lz4s_s2_memmove_move_33through64: - MOVOU (DX), X0 - MOVOU 16(DX), X1 - MOVOU -32(DX)(R9*1), X2 - MOVOU -16(DX)(R9*1), X3 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - -memmove_end_copy_lz4s_s2: - MOVQ R11, AX - JMP lz4s_s2_lits_emit_done - -memmove_long_lz4s_s2: - LEAQ 
(AX)(R9*1), R11 - - // genMemMoveLong - MOVOU (DX), X0 - MOVOU 16(DX), X1 - MOVOU -32(DX)(R9*1), X2 - MOVOU -16(DX)(R9*1), X3 - MOVQ R9, R13 - SHRQ $0x05, R13 - MOVQ AX, R12 - ANDL $0x0000001f, R12 - MOVQ $0x00000040, R14 - SUBQ R12, R14 - DECQ R13 - JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 - LEAQ -32(DX)(R14*1), R12 - LEAQ -32(AX)(R14*1), R15 - -emit_lit_memmove_long_lz4s_s2large_big_loop_back: - MOVOU (R12), X4 - MOVOU 16(R12), X5 - MOVOA X4, (R15) - MOVOA X5, 16(R15) - ADDQ $0x20, R15 - ADDQ $0x20, R12 - ADDQ $0x20, R14 - DECQ R13 - JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back - -emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32: - MOVOU -32(DX)(R14*1), X4 - MOVOU -16(DX)(R14*1), X5 - MOVOA X4, -32(AX)(R14*1) - MOVOA X5, -16(AX)(R14*1) - ADDQ $0x20, R14 - CMPQ R9, R14 - JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R9*1) - MOVOU X3, -16(AX)(R9*1) - MOVQ R11, AX - -lz4s_s2_lits_emit_done: - MOVQ R8, DX - -lz4s_s2_lits_done: - CMPQ DX, BX - JNE lz4s_s2_match - CMPQ R10, $0x03 - JEQ lz4s_s2_done - JMP lz4s_s2_corrupt - -lz4s_s2_match: - CMPQ R10, $0x03 - JEQ lz4s_s2_loop - LEAQ 2(DX), R8 - CMPQ R8, BX - JAE lz4s_s2_corrupt - MOVWQZX (DX), R9 - MOVQ R8, DX - TESTQ R9, R9 - JZ lz4s_s2_corrupt - CMPQ R9, SI - JA lz4s_s2_corrupt - CMPQ R10, $0x12 - JNE lz4s_s2_ml_done - -lz4s_s2_ml_loop: - MOVBQZX (DX), R8 - INCQ DX - ADDQ R8, R10 - CMPQ DX, BX - JAE lz4s_s2_corrupt - CMPQ R8, $0xff - JEQ lz4s_s2_ml_loop - -lz4s_s2_ml_done: - ADDQ R10, SI - CMPQ R9, DI - JNE lz4s_s2_docopy - - // emitRepeat -emit_repeat_again_lz4_s2: - MOVL R10, R8 - LEAL -4(R10), R10 - CMPL R8, $0x08 - JBE repeat_two_lz4_s2 - CMPL R8, $0x0c - JAE cant_repeat_two_offset_lz4_s2 - CMPL R9, $0x00000800 - JB repeat_two_offset_lz4_s2 - -cant_repeat_two_offset_lz4_s2: - CMPL R10, $0x00000104 - JB repeat_three_lz4_s2 - CMPL R10, $0x00010100 - JB repeat_four_lz4_s2 - CMPL R10, $0x0100ffff - JB repeat_five_lz4_s2 - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - JMP emit_repeat_again_lz4_s2 - -repeat_five_lz4_s2: - LEAL -65536(R10), R10 - MOVL R10, R9 - MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, R9 - MOVB R9, 4(AX) - ADDQ $0x05, AX - JMP lz4s_s2_loop - -repeat_four_lz4_s2: - LEAL -256(R10), R10 - MOVW $0x0019, (AX) - MOVW R10, 2(AX) - ADDQ $0x04, AX - JMP lz4s_s2_loop - -repeat_three_lz4_s2: - LEAL -4(R10), R10 - MOVW $0x0015, (AX) - MOVB R10, 2(AX) - ADDQ $0x03, AX - JMP lz4s_s2_loop - -repeat_two_lz4_s2: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) - ADDQ $0x02, AX - JMP lz4s_s2_loop - -repeat_two_offset_lz4_s2: - XORQ R8, R8 - LEAL 1(R8)(R10*4), R10 - MOVB R9, 1(AX) - SARL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R10 - MOVB R10, (AX) - ADDQ $0x02, AX - JMP lz4s_s2_loop - -lz4s_s2_docopy: - MOVQ R9, DI - - // emitCopy - CMPL R10, $0x40 - JBE two_byte_offset_short_lz4_s2 - CMPL R9, $0x00000800 - JAE long_offset_short_lz4_s2 - MOVL $0x00000001, R8 - LEAL 16(R8), R8 - MOVB R9, 1(AX) - MOVL R9, R11 - SHRL $0x08, R11 - SHLL $0x05, R11 - ORL R11, R8 - MOVB R8, (AX) - ADDQ $0x02, AX - SUBL $0x08, R10 - - // emitRepeat - LEAL -4(R10), R10 - JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b - -emit_repeat_again_lz4_s2_emit_copy_short_2b: - MOVL R10, R8 - LEAL -4(R10), R10 - CMPL R8, $0x08 - JBE repeat_two_lz4_s2_emit_copy_short_2b - CMPL R8, $0x0c - JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b - CMPL R9, $0x00000800 - JB repeat_two_offset_lz4_s2_emit_copy_short_2b - 
-cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: - CMPL R10, $0x00000104 - JB repeat_three_lz4_s2_emit_copy_short_2b - CMPL R10, $0x00010100 - JB repeat_four_lz4_s2_emit_copy_short_2b - CMPL R10, $0x0100ffff - JB repeat_five_lz4_s2_emit_copy_short_2b - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - JMP emit_repeat_again_lz4_s2_emit_copy_short_2b - -repeat_five_lz4_s2_emit_copy_short_2b: - LEAL -65536(R10), R10 - MOVL R10, R9 - MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, R9 - MOVB R9, 4(AX) - ADDQ $0x05, AX - JMP lz4s_s2_loop - -repeat_four_lz4_s2_emit_copy_short_2b: - LEAL -256(R10), R10 - MOVW $0x0019, (AX) - MOVW R10, 2(AX) - ADDQ $0x04, AX - JMP lz4s_s2_loop - -repeat_three_lz4_s2_emit_copy_short_2b: - LEAL -4(R10), R10 - MOVW $0x0015, (AX) - MOVB R10, 2(AX) - ADDQ $0x03, AX - JMP lz4s_s2_loop - -repeat_two_lz4_s2_emit_copy_short_2b: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) - ADDQ $0x02, AX - JMP lz4s_s2_loop - -repeat_two_offset_lz4_s2_emit_copy_short_2b: - XORQ R8, R8 - LEAL 1(R8)(R10*4), R10 - MOVB R9, 1(AX) - SARL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R10 - MOVB R10, (AX) - ADDQ $0x02, AX - JMP lz4s_s2_loop - -long_offset_short_lz4_s2: - MOVB $0xee, (AX) - MOVW R9, 1(AX) - LEAL -60(R10), R10 - ADDQ $0x03, AX - - // emitRepeat -emit_repeat_again_lz4_s2_emit_copy_short: - MOVL R10, R8 - LEAL -4(R10), R10 - CMPL R8, $0x08 - JBE repeat_two_lz4_s2_emit_copy_short - CMPL R8, $0x0c - JAE cant_repeat_two_offset_lz4_s2_emit_copy_short - CMPL R9, $0x00000800 - JB repeat_two_offset_lz4_s2_emit_copy_short - -cant_repeat_two_offset_lz4_s2_emit_copy_short: - CMPL R10, $0x00000104 - JB repeat_three_lz4_s2_emit_copy_short - CMPL R10, $0x00010100 - JB repeat_four_lz4_s2_emit_copy_short - CMPL R10, $0x0100ffff - JB repeat_five_lz4_s2_emit_copy_short - LEAL -16842747(R10), R10 - MOVL $0xfffb001d, (AX) - MOVB $0xff, 4(AX) - ADDQ $0x05, AX - JMP emit_repeat_again_lz4_s2_emit_copy_short - -repeat_five_lz4_s2_emit_copy_short: - LEAL -65536(R10), R10 - MOVL R10, R9 - MOVW $0x001d, (AX) - MOVW R10, 2(AX) - SARL $0x10, R9 - MOVB R9, 4(AX) - ADDQ $0x05, AX - JMP lz4s_s2_loop - -repeat_four_lz4_s2_emit_copy_short: - LEAL -256(R10), R10 - MOVW $0x0019, (AX) - MOVW R10, 2(AX) - ADDQ $0x04, AX - JMP lz4s_s2_loop - -repeat_three_lz4_s2_emit_copy_short: - LEAL -4(R10), R10 - MOVW $0x0015, (AX) - MOVB R10, 2(AX) - ADDQ $0x03, AX - JMP lz4s_s2_loop - -repeat_two_lz4_s2_emit_copy_short: - SHLL $0x02, R10 - ORL $0x01, R10 - MOVW R10, (AX) - ADDQ $0x02, AX - JMP lz4s_s2_loop - -repeat_two_offset_lz4_s2_emit_copy_short: - XORQ R8, R8 - LEAL 1(R8)(R10*4), R10 - MOVB R9, 1(AX) - SARL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R10 - MOVB R10, (AX) - ADDQ $0x02, AX - JMP lz4s_s2_loop - -two_byte_offset_short_lz4_s2: - MOVL R10, R8 - SHLL $0x02, R8 - CMPL R10, $0x0c - JAE emit_copy_three_lz4_s2 - CMPL R9, $0x00000800 - JAE emit_copy_three_lz4_s2 - LEAL -15(R8), R8 - MOVB R9, 1(AX) - SHRL $0x08, R9 - SHLL $0x05, R9 - ORL R9, R8 - MOVB R8, (AX) - ADDQ $0x02, AX - JMP lz4s_s2_loop - -emit_copy_three_lz4_s2: - LEAL -2(R8), R8 - MOVB R8, (AX) - MOVW R9, 1(AX) - ADDQ $0x03, AX - JMP lz4s_s2_loop - -lz4s_s2_done: - MOVQ dst_base+0(FP), CX - SUBQ CX, AX - MOVQ SI, uncompressed+48(FP) - MOVQ AX, dstUsed+56(FP) - RET - -lz4s_s2_corrupt: - XORQ AX, AX - LEAQ -1(AX), SI - MOVQ SI, uncompressed+48(FP) - RET - -lz4s_s2_dstfull: - XORQ AX, AX - LEAQ -2(AX), SI - MOVQ SI, uncompressed+48(FP) - RET - -// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) -// 
Requires: SSE2 -TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64 - XORQ SI, SI - MOVQ dst_base+0(FP), AX - MOVQ dst_len+8(FP), CX - MOVQ src_base+24(FP), DX - MOVQ src_len+32(FP), BX - LEAQ (DX)(BX*1), BX - LEAQ -8(AX)(CX*1), CX - -lz4_snappy_loop: - CMPQ DX, BX - JAE lz4_snappy_corrupt - CMPQ AX, CX - JAE lz4_snappy_dstfull - MOVBQZX (DX), DI - MOVQ DI, R8 - MOVQ DI, R9 - SHRQ $0x04, R8 - ANDQ $0x0f, R9 - CMPQ DI, $0xf0 - JB lz4_snappy_ll_end - -lz4_snappy_ll_loop: - INCQ DX - CMPQ DX, BX - JAE lz4_snappy_corrupt - MOVBQZX (DX), DI - ADDQ DI, R8 - CMPQ DI, $0xff - JEQ lz4_snappy_ll_loop - -lz4_snappy_ll_end: - LEAQ (DX)(R8*1), DI - ADDQ $0x04, R9 - CMPQ DI, BX - JAE lz4_snappy_corrupt - INCQ DX - INCQ DI - TESTQ R8, R8 - JZ lz4_snappy_lits_done - LEAQ (AX)(R8*1), R10 - CMPQ R10, CX - JAE lz4_snappy_dstfull - ADDQ R8, SI - LEAL -1(R8), R10 - CMPL R10, $0x3c - JB one_byte_lz4_snappy - CMPL R10, $0x00000100 - JB two_bytes_lz4_snappy - CMPL R10, $0x00010000 - JB three_bytes_lz4_snappy - CMPL R10, $0x01000000 - JB four_bytes_lz4_snappy - MOVB $0xfc, (AX) - MOVL R10, 1(AX) - ADDQ $0x05, AX - JMP memmove_long_lz4_snappy - -four_bytes_lz4_snappy: - MOVL R10, R11 - SHRL $0x10, R11 - MOVB $0xf8, (AX) - MOVW R10, 1(AX) - MOVB R11, 3(AX) - ADDQ $0x04, AX - JMP memmove_long_lz4_snappy - -three_bytes_lz4_snappy: - MOVB $0xf4, (AX) - MOVW R10, 1(AX) - ADDQ $0x03, AX - JMP memmove_long_lz4_snappy - -two_bytes_lz4_snappy: - MOVB $0xf0, (AX) - MOVB R10, 1(AX) - ADDQ $0x02, AX - CMPL R10, $0x40 - JB memmove_lz4_snappy - JMP memmove_long_lz4_snappy - -one_byte_lz4_snappy: - SHLB $0x02, R10 - MOVB R10, (AX) - ADDQ $0x01, AX - -memmove_lz4_snappy: - LEAQ (AX)(R8*1), R10 - - // genMemMoveShort - CMPQ R8, $0x08 - JBE emit_lit_memmove_lz4_snappy_memmove_move_8 - CMPQ R8, $0x10 - JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16 - CMPQ R8, $0x20 - JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32 - JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64 - -emit_lit_memmove_lz4_snappy_memmove_move_8: - MOVQ (DX), R11 - MOVQ R11, (AX) - JMP memmove_end_copy_lz4_snappy - -emit_lit_memmove_lz4_snappy_memmove_move_8through16: - MOVQ (DX), R11 - MOVQ -8(DX)(R8*1), DX - MOVQ R11, (AX) - MOVQ DX, -8(AX)(R8*1) - JMP memmove_end_copy_lz4_snappy - -emit_lit_memmove_lz4_snappy_memmove_move_17through32: - MOVOU (DX), X0 - MOVOU -16(DX)(R8*1), X1 - MOVOU X0, (AX) - MOVOU X1, -16(AX)(R8*1) - JMP memmove_end_copy_lz4_snappy - -emit_lit_memmove_lz4_snappy_memmove_move_33through64: - MOVOU (DX), X0 - MOVOU 16(DX), X1 - MOVOU -32(DX)(R8*1), X2 - MOVOU -16(DX)(R8*1), X3 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - -memmove_end_copy_lz4_snappy: - MOVQ R10, AX - JMP lz4_snappy_lits_emit_done - -memmove_long_lz4_snappy: - LEAQ (AX)(R8*1), R10 - - // genMemMoveLong - MOVOU (DX), X0 - MOVOU 16(DX), X1 - MOVOU -32(DX)(R8*1), X2 - MOVOU -16(DX)(R8*1), X3 - MOVQ R8, R12 - SHRQ $0x05, R12 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 - JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 - LEAQ -32(DX)(R13*1), R11 - LEAQ -32(AX)(R13*1), R14 - -emit_lit_memmove_long_lz4_snappylarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 - ADDQ $0x20, R13 - DECQ R12 - JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back - -emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32: - MOVOU -32(DX)(R13*1), X4 - MOVOU -16(DX)(R13*1), X5 - MOVOA X4, -32(AX)(R13*1) - 
MOVOA X5, -16(AX)(R13*1) - ADDQ $0x20, R13 - CMPQ R8, R13 - JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - MOVQ R10, AX - -lz4_snappy_lits_emit_done: - MOVQ DI, DX - -lz4_snappy_lits_done: - CMPQ DX, BX - JNE lz4_snappy_match - CMPQ R9, $0x04 - JEQ lz4_snappy_done - JMP lz4_snappy_corrupt - -lz4_snappy_match: - LEAQ 2(DX), DI - CMPQ DI, BX - JAE lz4_snappy_corrupt - MOVWQZX (DX), R8 - MOVQ DI, DX - TESTQ R8, R8 - JZ lz4_snappy_corrupt - CMPQ R8, SI - JA lz4_snappy_corrupt - CMPQ R9, $0x13 - JNE lz4_snappy_ml_done - -lz4_snappy_ml_loop: - MOVBQZX (DX), DI - INCQ DX - ADDQ DI, R9 - CMPQ DX, BX - JAE lz4_snappy_corrupt - CMPQ DI, $0xff - JEQ lz4_snappy_ml_loop - -lz4_snappy_ml_done: - ADDQ R9, SI - - // emitCopy -two_byte_offset_lz4_s2: - CMPL R9, $0x40 - JBE two_byte_offset_short_lz4_s2 - MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R9), R9 - ADDQ $0x03, AX - CMPQ AX, CX - JAE lz4_snappy_loop - JMP two_byte_offset_lz4_s2 - -two_byte_offset_short_lz4_s2: - MOVL R9, DI - SHLL $0x02, DI - CMPL R9, $0x0c - JAE emit_copy_three_lz4_s2 - CMPL R8, $0x00000800 - JAE emit_copy_three_lz4_s2 - LEAL -15(DI), DI - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, DI - MOVB DI, (AX) - ADDQ $0x02, AX - JMP lz4_snappy_loop - -emit_copy_three_lz4_s2: - LEAL -2(DI), DI - MOVB DI, (AX) - MOVW R8, 1(AX) - ADDQ $0x03, AX - JMP lz4_snappy_loop - -lz4_snappy_done: - MOVQ dst_base+0(FP), CX - SUBQ CX, AX - MOVQ SI, uncompressed+48(FP) - MOVQ AX, dstUsed+56(FP) - RET - -lz4_snappy_corrupt: - XORQ AX, AX - LEAQ -1(AX), SI - MOVQ SI, uncompressed+48(FP) - RET - -lz4_snappy_dstfull: - XORQ AX, AX - LEAQ -2(AX), SI - MOVQ SI, uncompressed+48(FP) - RET - -// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) -// Requires: SSE2 -TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64 - XORQ SI, SI - MOVQ dst_base+0(FP), AX - MOVQ dst_len+8(FP), CX - MOVQ src_base+24(FP), DX - MOVQ src_len+32(FP), BX - LEAQ (DX)(BX*1), BX - LEAQ -8(AX)(CX*1), CX - -lz4s_snappy_loop: - CMPQ DX, BX - JAE lz4s_snappy_corrupt - CMPQ AX, CX - JAE lz4s_snappy_dstfull - MOVBQZX (DX), DI - MOVQ DI, R8 - MOVQ DI, R9 - SHRQ $0x04, R8 - ANDQ $0x0f, R9 - CMPQ DI, $0xf0 - JB lz4s_snappy_ll_end - -lz4s_snappy_ll_loop: - INCQ DX - CMPQ DX, BX - JAE lz4s_snappy_corrupt - MOVBQZX (DX), DI - ADDQ DI, R8 - CMPQ DI, $0xff - JEQ lz4s_snappy_ll_loop - -lz4s_snappy_ll_end: - LEAQ (DX)(R8*1), DI - ADDQ $0x03, R9 - CMPQ DI, BX - JAE lz4s_snappy_corrupt - INCQ DX - INCQ DI - TESTQ R8, R8 - JZ lz4s_snappy_lits_done - LEAQ (AX)(R8*1), R10 - CMPQ R10, CX - JAE lz4s_snappy_dstfull - ADDQ R8, SI - LEAL -1(R8), R10 - CMPL R10, $0x3c - JB one_byte_lz4s_snappy - CMPL R10, $0x00000100 - JB two_bytes_lz4s_snappy - CMPL R10, $0x00010000 - JB three_bytes_lz4s_snappy - CMPL R10, $0x01000000 - JB four_bytes_lz4s_snappy - MOVB $0xfc, (AX) - MOVL R10, 1(AX) - ADDQ $0x05, AX - JMP memmove_long_lz4s_snappy - -four_bytes_lz4s_snappy: - MOVL R10, R11 - SHRL $0x10, R11 - MOVB $0xf8, (AX) - MOVW R10, 1(AX) - MOVB R11, 3(AX) - ADDQ $0x04, AX - JMP memmove_long_lz4s_snappy - -three_bytes_lz4s_snappy: - MOVB $0xf4, (AX) - MOVW R10, 1(AX) - ADDQ $0x03, AX - JMP memmove_long_lz4s_snappy - -two_bytes_lz4s_snappy: - MOVB $0xf0, (AX) - MOVB R10, 1(AX) - ADDQ $0x02, AX - CMPL R10, $0x40 - JB memmove_lz4s_snappy - JMP memmove_long_lz4s_snappy - -one_byte_lz4s_snappy: - SHLB $0x02, R10 - MOVB R10, (AX) - ADDQ $0x01, AX - -memmove_lz4s_snappy: - LEAQ (AX)(R8*1), 
R10 - - // genMemMoveShort - CMPQ R8, $0x08 - JBE emit_lit_memmove_lz4s_snappy_memmove_move_8 - CMPQ R8, $0x10 - JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16 - CMPQ R8, $0x20 - JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32 - JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64 - -emit_lit_memmove_lz4s_snappy_memmove_move_8: - MOVQ (DX), R11 - MOVQ R11, (AX) - JMP memmove_end_copy_lz4s_snappy - -emit_lit_memmove_lz4s_snappy_memmove_move_8through16: - MOVQ (DX), R11 - MOVQ -8(DX)(R8*1), DX - MOVQ R11, (AX) - MOVQ DX, -8(AX)(R8*1) - JMP memmove_end_copy_lz4s_snappy - -emit_lit_memmove_lz4s_snappy_memmove_move_17through32: - MOVOU (DX), X0 - MOVOU -16(DX)(R8*1), X1 - MOVOU X0, (AX) - MOVOU X1, -16(AX)(R8*1) - JMP memmove_end_copy_lz4s_snappy - -emit_lit_memmove_lz4s_snappy_memmove_move_33through64: - MOVOU (DX), X0 - MOVOU 16(DX), X1 - MOVOU -32(DX)(R8*1), X2 - MOVOU -16(DX)(R8*1), X3 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - -memmove_end_copy_lz4s_snappy: - MOVQ R10, AX - JMP lz4s_snappy_lits_emit_done - -memmove_long_lz4s_snappy: - LEAQ (AX)(R8*1), R10 - - // genMemMoveLong - MOVOU (DX), X0 - MOVOU 16(DX), X1 - MOVOU -32(DX)(R8*1), X2 - MOVOU -16(DX)(R8*1), X3 - MOVQ R8, R12 - SHRQ $0x05, R12 - MOVQ AX, R11 - ANDL $0x0000001f, R11 - MOVQ $0x00000040, R13 - SUBQ R11, R13 - DECQ R12 - JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 - LEAQ -32(DX)(R13*1), R11 - LEAQ -32(AX)(R13*1), R14 - -emit_lit_memmove_long_lz4s_snappylarge_big_loop_back: - MOVOU (R11), X4 - MOVOU 16(R11), X5 - MOVOA X4, (R14) - MOVOA X5, 16(R14) - ADDQ $0x20, R14 - ADDQ $0x20, R11 - ADDQ $0x20, R13 - DECQ R12 - JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back - -emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32: - MOVOU -32(DX)(R13*1), X4 - MOVOU -16(DX)(R13*1), X5 - MOVOA X4, -32(AX)(R13*1) - MOVOA X5, -16(AX)(R13*1) - ADDQ $0x20, R13 - CMPQ R8, R13 - JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 - MOVOU X0, (AX) - MOVOU X1, 16(AX) - MOVOU X2, -32(AX)(R8*1) - MOVOU X3, -16(AX)(R8*1) - MOVQ R10, AX - -lz4s_snappy_lits_emit_done: - MOVQ DI, DX - -lz4s_snappy_lits_done: - CMPQ DX, BX - JNE lz4s_snappy_match - CMPQ R9, $0x03 - JEQ lz4s_snappy_done - JMP lz4s_snappy_corrupt - -lz4s_snappy_match: - CMPQ R9, $0x03 - JEQ lz4s_snappy_loop - LEAQ 2(DX), DI - CMPQ DI, BX - JAE lz4s_snappy_corrupt - MOVWQZX (DX), R8 - MOVQ DI, DX - TESTQ R8, R8 - JZ lz4s_snappy_corrupt - CMPQ R8, SI - JA lz4s_snappy_corrupt - CMPQ R9, $0x12 - JNE lz4s_snappy_ml_done - -lz4s_snappy_ml_loop: - MOVBQZX (DX), DI - INCQ DX - ADDQ DI, R9 - CMPQ DX, BX - JAE lz4s_snappy_corrupt - CMPQ DI, $0xff - JEQ lz4s_snappy_ml_loop - -lz4s_snappy_ml_done: - ADDQ R9, SI - - // emitCopy -two_byte_offset_lz4_s2: - CMPL R9, $0x40 - JBE two_byte_offset_short_lz4_s2 - MOVB $0xee, (AX) - MOVW R8, 1(AX) - LEAL -60(R9), R9 - ADDQ $0x03, AX - CMPQ AX, CX - JAE lz4s_snappy_loop - JMP two_byte_offset_lz4_s2 - -two_byte_offset_short_lz4_s2: - MOVL R9, DI - SHLL $0x02, DI - CMPL R9, $0x0c - JAE emit_copy_three_lz4_s2 - CMPL R8, $0x00000800 - JAE emit_copy_three_lz4_s2 - LEAL -15(DI), DI - MOVB R8, 1(AX) - SHRL $0x08, R8 - SHLL $0x05, R8 - ORL R8, DI - MOVB DI, (AX) - ADDQ $0x02, AX - JMP lz4s_snappy_loop - -emit_copy_three_lz4_s2: - LEAL -2(DI), DI - MOVB DI, (AX) - MOVW R8, 1(AX) - ADDQ $0x03, AX - JMP lz4s_snappy_loop - -lz4s_snappy_done: - MOVQ dst_base+0(FP), CX - SUBQ CX, AX - MOVQ SI, uncompressed+48(FP) - MOVQ AX, dstUsed+56(FP) - RET - 
-lz4s_snappy_corrupt: - XORQ AX, AX - LEAQ -1(AX), SI - MOVQ SI, uncompressed+48(FP) - RET - -lz4s_snappy_dstfull: - XORQ AX, AX - LEAQ -2(AX), SI - MOVQ SI, uncompressed+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go deleted file mode 100644 index 4229957b9..000000000 --- a/vendor/github.com/klauspost/compress/s2/index.go +++ /dev/null @@ -1,602 +0,0 @@ -// Copyright (c) 2022+ Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "io" - "sort" -) - -const ( - S2IndexHeader = "s2idx\x00" - S2IndexTrailer = "\x00xdi2s" - maxIndexEntries = 1 << 16 - // If distance is less than this, we do not add the entry. - minIndexDist = 1 << 20 -) - -// Index represents an S2/Snappy index. -type Index struct { - TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown. - TotalCompressed int64 // Total Compressed size if known. Will be -1 if unknown. - info []struct { - compressedOffset int64 - uncompressedOffset int64 - } - estBlockUncomp int64 -} - -func (i *Index) reset(maxBlock int) { - i.estBlockUncomp = int64(maxBlock) - i.TotalCompressed = -1 - i.TotalUncompressed = -1 - if len(i.info) > 0 { - i.info = i.info[:0] - } -} - -// allocInfos will allocate an empty slice of infos. -func (i *Index) allocInfos(n int) { - if n > maxIndexEntries { - panic("n > maxIndexEntries") - } - i.info = make([]struct { - compressedOffset int64 - uncompressedOffset int64 - }, 0, n) -} - -// add an uncompressed and compressed pair. -// Entries must be sent in order. -func (i *Index) add(compressedOffset, uncompressedOffset int64) error { - if i == nil { - return nil - } - lastIdx := len(i.info) - 1 - if lastIdx >= 0 { - latest := i.info[lastIdx] - if latest.uncompressedOffset == uncompressedOffset { - // Uncompressed didn't change, don't add entry, - // but update start index. - latest.compressedOffset = compressedOffset - i.info[lastIdx] = latest - return nil - } - if latest.uncompressedOffset > uncompressedOffset { - return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) - } - if latest.compressedOffset > compressedOffset { - return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) - } - if latest.uncompressedOffset+minIndexDist > uncompressedOffset { - // Only add entry if distance is large enough. - return nil - } - } - i.info = append(i.info, struct { - compressedOffset int64 - uncompressedOffset int64 - }{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset}) - return nil -} - -// Find the offset at or before the wanted (uncompressed) offset. -// If offset is 0 or positive it is the offset from the beginning of the file. -// If the uncompressed size is known, the offset must be within the file. -// If an offset outside the file is requested io.ErrUnexpectedEOF is returned. -// If the offset is negative, it is interpreted as the distance from the end of the file, -// where -1 represents the last byte. -// If offset from the end of the file is requested, but size is unknown, -// ErrUnsupported will be returned. 
-func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) { - if i.TotalUncompressed < 0 { - return 0, 0, ErrCorrupt - } - if offset < 0 { - offset = i.TotalUncompressed + offset - if offset < 0 { - return 0, 0, io.ErrUnexpectedEOF - } - } - if offset > i.TotalUncompressed { - return 0, 0, io.ErrUnexpectedEOF - } - if len(i.info) > 200 { - n := sort.Search(len(i.info), func(n int) bool { - return i.info[n].uncompressedOffset > offset - }) - if n == 0 { - n = 1 - } - return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil - } - for _, info := range i.info { - if info.uncompressedOffset > offset { - break - } - compressedOff = info.compressedOffset - uncompressedOff = info.uncompressedOffset - } - return compressedOff, uncompressedOff, nil -} - -// reduce to stay below maxIndexEntries -func (i *Index) reduce() { - if len(i.info) < maxIndexEntries && i.estBlockUncomp >= minIndexDist { - return - } - - // Algorithm, keep 1, remove removeN entries... - removeN := (len(i.info) + 1) / maxIndexEntries - src := i.info - j := 0 - - // Each block should be at least 1MB, but don't reduce below 1000 entries. - for i.estBlockUncomp*(int64(removeN)+1) < minIndexDist && len(i.info)/(removeN+1) > 1000 { - removeN++ - } - for idx := 0; idx < len(src); idx++ { - i.info[j] = src[idx] - j++ - idx += removeN - } - i.info = i.info[:j] - // Update maxblock estimate. - i.estBlockUncomp += i.estBlockUncomp * int64(removeN) -} - -func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte { - i.reduce() - var tmp [binary.MaxVarintLen64]byte - - initSize := len(b) - // We make the start a skippable header+size. - b = append(b, ChunkTypeIndex, 0, 0, 0) - b = append(b, []byte(S2IndexHeader)...) - // Total Uncompressed size - n := binary.PutVarint(tmp[:], uncompTotal) - b = append(b, tmp[:n]...) - // Total Compressed size - n = binary.PutVarint(tmp[:], compTotal) - b = append(b, tmp[:n]...) - // Put EstBlockUncomp size - n = binary.PutVarint(tmp[:], i.estBlockUncomp) - b = append(b, tmp[:n]...) - // Put length - n = binary.PutVarint(tmp[:], int64(len(i.info))) - b = append(b, tmp[:n]...) - - // Check if we should add uncompressed offsets - var hasUncompressed byte - for idx, info := range i.info { - if idx == 0 { - if info.uncompressedOffset != 0 { - hasUncompressed = 1 - break - } - continue - } - if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp { - hasUncompressed = 1 - break - } - } - b = append(b, hasUncompressed) - - // Add each entry - if hasUncompressed == 1 { - for idx, info := range i.info { - uOff := info.uncompressedOffset - if idx > 0 { - prev := i.info[idx-1] - uOff -= prev.uncompressedOffset + (i.estBlockUncomp) - } - n = binary.PutVarint(tmp[:], uOff) - b = append(b, tmp[:n]...) - } - } - - // Initial compressed size estimate. - cPredict := i.estBlockUncomp / 2 - - for idx, info := range i.info { - cOff := info.compressedOffset - if idx > 0 { - prev := i.info[idx-1] - cOff -= prev.compressedOffset + cPredict - // Update compressed size prediction, with half the error. - cPredict += cOff / 2 - } - n = binary.PutVarint(tmp[:], cOff) - b = append(b, tmp[:n]...) - } - - // Add Total Size. - // Stored as fixed size for easier reading. - binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer))) - b = append(b, tmp[:4]...) - // Trailer - b = append(b, []byte(S2IndexTrailer)...) 
- - // Update size - chunkLen := len(b) - initSize - skippableFrameHeader - b[initSize+1] = uint8(chunkLen >> 0) - b[initSize+2] = uint8(chunkLen >> 8) - b[initSize+3] = uint8(chunkLen >> 16) - //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal) - return b -} - -// Load a binary index. -// A zero value Index can be used or a previous one can be reused. -func (i *Index) Load(b []byte) ([]byte, error) { - if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) { - return b, io.ErrUnexpectedEOF - } - if b[0] != ChunkTypeIndex { - return b, ErrCorrupt - } - chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 - b = b[4:] - - // Validate we have enough... - if len(b) < chunkLen { - return b, io.ErrUnexpectedEOF - } - if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { - return b, ErrUnsupported - } - b = b[len(S2IndexHeader):] - - // Total Uncompressed - if v, n := binary.Varint(b); n <= 0 || v < 0 { - return b, ErrCorrupt - } else { - i.TotalUncompressed = v - b = b[n:] - } - - // Total Compressed - if v, n := binary.Varint(b); n <= 0 { - return b, ErrCorrupt - } else { - i.TotalCompressed = v - b = b[n:] - } - - // Read EstBlockUncomp - if v, n := binary.Varint(b); n <= 0 { - return b, ErrCorrupt - } else { - if v < 0 { - return b, ErrCorrupt - } - i.estBlockUncomp = v - b = b[n:] - } - - var entries int - if v, n := binary.Varint(b); n <= 0 { - return b, ErrCorrupt - } else { - if v < 0 || v > maxIndexEntries { - return b, ErrCorrupt - } - entries = int(v) - b = b[n:] - } - if cap(i.info) < entries { - i.allocInfos(entries) - } - i.info = i.info[:entries] - - if len(b) < 1 { - return b, io.ErrUnexpectedEOF - } - hasUncompressed := b[0] - b = b[1:] - if hasUncompressed&1 != hasUncompressed { - return b, ErrCorrupt - } - - // Add each uncompressed entry - for idx := range i.info { - var uOff int64 - if hasUncompressed != 0 { - // Load delta - if v, n := binary.Varint(b); n <= 0 { - return b, ErrCorrupt - } else { - uOff = v - b = b[n:] - } - } - - if idx > 0 { - prev := i.info[idx-1].uncompressedOffset - uOff += prev + (i.estBlockUncomp) - if uOff <= prev { - return b, ErrCorrupt - } - } - if uOff < 0 { - return b, ErrCorrupt - } - i.info[idx].uncompressedOffset = uOff - } - - // Initial compressed size estimate. - cPredict := i.estBlockUncomp / 2 - - // Add each compressed entry - for idx := range i.info { - var cOff int64 - if v, n := binary.Varint(b); n <= 0 { - return b, ErrCorrupt - } else { - cOff = v - b = b[n:] - } - - if idx > 0 { - // Update compressed size prediction, with half the error. - cPredictNew := cPredict + cOff/2 - - prev := i.info[idx-1].compressedOffset - cOff += prev + cPredict - if cOff <= prev { - return b, ErrCorrupt - } - cPredict = cPredictNew - } - if cOff < 0 { - return b, ErrCorrupt - } - i.info[idx].compressedOffset = cOff - } - if len(b) < 4+len(S2IndexTrailer) { - return b, io.ErrUnexpectedEOF - } - // Skip size... - b = b[4:] - - // Check trailer... - if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) { - return b, ErrCorrupt - } - return b[len(S2IndexTrailer):], nil -} - -// LoadStream will load an index from the end of the supplied stream. -// ErrUnsupported will be returned if the signature cannot be found. -// ErrCorrupt will be returned if unexpected values are found. -// io.ErrUnexpectedEOF is returned if there are too few bytes. -// IO errors are returned as-is. -func (i *Index) LoadStream(rs io.ReadSeeker) error { - // Go to end. 
- _, err := rs.Seek(-10, io.SeekEnd) - if err != nil { - return err - } - var tmp [10]byte - _, err = io.ReadFull(rs, tmp[:]) - if err != nil { - return err - } - // Check trailer... - if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) { - return ErrUnsupported - } - sz := binary.LittleEndian.Uint32(tmp[:4]) - if sz > maxChunkSize+skippableFrameHeader { - return ErrCorrupt - } - _, err = rs.Seek(-int64(sz), io.SeekEnd) - if err != nil { - return err - } - - // Read index. - buf := make([]byte, sz) - _, err = io.ReadFull(rs, buf) - if err != nil { - return err - } - _, err = i.Load(buf) - return err -} - -// IndexStream will return an index for a stream. -// The stream structure will be checked, but -// data within blocks is not verified. -// The returned index can either be appended to the end of the stream -// or stored separately. -func IndexStream(r io.Reader) ([]byte, error) { - var i Index - var buf [maxChunkSize]byte - var readHeader bool - for { - _, err := io.ReadFull(r, buf[:4]) - if err != nil { - if err == io.EOF { - return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil - } - return nil, err - } - // Start of this chunk. - startChunk := i.TotalCompressed - i.TotalCompressed += 4 - - chunkType := buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - return nil, ErrCorrupt - } - readHeader = true - } - chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16 - if chunkLen < checksumSize { - return nil, ErrCorrupt - } - - i.TotalCompressed += int64(chunkLen) - _, err = io.ReadFull(r, buf[:chunkLen]) - if err != nil { - return nil, io.ErrUnexpectedEOF - } - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - // Skip checksum. - dLen, err := DecodedLen(buf[checksumSize:]) - if err != nil { - return nil, err - } - if dLen > maxBlockSize { - return nil, ErrCorrupt - } - if i.estBlockUncomp == 0 { - // Use first block for estimate... - i.estBlockUncomp = int64(dLen) - } - err = i.add(startChunk, i.TotalUncompressed) - if err != nil { - return nil, err - } - i.TotalUncompressed += int64(dLen) - continue - case chunkTypeUncompressedData: - n2 := chunkLen - checksumSize - if n2 > maxBlockSize { - return nil, ErrCorrupt - } - if i.estBlockUncomp == 0 { - // Use first block for estimate... - i.estBlockUncomp = int64(n2) - } - err = i.add(startChunk, i.TotalUncompressed) - if err != nil { - return nil, err - } - i.TotalUncompressed += int64(n2) - continue - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - return nil, ErrCorrupt - } - - if string(buf[:len(magicBody)]) != magicBody { - if string(buf[:len(magicBody)]) != magicBodySnappy { - return nil, ErrCorrupt - } - } - - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - return nil, ErrUnsupported - } - if chunkLen > maxChunkSize { - return nil, ErrUnsupported - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - } -} - -// JSON returns the index as JSON text. -func (i *Index) JSON() []byte { - type offset struct { - CompressedOffset int64 `json:"compressed"` - UncompressedOffset int64 `json:"uncompressed"` - } - x := struct { - TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. 
Will be -1 if unknown. - TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown. - Offsets []offset `json:"offsets"` - EstBlockUncomp int64 `json:"est_block_uncompressed"` - }{ - TotalUncompressed: i.TotalUncompressed, - TotalCompressed: i.TotalCompressed, - EstBlockUncomp: i.estBlockUncomp, - } - for _, v := range i.info { - x.Offsets = append(x.Offsets, offset{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset}) - } - b, _ := json.MarshalIndent(x, "", " ") - return b -} - -// RemoveIndexHeaders will trim all headers and trailers from a given index. -// This is expected to save 20 bytes. -// These can be restored using RestoreIndexHeaders. -// This removes a layer of security, but is the most compact representation. -// Returns nil if headers contains errors. -// The returned slice references the provided slice. -func RemoveIndexHeaders(b []byte) []byte { - const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4 - if len(b) <= save { - return nil - } - if b[0] != ChunkTypeIndex { - return nil - } - chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 - b = b[4:] - - // Validate we have enough... - if len(b) < chunkLen { - return nil - } - b = b[:chunkLen] - - if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { - return nil - } - b = b[len(S2IndexHeader):] - if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) { - return nil - } - b = bytes.TrimSuffix(b, []byte(S2IndexTrailer)) - - if len(b) < 4 { - return nil - } - return b[:len(b)-4] -} - -// RestoreIndexHeaders will index restore headers removed by RemoveIndexHeaders. -// No error checking is performed on the input. -// If a 0 length slice is sent, it is returned without modification. -func RestoreIndexHeaders(in []byte) []byte { - if len(in) == 0 { - return in - } - b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4) - b = append(b, ChunkTypeIndex, 0, 0, 0) - b = append(b, []byte(S2IndexHeader)...) - b = append(b, in...) - - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer))) - b = append(b, tmp[:4]...) - // Trailer - b = append(b, []byte(S2IndexTrailer)...) - - chunkLen := len(b) - skippableFrameHeader - b[1] = uint8(chunkLen >> 0) - b[2] = uint8(chunkLen >> 8) - b[3] = uint8(chunkLen >> 16) - return b -} diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go deleted file mode 100644 index 46ed908e3..000000000 --- a/vendor/github.com/klauspost/compress/s2/lz4convert.go +++ /dev/null @@ -1,585 +0,0 @@ -// Copyright (c) 2022 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "encoding/binary" - "errors" - "fmt" -) - -// LZ4Converter provides conversion from LZ4 blocks as defined here: -// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md -type LZ4Converter struct { -} - -// ErrDstTooSmall is returned when provided destination is too small. -var ErrDstTooSmall = errors.New("s2: destination too small") - -// ConvertBlock will convert an LZ4 block and append it as an S2 -// block without block length to dst. -// The uncompressed size is returned as well. -// dst must have capacity to contain the entire compressed block. 
-func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) { - if len(src) == 0 { - return dst, 0, nil - } - const debug = false - const inline = true - const lz4MinMatch = 4 - - s, d := 0, len(dst) - dst = dst[:cap(dst)] - if !debug && hasAmd64Asm { - res, sz := cvtLZ4BlockAsm(dst[d:], src) - if res < 0 { - const ( - errCorrupt = -1 - errDstTooSmall = -2 - ) - switch res { - case errCorrupt: - return nil, 0, ErrCorrupt - case errDstTooSmall: - return nil, 0, ErrDstTooSmall - default: - return nil, 0, fmt.Errorf("unexpected result: %d", res) - } - } - if d+sz > len(dst) { - return nil, 0, ErrDstTooSmall - } - return dst[:d+sz], res, nil - } - - dLimit := len(dst) - 10 - var lastOffset uint16 - var uncompressed int - if debug { - fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) - } - - for { - if s >= len(src) { - return dst[:d], 0, ErrCorrupt - } - // Read literal info - token := src[s] - ll := int(token >> 4) - ml := int(lz4MinMatch + (token & 0xf)) - - // If upper nibble is 15, literal length is extended - if token >= 0xf0 { - for { - s++ - if s >= len(src) { - if debug { - fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return dst[:d], 0, ErrCorrupt - } - val := src[s] - ll += int(val) - if val != 255 { - break - } - } - } - // Skip past token - if s+ll >= len(src) { - if debug { - fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) - } - return nil, 0, ErrCorrupt - } - s++ - if ll > 0 { - if d+ll > dLimit { - return nil, 0, ErrDstTooSmall - } - if debug { - fmt.Printf("emit %d literals\n", ll) - } - d += emitLiteralGo(dst[d:], src[s:s+ll]) - s += ll - uncompressed += ll - } - - // Check if we are done... - if s == len(src) && ml == lz4MinMatch { - break - } - // 2 byte offset - if s >= len(src)-2 { - if debug { - fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) - } - return nil, 0, ErrCorrupt - } - offset := binary.LittleEndian.Uint16(src[s:]) - s += 2 - if offset == 0 { - if debug { - fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) - } - return nil, 0, ErrCorrupt - } - if int(offset) > uncompressed { - if debug { - fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) - } - return nil, 0, ErrCorrupt - } - - if ml == lz4MinMatch+15 { - for { - if s >= len(src) { - if debug { - fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - val := src[s] - s++ - ml += int(val) - if val != 255 { - if s >= len(src) { - if debug { - fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - break - } - } - } - if offset == lastOffset { - if debug { - fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) - } - if !inline { - d += emitRepeat16(dst[d:], offset, ml) - } else { - length := ml - dst := dst[d:] - for len(dst) > 5 { - // Repeat offset, make length cheaper - length -= 4 - if length <= 4 { - dst[0] = uint8(length)<<2 | tagCopy1 - dst[1] = 0 - d += 2 - break - } - if length < 8 && offset < 2048 { - // Encode WITH offset - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 - d += 2 - break - } - if length < (1<<8)+4 { - length -= 4 - dst[2] = uint8(length) - dst[1] = 0 - dst[0] = 5<<2 | tagCopy1 - d += 3 - break - } - if length < (1<<16)+(1<<8) { - length -= 1 << 8 - dst[3] = uint8(length >> 8) - dst[2] = uint8(length >> 0) - dst[1] = 0 - dst[0] = 6<<2 | tagCopy1 - d += 4 - break - } - const 
maxRepeat = (1 << 24) - 1 - length -= 1 << 16 - left := 0 - if length > maxRepeat { - left = length - maxRepeat + 4 - length = maxRepeat - 4 - } - dst[4] = uint8(length >> 16) - dst[3] = uint8(length >> 8) - dst[2] = uint8(length >> 0) - dst[1] = 0 - dst[0] = 7<<2 | tagCopy1 - if left > 0 { - d += 5 + emitRepeat16(dst[5:], offset, left) - break - } - d += 5 - break - } - } - } else { - if debug { - fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) - } - if !inline { - d += emitCopy16(dst[d:], offset, ml) - } else { - length := ml - dst := dst[d:] - for len(dst) > 5 { - // Offset no more than 2 bytes. - if length > 64 { - off := 3 - if offset < 2048 { - // emit 8 bytes as tagCopy1, rest as repeats. - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 - length -= 8 - off = 2 - } else { - // Emit a length 60 copy, encoded as 3 bytes. - // Emit remaining as repeat value (minimum 4 bytes). - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = 59<<2 | tagCopy2 - length -= 60 - } - // Emit remaining as repeats, at least 4 bytes remain. - d += off + emitRepeat16(dst[off:], offset, length) - break - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = uint8(length-1)<<2 | tagCopy2 - d += 3 - break - } - // Emit the remaining copy, encoded as 2 bytes. - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - d += 2 - break - } - } - lastOffset = offset - } - uncompressed += ml - if d > dLimit { - return nil, 0, ErrDstTooSmall - } - } - - return dst[:d], uncompressed, nil -} - -// ConvertBlockSnappy will convert an LZ4 block and append it -// as a Snappy block without block length to dst. -// The uncompressed size is returned as well. -// dst must have capacity to contain the entire compressed block. 
-func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { - if len(src) == 0 { - return dst, 0, nil - } - const debug = false - const lz4MinMatch = 4 - - s, d := 0, len(dst) - dst = dst[:cap(dst)] - // Use assembly when possible - if !debug && hasAmd64Asm { - res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src) - if res < 0 { - const ( - errCorrupt = -1 - errDstTooSmall = -2 - ) - switch res { - case errCorrupt: - return nil, 0, ErrCorrupt - case errDstTooSmall: - return nil, 0, ErrDstTooSmall - default: - return nil, 0, fmt.Errorf("unexpected result: %d", res) - } - } - if d+sz > len(dst) { - return nil, 0, ErrDstTooSmall - } - return dst[:d+sz], res, nil - } - - dLimit := len(dst) - 10 - var uncompressed int - if debug { - fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) - } - - for { - if s >= len(src) { - return nil, 0, ErrCorrupt - } - // Read literal info - token := src[s] - ll := int(token >> 4) - ml := int(lz4MinMatch + (token & 0xf)) - - // If upper nibble is 15, literal length is extended - if token >= 0xf0 { - for { - s++ - if s >= len(src) { - if debug { - fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - val := src[s] - ll += int(val) - if val != 255 { - break - } - } - } - // Skip past token - if s+ll >= len(src) { - if debug { - fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) - } - return nil, 0, ErrCorrupt - } - s++ - if ll > 0 { - if d+ll > dLimit { - return nil, 0, ErrDstTooSmall - } - if debug { - fmt.Printf("emit %d literals\n", ll) - } - d += emitLiteralGo(dst[d:], src[s:s+ll]) - s += ll - uncompressed += ll - } - - // Check if we are done... - if s == len(src) && ml == lz4MinMatch { - break - } - // 2 byte offset - if s >= len(src)-2 { - if debug { - fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) - } - return nil, 0, ErrCorrupt - } - offset := binary.LittleEndian.Uint16(src[s:]) - s += 2 - if offset == 0 { - if debug { - fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) - } - return nil, 0, ErrCorrupt - } - if int(offset) > uncompressed { - if debug { - fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) - } - return nil, 0, ErrCorrupt - } - - if ml == lz4MinMatch+15 { - for { - if s >= len(src) { - if debug { - fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - val := src[s] - s++ - ml += int(val) - if val != 255 { - if s >= len(src) { - if debug { - fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - break - } - } - } - if debug { - fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) - } - length := ml - // d += emitCopyNoRepeat(dst[d:], int(offset), ml) - for length > 0 { - if d >= dLimit { - return nil, 0, ErrDstTooSmall - } - - // Offset no more than 2 bytes. - if length > 64 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[d+2] = uint8(offset >> 8) - dst[d+1] = uint8(offset) - dst[d+0] = 63<<2 | tagCopy2 - length -= 64 - d += 3 - continue - } - if length >= 12 || offset >= 2048 || length < 4 { - // Emit the remaining copy, encoded as 3 bytes. - dst[d+2] = uint8(offset >> 8) - dst[d+1] = uint8(offset) - dst[d+0] = uint8(length-1)<<2 | tagCopy2 - d += 3 - break - } - // Emit the remaining copy, encoded as 2 bytes. 
- dst[d+1] = uint8(offset) - dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - d += 2 - break - } - uncompressed += ml - if d > dLimit { - return nil, 0, ErrDstTooSmall - } - } - - return dst[:d], uncompressed, nil -} - -// emitRepeat writes a repeat chunk and returns the number of bytes written. -// Length must be at least 4 and < 1<<24 -func emitRepeat16(dst []byte, offset uint16, length int) int { - // Repeat offset, make length cheaper - length -= 4 - if length <= 4 { - dst[0] = uint8(length)<<2 | tagCopy1 - dst[1] = 0 - return 2 - } - if length < 8 && offset < 2048 { - // Encode WITH offset - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 - return 2 - } - if length < (1<<8)+4 { - length -= 4 - dst[2] = uint8(length) - dst[1] = 0 - dst[0] = 5<<2 | tagCopy1 - return 3 - } - if length < (1<<16)+(1<<8) { - length -= 1 << 8 - dst[3] = uint8(length >> 8) - dst[2] = uint8(length >> 0) - dst[1] = 0 - dst[0] = 6<<2 | tagCopy1 - return 4 - } - const maxRepeat = (1 << 24) - 1 - length -= 1 << 16 - left := 0 - if length > maxRepeat { - left = length - maxRepeat + 4 - length = maxRepeat - 4 - } - dst[4] = uint8(length >> 16) - dst[3] = uint8(length >> 8) - dst[2] = uint8(length >> 0) - dst[1] = 0 - dst[0] = 7<<2 | tagCopy1 - if left > 0 { - return 5 + emitRepeat16(dst[5:], offset, left) - } - return 5 -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= math.MaxUint16 -// 4 <= length && length <= math.MaxUint32 -func emitCopy16(dst []byte, offset uint16, length int) int { - // Offset no more than 2 bytes. - if length > 64 { - off := 3 - if offset < 2048 { - // emit 8 bytes as tagCopy1, rest as repeats. - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 - length -= 8 - off = 2 - } else { - // Emit a length 60 copy, encoded as 3 bytes. - // Emit remaining as repeat value (minimum 4 bytes). - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = 59<<2 | tagCopy2 - length -= 60 - } - // Emit remaining as repeats, at least 4 bytes remain. - return off + emitRepeat16(dst[off:], offset, length) - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = uint8(length-1)<<2 | tagCopy2 - return 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - return 2 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 0 <= len(lit) && len(lit) <= math.MaxUint32 -func emitLiteralGo(dst, lit []byte) int { - if len(lit) == 0 { - return 0 - } - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[1] = uint8(n) - dst[0] = 60<<2 | tagLiteral - i = 2 - case n < 1<<16: - dst[2] = uint8(n >> 8) - dst[1] = uint8(n) - dst[0] = 61<<2 | tagLiteral - i = 3 - case n < 1<<24: - dst[3] = uint8(n >> 16) - dst[2] = uint8(n >> 8) - dst[1] = uint8(n) - dst[0] = 62<<2 | tagLiteral - i = 4 - default: - dst[4] = uint8(n >> 24) - dst[3] = uint8(n >> 16) - dst[2] = uint8(n >> 8) - dst[1] = uint8(n) - dst[0] = 63<<2 | tagLiteral - i = 5 - } - return i + copy(dst[i:], lit) -} diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go deleted file mode 100644 index 000f39719..000000000 --- a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go +++ /dev/null @@ -1,467 +0,0 @@ -// Copyright (c) 2022 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "encoding/binary" - "fmt" -) - -// LZ4sConverter provides conversion from LZ4s. -// (Intel modified LZ4 Blocks) -// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf -// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format. -// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData. -// The LZ4s block returned by the Intel® QAT hardware can be used by an external -// software post-processing to generate other compressed data formats. -// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses -// the same high-level formatting as LZ4 block format with the following encoding changes: -// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte. -// ONLY "Min match of 4 bytes" is supported. -type LZ4sConverter struct { -} - -// ConvertBlock will convert an LZ4s block and append it as an S2 -// block without block length to dst. -// The uncompressed size is returned as well. -// dst must have capacity to contain the entire compressed block. 
-func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) { - if len(src) == 0 { - return dst, 0, nil - } - const debug = false - const inline = true - const lz4MinMatch = 3 - - s, d := 0, len(dst) - dst = dst[:cap(dst)] - if !debug && hasAmd64Asm { - res, sz := cvtLZ4sBlockAsm(dst[d:], src) - if res < 0 { - const ( - errCorrupt = -1 - errDstTooSmall = -2 - ) - switch res { - case errCorrupt: - return nil, 0, ErrCorrupt - case errDstTooSmall: - return nil, 0, ErrDstTooSmall - default: - return nil, 0, fmt.Errorf("unexpected result: %d", res) - } - } - if d+sz > len(dst) { - return nil, 0, ErrDstTooSmall - } - return dst[:d+sz], res, nil - } - - dLimit := len(dst) - 10 - var lastOffset uint16 - var uncompressed int - if debug { - fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) - } - - for { - if s >= len(src) { - return dst[:d], 0, ErrCorrupt - } - // Read literal info - token := src[s] - ll := int(token >> 4) - ml := int(lz4MinMatch + (token & 0xf)) - - // If upper nibble is 15, literal length is extended - if token >= 0xf0 { - for { - s++ - if s >= len(src) { - if debug { - fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return dst[:d], 0, ErrCorrupt - } - val := src[s] - ll += int(val) - if val != 255 { - break - } - } - } - // Skip past token - if s+ll >= len(src) { - if debug { - fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) - } - return nil, 0, ErrCorrupt - } - s++ - if ll > 0 { - if d+ll > dLimit { - return nil, 0, ErrDstTooSmall - } - if debug { - fmt.Printf("emit %d literals\n", ll) - } - d += emitLiteralGo(dst[d:], src[s:s+ll]) - s += ll - uncompressed += ll - } - - // Check if we are done... - if ml == lz4MinMatch { - if s == len(src) { - break - } - // 0 bytes. 
- continue - } - // 2 byte offset - if s >= len(src)-2 { - if debug { - fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) - } - return nil, 0, ErrCorrupt - } - offset := binary.LittleEndian.Uint16(src[s:]) - s += 2 - if offset == 0 { - if debug { - fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) - } - return nil, 0, ErrCorrupt - } - if int(offset) > uncompressed { - if debug { - fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) - } - return nil, 0, ErrCorrupt - } - - if ml == lz4MinMatch+15 { - for { - if s >= len(src) { - if debug { - fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - val := src[s] - s++ - ml += int(val) - if val != 255 { - if s >= len(src) { - if debug { - fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - break - } - } - } - if offset == lastOffset { - if debug { - fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) - } - if !inline { - d += emitRepeat16(dst[d:], offset, ml) - } else { - length := ml - dst := dst[d:] - for len(dst) > 5 { - // Repeat offset, make length cheaper - length -= 4 - if length <= 4 { - dst[0] = uint8(length)<<2 | tagCopy1 - dst[1] = 0 - d += 2 - break - } - if length < 8 && offset < 2048 { - // Encode WITH offset - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 - d += 2 - break - } - if length < (1<<8)+4 { - length -= 4 - dst[2] = uint8(length) - dst[1] = 0 - dst[0] = 5<<2 | tagCopy1 - d += 3 - break - } - if length < (1<<16)+(1<<8) { - length -= 1 << 8 - dst[3] = uint8(length >> 8) - dst[2] = uint8(length >> 0) - dst[1] = 0 - dst[0] = 6<<2 | tagCopy1 - d += 4 - break - } - const maxRepeat = (1 << 24) - 1 - length -= 1 << 16 - left := 0 - if length > maxRepeat { - left = length - maxRepeat + 4 - length = maxRepeat - 4 - } - dst[4] = uint8(length >> 16) - dst[3] = uint8(length >> 8) - dst[2] = uint8(length >> 0) - dst[1] = 0 - dst[0] = 7<<2 | tagCopy1 - if left > 0 { - d += 5 + emitRepeat16(dst[5:], offset, left) - break - } - d += 5 - break - } - } - } else { - if debug { - fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) - } - if !inline { - d += emitCopy16(dst[d:], offset, ml) - } else { - length := ml - dst := dst[d:] - for len(dst) > 5 { - // Offset no more than 2 bytes. - if length > 64 { - off := 3 - if offset < 2048 { - // emit 8 bytes as tagCopy1, rest as repeats. - dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 - length -= 8 - off = 2 - } else { - // Emit a length 60 copy, encoded as 3 bytes. - // Emit remaining as repeat value (minimum 4 bytes). - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = 59<<2 | tagCopy2 - length -= 60 - } - // Emit remaining as repeats, at least 4 bytes remain. - d += off + emitRepeat16(dst[off:], offset, length) - break - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[2] = uint8(offset >> 8) - dst[1] = uint8(offset) - dst[0] = uint8(length-1)<<2 | tagCopy2 - d += 3 - break - } - // Emit the remaining copy, encoded as 2 bytes. 
- dst[1] = uint8(offset) - dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - d += 2 - break - } - } - lastOffset = offset - } - uncompressed += ml - if d > dLimit { - return nil, 0, ErrDstTooSmall - } - } - - return dst[:d], uncompressed, nil -} - -// ConvertBlockSnappy will convert an LZ4s block and append it -// as a Snappy block without block length to dst. -// The uncompressed size is returned as well. -// dst must have capacity to contain the entire compressed block. -func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { - if len(src) == 0 { - return dst, 0, nil - } - const debug = false - const lz4MinMatch = 3 - - s, d := 0, len(dst) - dst = dst[:cap(dst)] - // Use assembly when possible - if !debug && hasAmd64Asm { - res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src) - if res < 0 { - const ( - errCorrupt = -1 - errDstTooSmall = -2 - ) - switch res { - case errCorrupt: - return nil, 0, ErrCorrupt - case errDstTooSmall: - return nil, 0, ErrDstTooSmall - default: - return nil, 0, fmt.Errorf("unexpected result: %d", res) - } - } - if d+sz > len(dst) { - return nil, 0, ErrDstTooSmall - } - return dst[:d+sz], res, nil - } - - dLimit := len(dst) - 10 - var uncompressed int - if debug { - fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) - } - - for { - if s >= len(src) { - return nil, 0, ErrCorrupt - } - // Read literal info - token := src[s] - ll := int(token >> 4) - ml := int(lz4MinMatch + (token & 0xf)) - - // If upper nibble is 15, literal length is extended - if token >= 0xf0 { - for { - s++ - if s >= len(src) { - if debug { - fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - val := src[s] - ll += int(val) - if val != 255 { - break - } - } - } - // Skip past token - if s+ll >= len(src) { - if debug { - fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) - } - return nil, 0, ErrCorrupt - } - s++ - if ll > 0 { - if d+ll > dLimit { - return nil, 0, ErrDstTooSmall - } - if debug { - fmt.Printf("emit %d literals\n", ll) - } - d += emitLiteralGo(dst[d:], src[s:s+ll]) - s += ll - uncompressed += ll - } - - // Check if we are done... - if ml == lz4MinMatch { - if s == len(src) { - break - } - // 0 bytes. - continue - } - // 2 byte offset - if s >= len(src)-2 { - if debug { - fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) - } - return nil, 0, ErrCorrupt - } - offset := binary.LittleEndian.Uint16(src[s:]) - s += 2 - if offset == 0 { - if debug { - fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) - } - return nil, 0, ErrCorrupt - } - if int(offset) > uncompressed { - if debug { - fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) - } - return nil, 0, ErrCorrupt - } - - if ml == lz4MinMatch+15 { - for { - if s >= len(src) { - if debug { - fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - val := src[s] - s++ - ml += int(val) - if val != 255 { - if s >= len(src) { - if debug { - fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) - } - return nil, 0, ErrCorrupt - } - break - } - } - } - if debug { - fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) - } - length := ml - // d += emitCopyNoRepeat(dst[d:], int(offset), ml) - for length > 0 { - if d >= dLimit { - return nil, 0, ErrDstTooSmall - } - - // Offset no more than 2 bytes. - if length > 64 { - // Emit a length 64 copy, encoded as 3 bytes. 
- dst[d+2] = uint8(offset >> 8) - dst[d+1] = uint8(offset) - dst[d+0] = 63<<2 | tagCopy2 - length -= 64 - d += 3 - continue - } - if length >= 12 || offset >= 2048 || length < 4 { - // Emit the remaining copy, encoded as 3 bytes. - dst[d+2] = uint8(offset >> 8) - dst[d+1] = uint8(offset) - dst[d+0] = uint8(length-1)<<2 | tagCopy2 - d += 3 - break - } - // Emit the remaining copy, encoded as 2 bytes. - dst[d+1] = uint8(offset) - dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - d += 2 - break - } - uncompressed += ml - if d > dLimit { - return nil, 0, ErrDstTooSmall - } - } - - return dst[:d], uncompressed, nil -} diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go deleted file mode 100644 index 8372d752f..000000000 --- a/vendor/github.com/klauspost/compress/s2/reader.go +++ /dev/null @@ -1,1075 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019+ Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "runtime" - "sync" -) - -// ErrCantSeek is returned if the stream cannot be seeked. -type ErrCantSeek struct { - Reason string -} - -// Error returns the error as string. -func (e ErrCantSeek) Error() string { - return fmt.Sprintf("s2: Can't seek because %s", e.Reason) -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes. -func NewReader(r io.Reader, opts ...ReaderOption) *Reader { - nr := Reader{ - r: r, - maxBlock: maxBlockSize, - } - for _, opt := range opts { - if err := opt(&nr); err != nil { - nr.err = err - return &nr - } - } - nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize - if nr.lazyBuf > 0 { - nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize) - } else { - nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize) - } - nr.readHeader = nr.ignoreStreamID - nr.paramsOK = true - return &nr -} - -// ReaderOption is an option for creating a decoder. -type ReaderOption func(*Reader) error - -// ReaderMaxBlockSize allows to control allocations if the stream -// has been compressed with a smaller WriterBlockSize, or with the default 1MB. -// Blocks must be this size or smaller to decompress, -// otherwise the decoder will return ErrUnsupported. -// -// For streams compressed with Snappy this can safely be set to 64KB (64 << 10). -// -// Default is the maximum limit of 4MB. -func ReaderMaxBlockSize(blockSize int) ReaderOption { - return func(r *Reader) error { - if blockSize > maxBlockSize || blockSize <= 0 { - return errors.New("s2: block size too large. Must be <= 4MB and > 0") - } - if r.lazyBuf == 0 && blockSize < defaultBlockSize { - r.lazyBuf = blockSize - } - r.maxBlock = blockSize - return nil - } -} - -// ReaderAllocBlock allows to control upfront stream allocations -// and not allocate for frames bigger than this initially. -// If frames bigger than this is seen a bigger buffer will be allocated. -// -// Default is 1MB, which is default output size. -func ReaderAllocBlock(blockSize int) ReaderOption { - return func(r *Reader) error { - if blockSize > maxBlockSize || blockSize < 1024 { - return errors.New("s2: invalid ReaderAllocBlock. 
Must be <= 4MB and >= 1024") - } - r.lazyBuf = blockSize - return nil - } -} - -// ReaderIgnoreStreamIdentifier will make the reader skip the expected -// stream identifier at the beginning of the stream. -// This can be used when serving a stream that has been forwarded to a specific point. -func ReaderIgnoreStreamIdentifier() ReaderOption { - return func(r *Reader) error { - r.ignoreStreamID = true - return nil - } -} - -// ReaderSkippableCB will register a callback for chuncks with the specified ID. -// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). -// For each chunk with the ID, the callback is called with the content. -// Any returned non-nil error will abort decompression. -// Only one callback per ID is supported, latest sent will be used. -// You can peek the stream, triggering the callback, by doing a Read with a 0 -// byte buffer. -func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption { - return func(r *Reader) error { - if id < 0x80 || id > 0xfd { - return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)") - } - r.skippableCB[id-0x80] = fn - return nil - } -} - -// ReaderIgnoreCRC will make the reader skip CRC calculation and checks. -func ReaderIgnoreCRC() ReaderOption { - return func(r *Reader) error { - r.ignoreCRC = true - return nil - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - skippableCB [0xff - 0x80]func(r io.Reader) error - blockStart int64 // Uncompressed offset at start of current. - index *Index - - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - // maximum block size allowed. - maxBlock int - // maximum expected buffer size. - maxBufSize int - // alloc a buffer this size if > 0. - lazyBuf int - readHeader bool - paramsOK bool - snappyFrame bool - ignoreStreamID bool - ignoreCRC bool -} - -// GetBufferCapacity returns the capacity of the internal buffer. -// This might be useful to know when reusing the same reader in combination -// with the lazy buffer option. -func (r *Reader) GetBufferCapacity() int { - return cap(r.buf) -} - -// ensureBufferSize will ensure that the buffer can take at least n bytes. -// If false is returned the buffer exceeds maximum allowed size. -func (r *Reader) ensureBufferSize(n int) bool { - if n > r.maxBufSize { - r.err = ErrCorrupt - return false - } - if cap(r.buf) >= n { - return true - } - // Realloc buffer. - r.buf = make([]byte, n) - return true -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - if !r.paramsOK { - return - } - r.index = nil - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.blockStart = 0 - r.readHeader = r.ignoreStreamID -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// skippable will skip n bytes. -// If the supplied reader supports seeking that is used. -// tmp is used as a temporary buffer for reading. -// The supplied slice does not need to be the size of the read. 
-func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) { - if id < 0x80 { - r.err = fmt.Errorf("internal error: skippable id < 0x80") - return false - } - if fn := r.skippableCB[id-0x80]; fn != nil { - rd := io.LimitReader(r.r, int64(n)) - r.err = fn(rd) - if r.err != nil { - return false - } - _, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp) - return r.err == nil - } - if rs, ok := r.r.(io.ReadSeeker); ok { - _, err := rs.Seek(int64(n), io.SeekCurrent) - if err == nil { - return true - } - if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - return false - } - } - for n > 0 { - if n < len(tmp) { - tmp = tmp[:n] - } - if _, r.err = io.ReadFull(r.r, tmp); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - n -= len(tmp) - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - r.blockStart += int64(r.j) - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - if !r.ensureBufferSize(chunkLen) { - if r.err == nil { - r.err = ErrUnsupported - } - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if r.snappyFrame && n > maxSnappyBlockSize { - r.err = ErrCorrupt - return 0, r.err - } - - if n > len(r.decoded) { - if n > r.maxBlock { - r.err = ErrCorrupt - return 0, r.err - } - r.decoded = make([]byte, n) - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if !r.ignoreCRC && crc(r.decoded[:n]) != checksum { - r.err = ErrCRC - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - r.blockStart += int64(r.j) - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - if !r.ensureBufferSize(chunkLen) { - if r.err == nil { - r.err = ErrUnsupported - } - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if r.snappyFrame && n > maxSnappyBlockSize { - r.err = ErrCorrupt - return 0, r.err - } - if n > len(r.decoded) { - if n > r.maxBlock { - r.err = ErrCorrupt - return 0, r.err - } - r.decoded = make([]byte, n) - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if !r.ignoreCRC && crc(r.decoded[:n]) != checksum { - r.err = ErrCRC - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - if string(r.buf[:len(magicBody)]) != magicBody { - if string(r.buf[:len(magicBody)]) != magicBodySnappy { - r.err = ErrCorrupt - return 0, r.err - } else { - r.snappyFrame = true - } - } else { - r.snappyFrame = false - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if chunkLen > maxChunkSize { - // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) - r.err = ErrUnsupported - return 0, r.err - } - - // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) - if !r.skippable(r.buf, chunkLen, false, chunkType) { - return 0, r.err - } - } -} - -// DecodeConcurrent will decode the full stream to w. -// This function should not be combined with reading, seeking or other operations. -// Up to 'concurrent' goroutines will be used. -// If <= 0, runtime.NumCPU will be used. -// On success the number of bytes decompressed nil and is returned. -// This is mainly intended for bigger streams. 
-func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) { - if r.i > 0 || r.j > 0 || r.blockStart > 0 { - return 0, errors.New("DecodeConcurrent called after ") - } - if concurrent <= 0 { - concurrent = runtime.NumCPU() - } - - // Write to output - var errMu sync.Mutex - var aErr error - setErr := func(e error) (ok bool) { - errMu.Lock() - defer errMu.Unlock() - if e == nil { - return aErr == nil - } - if aErr == nil { - aErr = e - } - return false - } - hasErr := func() (ok bool) { - errMu.Lock() - v := aErr != nil - errMu.Unlock() - return v - } - - var aWritten int64 - toRead := make(chan []byte, concurrent) - writtenBlocks := make(chan []byte, concurrent) - queue := make(chan chan []byte, concurrent) - reUse := make(chan chan []byte, concurrent) - for i := 0; i < concurrent; i++ { - toRead <- make([]byte, 0, r.maxBufSize) - writtenBlocks <- make([]byte, 0, r.maxBufSize) - reUse <- make(chan []byte, 1) - } - // Writer - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for toWrite := range queue { - entry := <-toWrite - reUse <- toWrite - if hasErr() || entry == nil { - if entry != nil { - writtenBlocks <- entry - } - continue - } - if hasErr() { - writtenBlocks <- entry - continue - } - n, err := w.Write(entry) - want := len(entry) - writtenBlocks <- entry - if err != nil { - setErr(err) - continue - } - if n != want { - setErr(io.ErrShortWrite) - continue - } - aWritten += int64(n) - } - }() - - defer func() { - if r.err != nil { - setErr(r.err) - } else if err != nil { - setErr(err) - } - close(queue) - wg.Wait() - if err == nil { - err = aErr - } - written = aWritten - }() - - // Reader - for !hasErr() { - if !r.readFull(r.buf[:4], true) { - if r.err == io.EOF { - r.err = nil - } - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - r.blockStart += int64(r.j) - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - if chunkLen > r.maxBufSize { - r.err = ErrCorrupt - return 0, r.err - } - orgBuf := <-toRead - buf := orgBuf[:chunkLen] - - if !r.readFull(buf, false) { - return 0, r.err - } - - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if r.snappyFrame && n > maxSnappyBlockSize { - r.err = ErrCorrupt - return 0, r.err - } - - if n > r.maxBlock { - r.err = ErrCorrupt - return 0, r.err - } - wg.Add(1) - - decoded := <-writtenBlocks - entry := <-reUse - queue <- entry - go func() { - defer wg.Done() - decoded = decoded[:n] - _, err := Decode(decoded, buf) - toRead <- orgBuf - if err != nil { - writtenBlocks <- decoded - setErr(err) - entry <- nil - return - } - if !r.ignoreCRC && crc(decoded) != checksum { - writtenBlocks <- decoded - setErr(ErrCRC) - entry <- nil - return - } - entry <- decoded - }() - continue - - case chunkTypeUncompressedData: - - // Section 4.3. Uncompressed data (chunk type 0x01). 
- if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - if chunkLen > r.maxBufSize { - r.err = ErrCorrupt - return 0, r.err - } - // Grab write buffer - orgBuf := <-writtenBlocks - buf := orgBuf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read content. - n := chunkLen - checksumSize - - if r.snappyFrame && n > maxSnappyBlockSize { - r.err = ErrCorrupt - return 0, r.err - } - if n > r.maxBlock { - r.err = ErrCorrupt - return 0, r.err - } - // Read uncompressed - buf = orgBuf[:n] - if !r.readFull(buf, false) { - return 0, r.err - } - - if !r.ignoreCRC && crc(buf) != checksum { - r.err = ErrCRC - return 0, r.err - } - entry := <-reUse - queue <- entry - entry <- buf - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - if string(r.buf[:len(magicBody)]) != magicBody { - if string(r.buf[:len(magicBody)]) != magicBodySnappy { - r.err = ErrCorrupt - return 0, r.err - } else { - r.snappyFrame = true - } - } else { - r.snappyFrame = false - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if chunkLen > maxChunkSize { - // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) - r.err = ErrUnsupported - return 0, r.err - } - - // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) - if !r.skippable(r.buf, chunkLen, false, chunkType) { - return 0, r.err - } - } - return 0, r.err -} - -// Skip will skip n bytes forward in the decompressed output. -// For larger skips this consumes less CPU and is faster than reading output and discarding it. -// CRC is not checked on skipped blocks. -// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped. -// If a decoding error is encountered subsequent calls to Read will also fail. -func (r *Reader) Skip(n int64) error { - if n < 0 { - return errors.New("attempted negative skip") - } - if r.err != nil { - return r.err - } - - for n > 0 { - if r.i < r.j { - // Skip in buffer. - // decoded[i:j] contains decoded bytes that have not yet been passed on. - left := int64(r.j - r.i) - if left >= n { - tmp := int64(r.i) + n - if tmp > math.MaxInt32 { - return errors.New("s2: internal overflow in skip") - } - r.i = int(tmp) - return nil - } - n -= int64(r.j - r.i) - r.i = r.j - } - - // Buffer empty; read blocks until we have content. - if !r.readFull(r.buf[:4], true) { - if r.err == io.EOF { - r.err = io.ErrUnexpectedEOF - } - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - r.blockStart += int64(r.j) - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - if !r.ensureBufferSize(chunkLen) { - if r.err == nil { - r.err = ErrUnsupported - } - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - dLen, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if dLen > r.maxBlock { - r.err = ErrCorrupt - return r.err - } - // Check if destination is within this block - if int64(dLen) > n { - if len(r.decoded) < dLen { - r.decoded = make([]byte, dLen) - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:dLen]) != checksum { - r.err = ErrCorrupt - return r.err - } - } else { - // Skip block completely - n -= int64(dLen) - r.blockStart += int64(dLen) - dLen = 0 - } - r.i, r.j = 0, dLen - continue - case chunkTypeUncompressedData: - r.blockStart += int64(r.j) - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - if !r.ensureBufferSize(chunkLen) { - if r.err != nil { - r.err = ErrUnsupported - } - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n2 := chunkLen - checksumSize - if n2 > len(r.decoded) { - if n2 > r.maxBlock { - r.err = ErrCorrupt - return r.err - } - r.decoded = make([]byte, n2) - } - if !r.readFull(r.decoded[:n2], false) { - return r.err - } - if int64(n2) < n { - if crc(r.decoded[:n2]) != checksum { - r.err = ErrCorrupt - return r.err - } - } - r.i, r.j = 0, n2 - continue - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - if string(r.buf[:len(magicBody)]) != magicBody { - if string(r.buf[:len(magicBody)]) != magicBodySnappy { - r.err = ErrCorrupt - return r.err - } - } - - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - if chunkLen > maxChunkSize { - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.skippable(r.buf, chunkLen, false, chunkType) { - return r.err - } - } - return nil -} - -// ReadSeeker provides random or forward seeking in compressed content. -// See Reader.ReadSeeker -type ReadSeeker struct { - *Reader - readAtMu sync.Mutex -} - -// ReadSeeker will return an io.ReadSeeker and io.ReaderAt -// compatible version of the reader. -// If 'random' is specified the returned io.Seeker can be used for -// random seeking, otherwise only forward seeking is supported. -// Enabling random seeking requires the original input to support -// the io.Seeker interface. -// A custom index can be specified which will be used if supplied. -// When using a custom index, it will not be read from the input stream. -// The ReadAt position will affect regular reads and the current position of Seek. -// So using Read after ReadAt will continue from where the ReadAt stopped. -// No functions should be used concurrently. 
-// The returned ReadSeeker contains a shallow reference to the existing Reader, -// meaning changes performed to one is reflected in the other. -func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { - // Read index if provided. - if len(index) != 0 { - if r.index == nil { - r.index = &Index{} - } - if _, err := r.index.Load(index); err != nil { - return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()} - } - } - - // Check if input is seekable - rs, ok := r.r.(io.ReadSeeker) - if !ok { - if !random { - return &ReadSeeker{Reader: r}, nil - } - return nil, ErrCantSeek{Reason: "input stream isn't seekable"} - } - - if r.index != nil { - // Seekable and index, ok... - return &ReadSeeker{Reader: r}, nil - } - - // Load from stream. - r.index = &Index{} - - // Read current position. - pos, err := rs.Seek(0, io.SeekCurrent) - if err != nil { - return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} - } - err = r.index.LoadStream(rs) - if err != nil { - if err == ErrUnsupported { - // If we don't require random seeking, reset input and return. - if !random { - _, err = rs.Seek(pos, io.SeekStart) - if err != nil { - return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()} - } - r.index = nil - return &ReadSeeker{Reader: r}, nil - } - return nil, ErrCantSeek{Reason: "input stream does not contain an index"} - } - return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()} - } - - // reset position. - _, err = rs.Seek(pos, io.SeekStart) - if err != nil { - return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} - } - return &ReadSeeker{Reader: r}, nil -} - -// Seek allows seeking in compressed data. -func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { - if r.err != nil { - if !errors.Is(r.err, io.EOF) { - return 0, r.err - } - // Reset on EOF - r.err = nil - } - - // Calculate absolute offset. - absOffset := offset - - switch whence { - case io.SeekStart: - case io.SeekCurrent: - absOffset = r.blockStart + int64(r.i) + offset - case io.SeekEnd: - if r.index == nil { - return 0, ErrUnsupported - } - absOffset = r.index.TotalUncompressed + offset - default: - r.err = ErrUnsupported - return 0, r.err - } - - if absOffset < 0 { - return 0, errors.New("seek before start of file") - } - - if !r.readHeader { - // Make sure we read the header. - _, r.err = r.Read([]byte{}) - if r.err != nil { - return 0, r.err - } - } - - // If we are inside current block no need to seek. - // This includes no offset changes. - if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) { - r.i = int(absOffset - r.blockStart) - return r.blockStart + int64(r.i), nil - } - - rs, ok := r.r.(io.ReadSeeker) - if r.index == nil || !ok { - currOffset := r.blockStart + int64(r.i) - if absOffset >= currOffset { - err := r.Skip(absOffset - currOffset) - return r.blockStart + int64(r.i), err - } - return 0, ErrUnsupported - } - - // We can seek and we have an index. - c, u, err := r.index.Find(absOffset) - if err != nil { - return r.blockStart + int64(r.i), err - } - - // Seek to next block - _, err = rs.Seek(c, io.SeekStart) - if err != nil { - return 0, err - } - - r.i = r.j // Remove rest of current block. - r.blockStart = u - int64(r.j) // Adjust current block start for accounting. 
- if u < absOffset { - // Forward inside block - return absOffset, r.Skip(absOffset - u) - } - if u > absOffset { - return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset) - } - return absOffset, nil -} - -// ReadAt reads len(p) bytes into p starting at offset off in the -// underlying input source. It returns the number of bytes -// read (0 <= n <= len(p)) and any error encountered. -// -// When ReadAt returns n < len(p), it returns a non-nil error -// explaining why more bytes were not returned. In this respect, -// ReadAt is stricter than Read. -// -// Even if ReadAt returns n < len(p), it may use all of p as scratch -// space during the call. If some data is available but not len(p) bytes, -// ReadAt blocks until either all the data is available or an error occurs. -// In this respect ReadAt is different from Read. -// -// If the n = len(p) bytes returned by ReadAt are at the end of the -// input source, ReadAt may return either err == EOF or err == nil. -// -// If ReadAt is reading from an input source with a seek offset, -// ReadAt should not affect nor be affected by the underlying -// seek offset. -// -// Clients of ReadAt can execute parallel ReadAt calls on the -// same input source. This is however not recommended. -func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) { - r.readAtMu.Lock() - defer r.readAtMu.Unlock() - _, err := r.Seek(offset, io.SeekStart) - if err != nil { - return 0, err - } - n := 0 - for n < len(p) { - n2, err := r.Read(p[n:]) - if err != nil { - // This will include io.EOF - return n + n2, err - } - n += n2 - } - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - if r.i < r.j { - c := r.decoded[r.i] - r.i++ - return c, nil - } - var tmp [1]byte - for i := 0; i < 10; i++ { - n, err := r.Read(tmp[:]) - if err != nil { - return 0, err - } - if n == 1 { - return tmp[0], nil - } - } - return 0, io.ErrNoProgress -} - -// SkippableCB will register a callback for chunks with the specified ID. -// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). -// For each chunk with the ID, the callback is called with the content. -// Any returned non-nil error will abort decompression. -// Only one callback per ID is supported, latest sent will be used. -// Sending a nil function will disable previous callbacks. -// You can peek the stream, triggering the callback, by doing a Read with a 0 -// byte buffer. -func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error { - if id < 0x80 || id >= chunkTypePadding { - return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)") - } - r.skippableCB[id-0x80] = fn - return nil -} diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go deleted file mode 100644 index cbd1ed64d..000000000 --- a/vendor/github.com/klauspost/compress/s2/s2.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2 implements the S2 compression format. -// -// S2 is an extension of Snappy. Similar to Snappy S2 is aimed for high throughput, -// which is why it features concurrent compression for bigger payloads. 
-// -// Decoding is compatible with Snappy compressed content, -// but content compressed with S2 cannot be decompressed by Snappy. -// -// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2 -// -// There are actually two S2 formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a S2 stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// A "better" compression option is available. This will trade some compression -// speed -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// Blocks to not offer much data protection, so it is up to you to -// add data validation of decompressed blocks. -// -// Streams perform CRC validation of the decompressed data. -// Stream compression will also be performed on multiple CPU cores concurrently -// significantly improving throughput. -package s2 - -import ( - "bytes" - "hash/crc32" - - "github.com/klauspost/compress/internal/race" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy - magicBodySnappy = "sNaPpY" - magicBody = "S2sTwO" - - // maxBlockSize is the maximum size of the input to encodeBlock. - // - // For the framing format (Writer type instead of Encode function), - // this is the maximum uncompressed size of a block. - maxBlockSize = 4 << 20 - - // minBlockSize is the minimum size of block setting when creating a writer. - minBlockSize = 4 << 10 - - skippableFrameHeader = 4 - maxChunkSize = 1<<24 - 1 // 16777215 - - // Default block size - defaultBlockSize = 1 << 20 - - // maxSnappyBlockSize is the maximum snappy block size. 
- maxSnappyBlockSize = 1 << 16 - - obufHeaderLen = checksumSize + chunkHeaderSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - ChunkTypeIndex = 0x99 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - crcTable = crc32.MakeTable(crc32.Castagnoli) - magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes. - magicChunkBytes = []byte(magicChunk) // Can be passed to functions where it escapes. -) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - race.ReadSlice(b) - - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// literalExtraSize returns the extra size of encoding n literals. -// n should be >= 0 and <= math.MaxUint32. -func literalExtraSize(n int64) int64 { - if n == 0 { - return 0 - } - switch { - case n < 60: - return 1 - case n < 1<<8: - return 2 - case n < 1<<16: - return 3 - case n < 1<<24: - return 4 - default: - return 5 - } -} - -type byter interface { - Bytes() []byte -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go deleted file mode 100644 index fd15078f7..000000000 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ /dev/null @@ -1,1064 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Copyright (c) 2019+ Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package s2 - -import ( - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "io" - "runtime" - "sync" - - "github.com/klauspost/compress/internal/race" -) - -const ( - levelUncompressed = iota + 1 - levelFast - levelBetter - levelBest -) - -// NewWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// Users must call Close to guarantee all data has been forwarded to -// the underlying io.Writer and that resources are released. -// They may also call Flush zero or more times before calling Close. -func NewWriter(w io.Writer, opts ...WriterOption) *Writer { - w2 := Writer{ - blockSize: defaultBlockSize, - concurrency: runtime.GOMAXPROCS(0), - randSrc: rand.Reader, - level: levelFast, - } - for _, opt := range opts { - if err := opt(&w2); err != nil { - w2.errState = err - return &w2 - } - } - w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize) - w2.paramsOK = true - w2.ibuf = make([]byte, 0, w2.blockSize) - w2.buffers.New = func() interface{} { - return make([]byte, w2.obufLen) - } - w2.Reset(w) - return &w2 -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - errMu sync.Mutex - errState error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - ibuf []byte - - blockSize int - obufLen int - concurrency int - written int64 - uncompWritten int64 // Bytes sent to compression - output chan chan result - buffers sync.Pool - pad int - - writer io.Writer - randSrc io.Reader - writerWg sync.WaitGroup - index Index - customEnc func(dst, src []byte) int - - // wroteStreamHeader is whether we have written the stream header. 
- wroteStreamHeader bool - paramsOK bool - snappy bool - flushOnWrite bool - appendIndex bool - bufferCB func([]byte) - level uint8 -} - -type result struct { - b []byte - // return when writing - ret []byte - // Uncompressed start offset - startOffset int64 -} - -// err returns the previously set error. -// If no error has been set it is set to err if not nil. -func (w *Writer) err(err error) error { - w.errMu.Lock() - errSet := w.errState - if errSet == nil && err != nil { - w.errState = err - errSet = err - } - w.errMu.Unlock() - return errSet -} - -// Reset discards the writer's state and switches the Snappy writer to write to w. -// This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - if !w.paramsOK { - return - } - // Close previous writer, if any. - if w.output != nil { - close(w.output) - w.writerWg.Wait() - w.output = nil - } - w.errState = nil - w.ibuf = w.ibuf[:0] - w.wroteStreamHeader = false - w.written = 0 - w.writer = writer - w.uncompWritten = 0 - w.index.reset(w.blockSize) - - // If we didn't get a writer, stop here. - if writer == nil { - return - } - // If no concurrency requested, don't spin up writer goroutine. - if w.concurrency == 1 { - return - } - - toWrite := make(chan chan result, w.concurrency) - w.output = toWrite - w.writerWg.Add(1) - - // Start a writer goroutine that will write all output in order. - go func() { - defer w.writerWg.Done() - - // Get a queued write. - for write := range toWrite { - // Wait for the data to be available. - input := <-write - if input.ret != nil && w.bufferCB != nil { - w.bufferCB(input.ret) - input.ret = nil - } - in := input.b - if len(in) > 0 { - if w.err(nil) == nil { - // Don't expose data from previous buffers. - toWrite := in[:len(in):len(in)] - // Write to output. - n, err := writer.Write(toWrite) - if err == nil && n != len(toWrite) { - err = io.ErrShortBuffer - } - _ = w.err(err) - w.err(w.index.add(w.written, input.startOffset)) - w.written += int64(n) - } - } - if cap(in) >= w.obufLen { - w.buffers.Put(in) - } - // close the incoming write request. - // This can be used for synchronizing flushes. - close(write) - } - }() -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if err := w.err(nil); err != nil { - return 0, err - } - if w.flushOnWrite { - return w.write(p) - } - // If we exceed the input buffer size, start writing - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - } - nRet += n - p = p[n:] - } - if err := w.err(nil); err != nil { - return nRet, err - } - // p should always be able to fit into w.ibuf now. - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -// ReadFrom implements the io.ReaderFrom interface. -// Using this is typically more efficient since it avoids a memory copy. -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. 
-func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { - if err := w.err(nil); err != nil { - return 0, err - } - if len(w.ibuf) > 0 { - err := w.AsyncFlush() - if err != nil { - return 0, err - } - } - if br, ok := r.(byter); ok { - buf := br.Bytes() - if err := w.EncodeBuffer(buf); err != nil { - return 0, err - } - return int64(len(buf)), w.AsyncFlush() - } - for { - inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen] - n2, err := io.ReadFull(r, inbuf[obufHeaderLen:]) - if err != nil { - if err == io.ErrUnexpectedEOF { - err = io.EOF - } - if err != io.EOF { - return n, w.err(err) - } - } - if n2 == 0 { - if cap(inbuf) >= w.obufLen { - w.buffers.Put(inbuf) - } - break - } - n += int64(n2) - err2 := w.writeFull(inbuf[:n2+obufHeaderLen]) - if w.err(err2) != nil { - break - } - - if err != nil { - // We got EOF and wrote everything - break - } - } - - return n, w.err(nil) -} - -// AddSkippableBlock will add a skippable block to the stream. -// The ID must be 0x80-0xfe (inclusive). -// Length of the skippable block must be <= 16777215 bytes. -func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { - if err := w.err(nil); err != nil { - return err - } - if len(data) == 0 { - return nil - } - if id < 0x80 || id > chunkTypePadding { - return fmt.Errorf("invalid skippable block id %x", id) - } - if len(data) > maxChunkSize { - return fmt.Errorf("skippable block excessed maximum size") - } - var header [4]byte - chunkLen := len(data) - header[0] = id - header[1] = uint8(chunkLen >> 0) - header[2] = uint8(chunkLen >> 8) - header[3] = uint8(chunkLen >> 16) - if w.concurrency == 1 { - write := func(b []byte) error { - n, err := w.writer.Write(b) - if err = w.err(err); err != nil { - return err - } - if n != len(b) { - return w.err(io.ErrShortWrite) - } - w.written += int64(n) - return w.err(nil) - } - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - if w.snappy { - if err := write([]byte(magicChunkSnappy)); err != nil { - return err - } - } else { - if err := write([]byte(magicChunk)); err != nil { - return err - } - } - } - if err := write(header[:]); err != nil { - return err - } - return write(data) - } - - // Create output... - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - hWriter := make(chan result) - w.output <- hWriter - if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} - } else { - hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} - } - } - - // Copy input. - inbuf := w.buffers.Get().([]byte)[:4] - copy(inbuf, header[:]) - inbuf = append(inbuf, data...) - - output := make(chan result, 1) - // Queue output. - w.output <- output - output <- result{startOffset: w.uncompWritten, b: inbuf} - - return nil -} - -// EncodeBuffer will add a buffer to the stream. -// This is the fastest way to encode a stream, -// but the input buffer cannot be written to by the caller -// until Flush or Close has been called when concurrency != 1. -// -// Use the WriterBufferDone to receive a callback when the buffer is done -// Processing. -// -// Note that input is not buffered. -// This means that each write will result in discrete blocks being created. -// For buffered writes, use the regular Write function. -func (w *Writer) EncodeBuffer(buf []byte) (err error) { - if err := w.err(nil); err != nil { - return err - } - - if w.flushOnWrite { - _, err := w.write(buf) - return err - } - // Flush queued data first. 
- if len(w.ibuf) > 0 { - err := w.AsyncFlush() - if err != nil { - return err - } - } - if w.concurrency == 1 { - _, err := w.writeSync(buf) - if w.bufferCB != nil { - w.bufferCB(buf) - } - return err - } - - // Spawn goroutine and write block to output channel. - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - hWriter := make(chan result) - w.output <- hWriter - if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} - } else { - hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} - } - } - orgBuf := buf - for len(buf) > 0 { - // Cut input. - uncompressed := buf - if len(uncompressed) > w.blockSize { - uncompressed = uncompressed[:w.blockSize] - } - buf = buf[len(uncompressed):] - // Get an output buffer. - obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] - race.WriteSlice(obuf) - - output := make(chan result) - // Queue output now, so we keep order. - w.output <- output - res := result{ - startOffset: w.uncompWritten, - } - w.uncompWritten += int64(len(uncompressed)) - if len(buf) == 0 && w.bufferCB != nil { - res.ret = orgBuf - } - go func() { - race.ReadSlice(uncompressed) - - checksum := crc(uncompressed) - - // Set to uncompressed. - chunkType := uint8(chunkTypeUncompressedData) - chunkLen := 4 + len(uncompressed) - - // Attempt compressing. - n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) - n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) - - // Check if we should use this, or store as uncompressed instead. - if n2 > 0 { - chunkType = uint8(chunkTypeCompressedData) - chunkLen = 4 + n + n2 - obuf = obuf[:obufHeaderLen+n+n2] - } else { - // copy uncompressed - copy(obuf[obufHeaderLen:], uncompressed) - } - - // Fill in the per-chunk header that comes before the body. - obuf[0] = chunkType - obuf[1] = uint8(chunkLen >> 0) - obuf[2] = uint8(chunkLen >> 8) - obuf[3] = uint8(chunkLen >> 16) - obuf[4] = uint8(checksum >> 0) - obuf[5] = uint8(checksum >> 8) - obuf[6] = uint8(checksum >> 16) - obuf[7] = uint8(checksum >> 24) - - // Queue final output. - res.b = obuf - output <- res - }() - } - return nil -} - -func (w *Writer) encodeBlock(obuf, uncompressed []byte) int { - if w.customEnc != nil { - if ret := w.customEnc(obuf, uncompressed); ret >= 0 { - return ret - } - } - if w.snappy { - switch w.level { - case levelFast: - return encodeBlockSnappy(obuf, uncompressed) - case levelBetter: - return encodeBlockBetterSnappy(obuf, uncompressed) - case levelBest: - return encodeBlockBestSnappy(obuf, uncompressed) - } - return 0 - } - switch w.level { - case levelFast: - return encodeBlock(obuf, uncompressed) - case levelBetter: - return encodeBlockBetter(obuf, uncompressed) - case levelBest: - return encodeBlockBest(obuf, uncompressed, nil) - } - return 0 -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if err := w.err(nil); err != nil { - return 0, err - } - if w.concurrency == 1 { - return w.writeSync(p) - } - - // Spawn goroutine and write block to output channel. - for len(p) > 0 { - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - hWriter := make(chan result) - w.output <- hWriter - if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} - } else { - hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} - } - } - - var uncompressed []byte - if len(p) > w.blockSize { - uncompressed, p = p[:w.blockSize], p[w.blockSize:] - } else { - uncompressed, p = p, nil - } - - // Copy input. 
- // If the block is incompressible, this is used for the result. - inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] - obuf := w.buffers.Get().([]byte)[:w.obufLen] - copy(inbuf[obufHeaderLen:], uncompressed) - uncompressed = inbuf[obufHeaderLen:] - - output := make(chan result) - // Queue output now, so we keep order. - w.output <- output - res := result{ - startOffset: w.uncompWritten, - } - w.uncompWritten += int64(len(uncompressed)) - - go func() { - checksum := crc(uncompressed) - - // Set to uncompressed. - chunkType := uint8(chunkTypeUncompressedData) - chunkLen := 4 + len(uncompressed) - - // Attempt compressing. - n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) - n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) - - // Check if we should use this, or store as uncompressed instead. - if n2 > 0 { - chunkType = uint8(chunkTypeCompressedData) - chunkLen = 4 + n + n2 - obuf = obuf[:obufHeaderLen+n+n2] - } else { - // Use input as output. - obuf, inbuf = inbuf, obuf - } - - // Fill in the per-chunk header that comes before the body. - obuf[0] = chunkType - obuf[1] = uint8(chunkLen >> 0) - obuf[2] = uint8(chunkLen >> 8) - obuf[3] = uint8(chunkLen >> 16) - obuf[4] = uint8(checksum >> 0) - obuf[5] = uint8(checksum >> 8) - obuf[6] = uint8(checksum >> 16) - obuf[7] = uint8(checksum >> 24) - - // Queue final output. - res.b = obuf - output <- res - - // Put unused buffer back in pool. - w.buffers.Put(inbuf) - }() - nRet += len(uncompressed) - } - return nRet, nil -} - -// writeFull is a special version of write that will always write the full buffer. -// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer. -// The data will be written as a single block. -// The caller is not allowed to use inbuf after this function has been called. -func (w *Writer) writeFull(inbuf []byte) (errRet error) { - if err := w.err(nil); err != nil { - return err - } - - if w.concurrency == 1 { - _, err := w.writeSync(inbuf[obufHeaderLen:]) - if cap(inbuf) >= w.obufLen { - w.buffers.Put(inbuf) - } - return err - } - - // Spawn goroutine and write block to output channel. - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - hWriter := make(chan result) - w.output <- hWriter - if w.snappy { - hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} - } else { - hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} - } - } - - // Get an output buffer. - obuf := w.buffers.Get().([]byte)[:w.obufLen] - uncompressed := inbuf[obufHeaderLen:] - - output := make(chan result) - // Queue output now, so we keep order. - w.output <- output - res := result{ - startOffset: w.uncompWritten, - } - w.uncompWritten += int64(len(uncompressed)) - - go func() { - checksum := crc(uncompressed) - - // Set to uncompressed. - chunkType := uint8(chunkTypeUncompressedData) - chunkLen := 4 + len(uncompressed) - - // Attempt compressing. - n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) - n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) - - // Check if we should use this, or store as uncompressed instead. - if n2 > 0 { - chunkType = uint8(chunkTypeCompressedData) - chunkLen = 4 + n + n2 - obuf = obuf[:obufHeaderLen+n+n2] - } else { - // Use input as output. - obuf, inbuf = inbuf, obuf - } - - // Fill in the per-chunk header that comes before the body. 
- obuf[0] = chunkType - obuf[1] = uint8(chunkLen >> 0) - obuf[2] = uint8(chunkLen >> 8) - obuf[3] = uint8(chunkLen >> 16) - obuf[4] = uint8(checksum >> 0) - obuf[5] = uint8(checksum >> 8) - obuf[6] = uint8(checksum >> 16) - obuf[7] = uint8(checksum >> 24) - - // Queue final output. - res.b = obuf - output <- res - - // Put unused buffer back in pool. - w.buffers.Put(inbuf) - }() - return nil -} - -func (w *Writer) writeSync(p []byte) (nRet int, errRet error) { - if err := w.err(nil); err != nil { - return 0, err - } - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - var n int - var err error - if w.snappy { - n, err = w.writer.Write(magicChunkSnappyBytes) - } else { - n, err = w.writer.Write(magicChunkBytes) - } - if err != nil { - return 0, w.err(err) - } - if n != len(magicChunk) { - return 0, w.err(io.ErrShortWrite) - } - w.written += int64(n) - } - - for len(p) > 0 { - var uncompressed []byte - if len(p) > w.blockSize { - uncompressed, p = p[:w.blockSize], p[w.blockSize:] - } else { - uncompressed, p = p, nil - } - - obuf := w.buffers.Get().([]byte)[:w.obufLen] - checksum := crc(uncompressed) - - // Set to uncompressed. - chunkType := uint8(chunkTypeUncompressedData) - chunkLen := 4 + len(uncompressed) - - // Attempt compressing. - n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) - n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) - - if n2 > 0 { - chunkType = uint8(chunkTypeCompressedData) - chunkLen = 4 + n + n2 - obuf = obuf[:obufHeaderLen+n+n2] - } else { - obuf = obuf[:8] - } - - // Fill in the per-chunk header that comes before the body. - obuf[0] = chunkType - obuf[1] = uint8(chunkLen >> 0) - obuf[2] = uint8(chunkLen >> 8) - obuf[3] = uint8(chunkLen >> 16) - obuf[4] = uint8(checksum >> 0) - obuf[5] = uint8(checksum >> 8) - obuf[6] = uint8(checksum >> 16) - obuf[7] = uint8(checksum >> 24) - - n, err := w.writer.Write(obuf) - if err != nil { - return 0, w.err(err) - } - if n != len(obuf) { - return 0, w.err(io.ErrShortWrite) - } - w.err(w.index.add(w.written, w.uncompWritten)) - w.written += int64(n) - w.uncompWritten += int64(len(uncompressed)) - - if chunkType == chunkTypeUncompressedData { - // Write uncompressed data. - n, err := w.writer.Write(uncompressed) - if err != nil { - return 0, w.err(err) - } - if n != len(uncompressed) { - return 0, w.err(io.ErrShortWrite) - } - w.written += int64(n) - } - w.buffers.Put(obuf) - // Queue final output. - nRet += len(uncompressed) - } - return nRet, nil -} - -// AsyncFlush writes any buffered bytes to a block and starts compressing it. -// It does not wait for the output has been written as Flush() does. -func (w *Writer) AsyncFlush() error { - if err := w.err(nil); err != nil { - return err - } - - // Queue any data still in input buffer. - if len(w.ibuf) != 0 { - if !w.wroteStreamHeader { - _, err := w.writeSync(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err(err) - } else { - _, err := w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - err = w.err(err) - if err != nil { - return err - } - } - } - return w.err(nil) -} - -// Flush flushes the Writer to its underlying io.Writer. -// This does not apply padding. -func (w *Writer) Flush() error { - if err := w.AsyncFlush(); err != nil { - return err - } - if w.output == nil { - return w.err(nil) - } - - // Send empty buffer - res := make(chan result) - w.output <- res - // Block until this has been picked up. - res <- result{b: nil, startOffset: w.uncompWritten} - // When it is closed, we have flushed. 
- <-res - return w.err(nil) -} - -// Close calls Flush and then closes the Writer. -// Calling Close multiple times is ok, -// but calling CloseIndex after this will make it not return the index. -func (w *Writer) Close() error { - _, err := w.closeIndex(w.appendIndex) - return err -} - -// CloseIndex calls Close and returns an index on first call. -// This is not required if you are only adding index to a stream. -func (w *Writer) CloseIndex() ([]byte, error) { - return w.closeIndex(true) -} - -func (w *Writer) closeIndex(idx bool) ([]byte, error) { - err := w.Flush() - if w.output != nil { - close(w.output) - w.writerWg.Wait() - w.output = nil - } - - var index []byte - if w.err(err) == nil && w.writer != nil { - // Create index. - if idx { - compSize := int64(-1) - if w.pad <= 1 { - compSize = w.written - } - index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize) - // Count as written for padding. - if w.appendIndex { - w.written += int64(len(index)) - } - } - - if w.pad > 1 { - tmp := w.ibuf[:0] - if len(index) > 0 { - // Allocate another buffer. - tmp = w.buffers.Get().([]byte)[:0] - defer w.buffers.Put(tmp) - } - add := calcSkippableFrame(w.written, int64(w.pad)) - frame, err := skippableFrame(tmp, add, w.randSrc) - if err = w.err(err); err != nil { - return nil, err - } - n, err2 := w.writer.Write(frame) - if err2 == nil && n != len(frame) { - err2 = io.ErrShortWrite - } - _ = w.err(err2) - } - if len(index) > 0 && w.appendIndex { - n, err2 := w.writer.Write(index) - if err2 == nil && n != len(index) { - err2 = io.ErrShortWrite - } - _ = w.err(err2) - } - } - err = w.err(errClosed) - if err == errClosed { - return index, nil - } - return nil, err -} - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total) - } - if int64(total) >= maxBlockSize+skippableFrameHeader { - return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total) - } - // Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)" - dst = append(dst, chunkTypePadding) - f := uint32(total - skippableFrameHeader) - // Add chunk length. - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16)) - // Add data - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} - -var errClosed = errors.New("s2: Writer is closed") - -// WriterOption is an option for creating a encoder. -type WriterOption func(*Writer) error - -// WriterConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. 
-func WriterConcurrency(n int) WriterOption { - return func(w *Writer) error { - if n <= 0 { - return errors.New("concurrency must be at least 1") - } - w.concurrency = n - return nil - } -} - -// WriterAddIndex will append an index to the end of a stream -// when it is closed. -func WriterAddIndex() WriterOption { - return func(w *Writer) error { - w.appendIndex = true - return nil - } -} - -// WriterBetterCompression will enable better compression. -// EncodeBetter compresses better than Encode but typically with a -// 10-40% speed decrease on both compression and decompression. -func WriterBetterCompression() WriterOption { - return func(w *Writer) error { - w.level = levelBetter - return nil - } -} - -// WriterBestCompression will enable better compression. -// EncodeBest compresses better than Encode but typically with a -// big speed decrease on compression. -func WriterBestCompression() WriterOption { - return func(w *Writer) error { - w.level = levelBest - return nil - } -} - -// WriterUncompressed will bypass compression. -// The stream will be written as uncompressed blocks only. -// If concurrency is > 1 CRC and output will still be done async. -func WriterUncompressed() WriterOption { - return func(w *Writer) error { - w.level = levelUncompressed - return nil - } -} - -// WriterBufferDone will perform a callback when EncodeBuffer has finished -// writing a buffer to the output and the buffer can safely be reused. -// If the buffer was split into several blocks, it will be sent after the last block. -// Callbacks will not be done concurrently. -func WriterBufferDone(fn func(b []byte)) WriterOption { - return func(w *Writer) error { - w.bufferCB = fn - return nil - } -} - -// WriterBlockSize allows to override the default block size. -// Blocks will be this size or smaller. -// Minimum size is 4KB and maximum size is 4MB. -// -// Bigger blocks may give bigger throughput on systems with many cores, -// and will increase compression slightly, but it will limit the possible -// concurrency for smaller payloads for both encoding and decoding. -// Default block size is 1MB. -// -// When writing Snappy compatible output using WriterSnappyCompat, -// the maximum block size is 64KB. -func WriterBlockSize(n int) WriterOption { - return func(w *Writer) error { - if w.snappy && n > maxSnappyBlockSize || n < minBlockSize { - return errors.New("s2: block size too large. Must be <= 64K and >=4KB on for snappy compatible output") - } - if n > maxBlockSize || n < minBlockSize { - return errors.New("s2: block size too large. Must be <= 4MB and >=4KB") - } - w.blockSize = n - return nil - } -} - -// WriterPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 4MB. -// The padded area will be filled with data from crypto/rand.Reader. -// The padding will be applied whenever Close is called on the writer. -func WriterPadding(n int) WriterOption { - return func(w *Writer) error { - if n <= 0 { - return fmt.Errorf("s2: padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - w.pad = 0 - } - if n > maxBlockSize { - return fmt.Errorf("s2: padding must less than 4MB") - } - w.pad = n - return nil - } -} - -// WriterPaddingSrc will get random data for padding from the supplied source. -// By default crypto/rand is used. 
-func WriterPaddingSrc(reader io.Reader) WriterOption {
-	return func(w *Writer) error {
-		w.randSrc = reader
-		return nil
-	}
-}
-
-// WriterSnappyCompat will write snappy compatible output.
-// The output can be decompressed using either snappy or s2.
-// If the block size is larger than 64KB it is reduced to just below 64KB.
-func WriterSnappyCompat() WriterOption {
-	return func(w *Writer) error {
-		w.snappy = true
-		if w.blockSize > 64<<10 {
-			// We choose 8 bytes less than 64K, since that will make literal emits slightly more effective.
-			// And allows us to skip some size checks.
-			w.blockSize = (64 << 10) - 8
-		}
-		return nil
-	}
-}
-
-// WriterFlushOnWrite will compress blocks on each call to the Write function.
-//
-// This is quite inefficient as block size will depend on the write size.
-//
-// Use WriterConcurrency(1) to also make sure that output is flushed when Write calls return;
-// otherwise blocks will be written when compression is done.
-func WriterFlushOnWrite() WriterOption {
-	return func(w *Writer) error {
-		w.flushOnWrite = true
-		return nil
-	}
-}
-
-// WriterCustomEncoder allows overriding the encoder for blocks on the stream.
-// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
-// Block size (initial varint) should not be added by the encoder.
-// Returning value 0 indicates the block could not be compressed.
-// Returning a negative value indicates that compression should be attempted.
-// The function should expect to be called concurrently.
-func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
-	return func(w *Writer) error {
-		w.customEnc = fn
-		return nil
-	}
-}
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
deleted file mode 100644
index 5a4412f90..000000000
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ /dev/null
@@ -1,4 +0,0 @@
-module github.com/klauspost/compress
-
-go 1.19
-
diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum
deleted file mode 100644
index e69de29bb..000000000
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
deleted file mode 100644
index 92e2347bb..000000000
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ /dev/null
@@ -1,441 +0,0 @@
-# zstd
-
-[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios.
-It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder.
-A high performance compression algorithm is implemented, currently focused on speed.
-
-This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
-
-This package is pure Go and without use of "unsafe".
-
-The `zstd` package is provided as open source software using a Go standard license.
-
-Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
-
-For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
-
-## Installation
-
-Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
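
As a quick sanity check after installing, a minimal in-memory round trip could look like the sketch below; it only relies on the `EncodeAll` and `DecodeAll` helpers described in the Compressor and Decompressor sections.

```Go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// One encoder and one decoder can be reused for many buffers.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	src := []byte("hello, zstd")
	compressed := enc.EncodeAll(src, nil)

	decompressed, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(src, decompressed)) // true
}
```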
- -[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) - -## Compressor - -### Status: - -STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. - -There may still be specific combinations of data types/size/settings that could lead to edge cases, -so as always, testing is recommended. - -For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. - -* The "Fastest" compression ratio is roughly equivalent to zstd level 1. -* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). -* The "Better" compression ratio is roughly equivalent to zstd level 7. -* The "Best" compression ratio is roughly equivalent to zstd level 11. - -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. -The compression ratio compared to stdlib is around level 3, but usually 3x as fast. - - -### Usage - -An Encoder can be used for either compressing a stream via the -`io.WriteCloser` interface supported by the Encoder or as multiple independent -tasks via the `EncodeAll` function. -Smaller encodes are encouraged to use the EncodeAll function. -Use `NewWriter` to create a new instance that can be used for both. - -To create a writer with default options, do like this: - -```Go -// Compress input to output. -func Compress(in io.Reader, out io.Writer) error { - enc, err := zstd.NewWriter(out) - if err != nil { - return err - } - _, err = io.Copy(enc, in) - if err != nil { - enc.Close() - return err - } - return enc.Close() -} -``` - -Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. -Even if your encode fails, you should still call `Close()` to release any resources that may be held up. - -The above is fine for big encodes. However, whenever possible try to *reuse* the writer. - -To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. -This will allow the encoder to reuse all resources and avoid wasteful allocations. - -Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part -of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change -in the future. So if you want to limit concurrency for future updates, specify the concurrency -you would like. - -If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` -which will compress input as each block is completed, blocking on writes until each has completed. - -You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined -compression settings can be specified. - -#### Future Compatibility Guarantees - -This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change. - -The goal will be to keep the default efficiency at the default zstd (level 3). -However the encoding should never be assumed to remain the same, -and you should not use hashes of compressed output for similarity checks. - -The Encoder can be assumed to produce the same output from the exact same code version. 
-However, there may be modes in the future that break this,
-although they will not be enabled without an explicit option.
-
-This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
-
-Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
-[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
-and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
-
-#### Blocks
-
-For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
-
-`EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently.
-Each call will only run on the same goroutine as the caller.
-
-Encoded blocks can be concatenated and the result will be the combined input stream.
-Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
-
-Especially when encoding blocks you should take special care to reuse the encoder.
-This will effectively make it run without allocations after a warmup period.
-To make it run completely without allocations, supply a destination buffer with space for all content.
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// Create a writer that caches compressors.
-// For this operation type we supply a nil Writer.
-var encoder, _ = zstd.NewWriter(nil)
-
-// Compress a buffer.
-// If you have a destination buffer, the allocation in the call can also be eliminated.
-func Compress(src []byte) []byte {
-	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
-}
-```
-
-You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
-option when creating the writer.
-
-Using the Encoder for both a stream and individual blocks concurrently is safe.
-
-### Performance
-
-I have collected some examples to compare speed and compression against other compressors.
-
-* `file` is the input file.
-* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
-* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best".
-* `insize`/`outsize` is the input/output size.
-* `millis` is the number of milliseconds used for compression.
-* `mb/s` is megabytes (2^20 bytes) per second.
-
-```
-Silesia Corpus:
-http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
-
-This package:
-file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73821326 634 318.47
-silesia.tar zskp 2 211947520 67655404 1508 133.96
-silesia.tar zskp 3 211947520 64746933 3000 67.37
-silesia.tar zskp 4 211947520 60073508 16926 11.94
-
-cgo zstd:
-silesia.tar zstd 1 211947520 73605392 543 371.56
-silesia.tar zstd 3 211947520 66793289 864 233.68
-silesia.tar zstd 6 211947520 62916450 1913 105.66
-silesia.tar zstd 9 211947520 60212393 5063 39.92
-
-gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1498 134.87
-silesia.tar gzkp 1 211947520 80088272 1009 200.31
-
-GOB stream of binary data. Highly compressible.
-https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 233948096 3230 564.34 -gob-stream zskp 2 1911399616 203997694 4997 364.73 -gob-stream zskp 3 1911399616 173526523 13435 135.68 -gob-stream zskp 4 1911399616 162195235 47559 38.33 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382013 9046 201.49 -gob-stream gzkp 1 1911399616 359136669 4885 373.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. -http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833605 3687 258.64 -enwik9 zskp 2 1000000000 317001237 7672 124.29 -enwik9 zskp 3 1000000000 291915823 15923 59.89 -enwik9 zskp 4 1000000000 261710291 77697 12.27 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 8608 110.78 -enwik9 gzkp 1 1000000000 382781160 5628 169.45 - -Highly compressible JSON file. -https://files.klauspost.com/compress/github-june-2days-2019.json.zst - -file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17 -github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49 -github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41 -github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18 - -github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 -github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 -github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 - -github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32 -github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16 - -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z - -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29 -rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15 -rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49 -rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41 - -rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 -rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 -rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 - -rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96 -rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26 - -CSV data: -https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst - -file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17 -nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50 -nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79 -nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98 - -nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 -nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 -nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 
-nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
-```
-
-## Decompressor
-
-Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
-
-This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
-kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
-The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
-or run it past its limits with ANY input provided.
-
-### Usage
-
-The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
-Both are accessed by creating a `Decoder`.
-
-For streaming use, a simple setup could look like this:
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-func Decompress(in io.Reader, out io.Writer) error {
-	d, err := zstd.NewReader(in)
-	if err != nil {
-		return err
-	}
-	defer d.Close()
-
-	// Copy content...
-	_, err = io.Copy(out, d)
-	return err
-}
-```
-
-When running with default settings, it is important to call "Close" when you no longer need the Reader, so that its goroutines are stopped.
-Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
-
-Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
-However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
-only as it is being requested.
-
-For decoding buffers, it could look something like this:
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// Create a reader that caches decompressors.
-// For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
-
-// Decompress a buffer. We don't supply a destination buffer,
-// so it will be allocated by the decoder.
-func Decompress(src []byte) ([]byte, error) {
-	return decoder.DecodeAll(src, nil)
-}
-```
-
-Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
-By default 4 decompressors will be created.
-
-It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
-It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
-
-### Dictionaries
-
-Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
-
-Dictionaries are added individually to Decoders.
-Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
-To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
-Several dictionaries can be added at once.
-
-The appropriate dictionary will be used automatically for data that specifies it.
-A re-used Decoder will still contain the dictionaries registered.
-
-When registering multiple dictionaries with the same ID, the last one will be used.
-
-It is possible to use dictionaries when compressing data.
-
-To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
-and it will likely be used even if it doesn't improve compression.
-
-The same dictionary must then be used to decompress the content.
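
A minimal sketch of wiring the same dictionary into both directions could look like this, assuming `dict` holds the raw bytes of a dictionary built with `zstd --train`:

```Go
import "github.com/klauspost/compress/zstd"

// newCodecsWithDict returns an encoder and a decoder that share one dictionary.
// dict is assumed to be the raw content of a dictionary produced by `zstd --train`.
func newCodecsWithDict(dict []byte) (*zstd.Encoder, *zstd.Decoder, error) {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDict(dict))
	if err != nil {
		return nil, nil, err
	}
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDicts(dict))
	if err != nil {
		enc.Close()
		return nil, nil, err
	}
	return enc, dec, nil
}
```

Frames produced by `enc` that reference the dictionary can then only be decoded by a decoder with the same dictionary registered; frames that do not reference it decode as usual.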
- -For any real gains, the dictionary should be built with similar data. -If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. -Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. -For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). - -For now there is a fixed startup performance penalty for compressing content with dictionaries. -This will likely be improved over time. Just be aware to test performance when implementing. - -### Allocation-less operation - -The decoder has been designed to operate without allocations after a warmup. - -This means that you should *store* the decoder for best performance. -To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. -A decoder can safely be re-used even if the previous stream failed. - -To release the resources, you must call the `Close()` function on a decoder. -After this it can *no longer be reused*, but all running goroutines will be stopped. -So you *must* use this if you will no longer need the Reader. - -For decompressing smaller buffers a single decoder can be used. -When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. -In this case no unneeded allocations should be made. - -### Concurrency - -The buffer decoder does everything on the same goroutine and does nothing concurrently. -It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. - -The stream decoder will create goroutines that: - -1) Reads input and splits the input into blocks. -2) Decompression of literals. -3) Decompression of sequences. -4) Reconstruction of output stream. - -So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. - -The concurrency level will, for streams, determine how many blocks ahead the compression will start. - -Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. - -In practice this means that concurrency is often limited to utilizing about 3 cores effectively. - -### Benchmarks - -The first two are streaming decodes and the last are smaller inputs. - -Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. 
- -``` -BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op -BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op - -Concurrent blocks, performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op -``` - -This reflects the performance around May 2022, but this may be out of date. - -## Zstd inside ZIP files - -It is possible to use zstandard to compress individual files inside zip archives. -While this isn't widely supported it can be useful for internal files. - -To support the compression and decompression of these files you must register a compressor and decompressor. - -It is highly recommended registering the (de)compressors on individual zip Reader/Writer and NOT -use the global registration functions. The main reason for this is that 2 registrations from -different packages will result in a panic. - -It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip -files concurrently, and using a single instance will allow reusing some resources. - -See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for -how to compress and decompress files inside zip archives. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests and for performance enhancements include benchmarks. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 25ca98394..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return int(b.get32BitsFast(n)) -} - -// get32BitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) get32BitsFast(n uint8) uint32 { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) - b.bitsRead = 0 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - return - } - - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. 
- b.in = nil - if !b.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 1952f175b..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 31 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits64NC will add up to 64 bits. -// There must be space for 32 bits. -func (b *bitWriter) addBits64NC(value uint64, bits uint8) { - if bits <= 31 { - b.addBits32Clean(uint32(value), bits) - return - } - b.addBits32Clean(uint32(value), 32) - b.flush32() - b.addBits32Clean(uint32(value>>32), bits-32) -} - -// addBits32Clean will add up to 32 bits. -// It will not check if there is space for them. -// The input must not contain more bits than specified. -func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. 
-func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index 9c28840c3..000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - compressedBlockOverAlloc = 16 - maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. - maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - err error - - // Check against this crc, if hasCRC is true. - checkCRC uint32 - hasCRC bool - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. - localFrame *frameDec - - sequence []seqVals - - async struct { - newHist *history - literals []byte - seqData []byte - seqSize int // Size of uncompressed sequences - fcs uint64 - } - - // Block is RLE, this is the size. - RLESize uint32 - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - } - return &b -} - -// reset will reset the block. -// Input must be a start of a block and will be at the end of the block when returned. 
-func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp, err := br.readSmall(3) - if err != nil { - println("Reading block header:", err) - return err - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - maxSize := maxCompressedBlockSizeAlloc - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debugDecoder { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSizeAlloc - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + compressedBlockOverAlloc - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debugDecoder { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - // Empty compressed blocks must at least be 2 bytes - // for Literals_Block_Type and one for Sequences_Section_Header. - if cSize < 2 { - return ErrBlockTooSmall - } - case blockTypeRaw: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. - if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { - // byteBuf doesn't need a destination buffer. - if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) - } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) - } - } - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debugDecoder { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err -} - -// Close will release resources. -// Closed blockDec cannot be reset. 
-func (b *blockDec) Close() { -} - -// decodeBuf -func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxCompressedBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - // Append directly to history - if hist.ignoreBuffer == 0 { - b.dst = hist.b - hist.b = nil - } else { - b.dst = b.dst[:0] - } - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - if hist.ignoreBuffer == 0 { - hist.b = b.dst - b.dst = saved - } else { - hist.appendKeep(b.dst) - } - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return in, ErrBlockTooSmall - } - - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - var literals []byte - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { - return in, ErrWindowSizeExceeded - } - - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return in, ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return in, ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - huff := hist.huffTree - if huff == nil { - return in, errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - var err error - // Use our out buffer. 
- huff.MaxDecodedSize = litRegenSize - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return in, err - } - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - huff := hist.huffTree - if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { - huff = huffDecoderPool.Get().(*huff0.Scratch) - if huff == nil { - huff = &huff0.Scratch{} - } - } - var err error - if debugDecoder { - println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) - } - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return in, err - } - hist.huffTree = huff - huff.MaxDecodedSize = litRegenSize - // Use our out buffer. - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return in, err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - // Re-cap to get extra size. - literals = b.literalBuf[:len(literals)] - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - hist.decoders.literals = literals - return in, nil -} - -// decodeCompressed will start decompressing a block. -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - in, err := b.decodeLiterals(in, hist) - if err != nil { - return err - } - err = b.prepareSequences(in, hist) - if err != nil { - return err - } - if hist.decoders.nSeqs == 0 { - b.dst = append(b.dst, hist.decoders.literals...) 
- return nil - } - before := len(hist.decoders.out) - err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) - if err != nil { - return err - } - if hist.decoders.maxSyncLen > 0 { - hist.decoders.maxSyncLen += uint64(before) - hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) - } - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - return nil -} - -func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { - if debugDecoder { - printf("prepareSequences: %d byte(s) input\n", len(in)) - } - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - var nSeqs int - seqHeader := in[0] - switch { - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - if nSeqs == 0 && len(in) != 0 { - // When no sequences, there should not be any more data... - if debugDecoder { - printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) - } - return ErrUnexpectedBlockSize - } - - var seqs = &hist.decoders - seqs.nSeqs = nSeqs - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - if compMode&3 != 0 { - return errors.New("corrupt block: reserved bits not zero") - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - if seq.fse != nil && !seq.fse.preDefined { - fseDecoderPool.Put(seq.fse) - } - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - seq.fse.setRLE(symb) - if debugDecoder { - printf("RLE set to 0x%x, code: %v", symb, v) - } - case compModeFSE: - if debugDecoder { - println("Reading table for", tableIndex(i)) - } - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = seq.fse.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", seq.fse.symbolLen) - } - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - if debugDecoder { - println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") - } - - if nSeqs == 0 { - if len(b.sequence) > 0 { - b.sequence = b.sequence[:0] - } - return nil 
- } - br := seqs.br - if br == nil { - br = &bitReader{} - } - if err := br.init(in); err != nil { - return err - } - - if err := seqs.initialize(br, hist, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } - - return nil -} - -func (b *blockDec) decodeSequences(hist *history) error { - if cap(b.sequence) < hist.decoders.nSeqs { - if b.lowMem { - b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) - } else { - b.sequence = make([]seqVals, 0, 0x7F00+0xffff) - } - } - b.sequence = b.sequence[:hist.decoders.nSeqs] - if hist.decoders.nSeqs == 0 { - hist.decoders.seqSize = len(hist.decoders.literals) - return nil - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.prevOffset = hist.recentOffsets - - err := hist.decoders.decode(b.sequence) - hist.recentOffsets = hist.decoders.prevOffset - return err -} - -func (b *blockDec) executeSequences(hist *history) error { - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history anymore. - if hist.dict != nil { - hist.dict.content = nil - } - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.out = b.dst[:0] - err := hist.decoders.execute(b.sequence, hbytes) - if err != nil { - return err - } - return b.updateHistory(hist) -} - -func (b *blockDec) updateHistory(hist *history) error { - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - - if b.Last { - // if last block we don't care about history. - println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } else { - hist.append(b.dst) - if debugDecoder { - println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) - } - } - hist.decoders.out, hist.decoders.literals = nil, nil - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 32a7f401d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,909 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. -func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 2000 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. -func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. 
-type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. -func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. -func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. 
-func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. - if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. -func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 16 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - if err == nil && len(out)+5 > len(lits) { - // If we are close, we may still be worse or equal to raw. 
- var lh literalsHeader - lh.setSizes(len(out), len(lits), single) - if len(out)+lh.size() >= len(lits) { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed... - // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// encodeRLE will encode an RLE block. -func (b *blockEnc) encodeRLE(val byte, length uint32) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(length) - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, val) -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram() - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { - // Check common RLE cases. - seq := b.sequences[0] - if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { - // Offset == 1 and 0 or 1 literals. - b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) - return nil - } - } - - // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 6) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. 
- bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 16 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - if err == nil && len(out)+5 > len(b.literals) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSize(len(b.literals)) - szRaw := lh.size() - lh.setSizes(len(out), len(b.literals), single) - szComp := lh.size() - if len(out)+szComp >= len(b.literals)+szRaw { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... - if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). 
- nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - // Store sequences in reverse... 
- for seq >= 0 { - s = b.sequences[seq] - - ofB := ofTT[s.ofCode] - wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. - //of.encode(ofB) - nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 - dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) - wr.addBits16NC(of.state, uint8(nbBitsOut)) - of.state = of.stateTable[dstState] - - // Accumulate extra bits. - outBits := ofB.outBits & 31 - extraBits := uint64(s.offset & bitMask32[outBits]) - extraBitsN := outBits - - mlB := mlTT[s.mlCode] - //ml.encode(mlB) - nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16 - dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState) - wr.addBits16NC(ml.state, uint8(nbBitsOut)) - ml.state = ml.stateTable[dstState] - - outBits = mlB.outBits & 31 - extraBits = extraBits<> 16 - dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState) - wr.addBits16NC(ll.state, uint8(nbBitsOut)) - ll.state = ll.stateTable[dstState] - - outBits = llB.outBits & 31 - extraBits = extraBits<= b.size { - // Discard and encode as raw block. - b.output = b.encodeRawTo(b.output[:bhOffset], org) - b.popOffsets() - b.litEnc.Reuse = huff0.ReusePolicyNone - return nil - } - - // Size is output minus block header. - bh.setSize(uint32(len(b.output)-bhOffset) - 3) - if debugEncoder { - println("Rewriting block header", bh) - } - _ = bh.appendTo(b.output[bhOffset:bhOffset]) - b.coders.setPrev(llEnc, mlEnc, ofEnc) - return nil -} - -var errIncompressible = errors.New("incompressible") - -func (b *blockEnc) genCodes() { - if len(b.sequences) == 0 { - // nothing to do - return - } - if len(b.sequences) > math.MaxUint16 { - panic("can only encode up to 64K sequences") - } - // No bounds checks after here: - llH := b.coders.llEnc.Histogram() - ofH := b.coders.ofEnc.Histogram() - mlH := b.coders.mlEnc.Histogram() - for i := range llH { - llH[i] = 0 - } - for i := range ofH { - ofH[i] = 0 - } - for i := range mlH { - mlH[i] = 0 - } - - var llMax, ofMax, mlMax uint8 - for i := range b.sequences { - seq := &b.sequences[i] - v := llCode(seq.litLen) - seq.llCode = v - llH[v]++ - if v > llMax { - llMax = v - } - - v = ofCode(seq.offset) - seq.ofCode = v - ofH[v]++ - if v > ofMax { - ofMax = v - } - - v = mlCode(seq.matchLen) - seq.mlCode = v - mlH[v]++ - if v > mlMax { - mlMax = v - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) - } - } - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) - } - if debugAsserts && ofMax > maxOffsetBits { - panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) - } - if debugAsserts && llMax > maxLiteralLengthSymbol { - panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) - } - - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) -} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go deleted file mode 100644 index 01a01e486..000000000 --- a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by "stringer 
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. - -package zstd - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index 55a388553..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns io.ErrUnexpectedEOF if this cannot be satisfied. - readSmall(n int) ([]byte, error) - - // Read >8 bytes. - // MAY use the destination slice. 
- readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. - skipN(n int64) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, io.ErrUnexpectedEOF - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int64) error { - bb := *b - if n < 0 { - return fmt.Errorf("negative skip (%d) requested", n) - } - if int64(len(bb)) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. - if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := io.ReadFull(r.r, r.tmp[:1]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int64) error { - n2, err := io.CopyN(io.Discard, r.r, n) - if n2 != n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 0e59a242d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. 
-func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index 6a5a2988b..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // SingleSegment specifies whether the data is to be decompressed into a - // single contiguous memory segment. - // It implies that WindowSize is invalid and that FrameContentSize is valid. - SingleSegment bool - - // WindowSize is the window of data to keep while decoding. - // Will only be set if SingleSegment is false. - WindowSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // HasFCS specifies whether FrameContentSize has a valid value. - HasFCS bool - - // FrameContentSize is the expected uncompressed size of the entire frame. - FrameContentSize uint64 - - // Skippable will be true if the frame is meant to be skipped. - // This implies that FirstBlock.OK is false. - Skippable bool - - // SkippableID is the user-specific ID for the skippable frame. - // Valid values are between 0 to 15, inclusive. - SkippableID int - - // SkippableSize is the length of the user data to skip following - // the header. - SkippableSize uint32 - - // HeaderSize is the raw size of the frame header. - // - // For normal frames, it includes the size of the magic number and - // the size of the header (per section 3.1.1.1). - // It does not include the size for any data blocks (section 3.1.1.2) nor - // the size for the trailing content checksum. - // - // For skippable frames, this counts the size of the magic number - // along with the size of the size field of the payload. 
- // It does not include the size of the skippable payload itself. - // The total frame size is the HeaderSize plus the SkippableSize. - HeaderSize int - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated. - // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // If set there is a checksum present for the block content. - // The checksum field at the end is always 4 bytes long. - HasCheckSum bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. -func (h *Header) Decode(in []byte) error { - _, err := h.DecodeAndStrip(in) - return err -} - -// DecodeAndStrip will decode the header from the beginning of the stream -// and on success return the remaining bytes. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
-func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { - *h = Header{} - if len(in) < 4 { - return nil, io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - b, in := in[:4], in[4:] - if string(b) != frameMagic { - if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return nil, ErrMagicMismatch - } - if len(in) < 4 { - return nil, io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - h.Skippable = true - h.SkippableID = int(b[0] & 0xf) - h.SkippableSize = binary.LittleEndian.Uint32(in) - return in[4:], nil - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if len(in) < 1 { - return nil, io.ErrUnexpectedEOF - } - fhd, in := in[0], in[1:] - h.HeaderSize++ - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { - return nil, errors.New("reserved bit set on frame header") - } - - if !h.SingleSegment { - if len(in) < 1 { - return nil, io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - h.HeaderSize++ - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return nil, io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - h.HeaderSize += int(size) - switch len(b) { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return nil, io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - h.HeaderSize += int(fcsSize) - switch len(b) { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return in, nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. 
- cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return in, nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return in, nil -} - -// AppendTo will append the encoded header to the dst slice. -// There is no error checking performed on the header values. -func (h *Header) AppendTo(dst []byte) ([]byte, error) { - if h.Skippable { - magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} - magic[0] |= byte(h.SkippableID & 0xf) - dst = append(dst, magic[:]...) - f := h.SkippableSize - return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil - } - f := frameHeader{ - ContentSize: h.FrameContentSize, - WindowSize: uint32(h.WindowSize), - SingleSegment: h.SingleSegment, - Checksum: h.HasCheckSum, - DictID: h.DictionaryID, - } - return f.appendTo(dst), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index bbca17234..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,948 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "context" - "encoding/binary" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Current read position used for Reader functionality. - current decoderState - - // sync stream decoding - syncStream struct { - decodedFrame uint64 - br readerWrapper - enabled bool - inFrame bool - dstBuf []byte - } - - frame *frameDec - - // Custom dictionaries. - dicts map[uint32]*dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel context.CancelFunc - - // crc of current frame - crc *xxhash.Digest - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. -// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. 
It is even possible to -// use stateless decodes while a stream is being decoded. -// -// The Reset function can be used to initiate a new stream, which will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.crc = xxhash.New() - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. - d.dicts = make(map[uint32]*dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. -func (d *Decoder) Read(p []byte) (int, error) { - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, d.current.err - } - } - } - if len(d.current.b) > 0 { - if debugDecoder { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debugDecoder { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - d.syncStream.br.r = nil - if r == nil { - d.current.err = ErrDecoderNilInput - if len(d.current.b) > 0 { - d.current.b = d.current.b[:0] - } - d.current.flushed = true - return nil - } - - // If bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { - bb2 := bb - if debugDecoder { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.syncStream.dstBuf) > 0 { - dst = d.syncStream.dstBuf[:0] - } - - dst, err := d.DecodeAll(b, dst) - if err == nil { - err = io.EOF - } - // Save output buffer - d.syncStream.dstBuf = dst - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debugDecoder { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - // Remove current block. - d.stashDecoder() - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.flushed = false - d.current.d = nil - d.syncStream.dstBuf = nil - - // Ensure no-one else is still running... 
- d.streamWg.Wait() - if d.frame == nil { - d.frame = newFrameDec(d.o) - } - - if d.o.concurrent == 1 { - return d.startSyncDecoder(r) - } - - d.current.output = make(chan decodeOutput, d.o.concurrent) - ctx, cancel := context.WithCancel(context.Background()) - d.current.cancel = cancel - d.streamWg.Add(1) - go d.startStreamDecoder(ctx, r, d.current.output) - - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. -func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - if debugDecoder { - println("cancelling current") - } - d.current.cancel() - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - } - d.current.output = nil - d.current.flushed = true -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.decoders == nil { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. 
- block := <-d.decoders - frame := block.localFrame - initialSize := len(dst) - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - if frame.history.decoders.br != nil { - frame.history.decoders.br.in = nil - } - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err != nil { - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - return dst, err - } - if err = d.setDict(frame); err != nil { - return nil, err - } - if frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) - } - return dst, ErrWindowSizeExceeded - } - if frame.FrameContentSize != fcsUnknown { - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - - if cap(dst) == 0 && !d.o.limitToCap { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { - return dst, ErrDecoderSizeExceeded - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.err != nil { - // Keep error state. - return false - } - d.current.b = d.current.b[:0] - - // SYNC: - if d.syncStream.enabled { - if !blocking { - return false - } - ok = d.nextBlockSync() - if !ok { - d.stashDecoder() - } - return ok - } - - //ASYNC: - d.stashDecoder() - if blocking { - d.current.decodeOutput, ok = <-d.current.output - } else { - select { - case d.current.decodeOutput, ok = <-d.current.output: - default: - return false - } - } - if !ok { - // This should not happen, so signal error state... 
- d.current.err = io.ErrUnexpectedEOF - return false - } - next := d.current.decodeOutput - if next.d != nil && next.d.async.newHist != nil { - d.current.crc.Reset() - } - if debugDecoder { - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) - println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) - } - - if d.o.ignoreChecksum { - return true - } - - if len(next.b) > 0 { - d.current.crc.Write(next.b) - } - if next.err == nil && next.d != nil && next.d.hasCRC { - got := uint32(d.current.crc.Sum64()) - if got != next.d.checkCRC { - if debugDecoder { - printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) - } - d.current.err = ErrCRCMismatch - } else { - if debugDecoder { - printf("CRC ok %08x\n", got) - } - } - } - - return true -} - -func (d *Decoder) nextBlockSync() (ok bool) { - if d.current.d == nil { - d.current.d = <-d.decoders - } - for len(d.current.b) == 0 { - if !d.syncStream.inFrame { - d.frame.history.reset() - d.current.err = d.frame.reset(&d.syncStream.br) - if d.current.err == nil { - d.current.err = d.setDict(d.frame) - } - if d.current.err != nil { - return false - } - if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { - d.current.err = ErrDecoderSizeExceeded - return false - } - - d.syncStream.decodedFrame = 0 - d.syncStream.inFrame = true - } - d.current.err = d.frame.next(d.current.d) - if d.current.err != nil { - return false - } - d.frame.history.ensureBlock() - if debugDecoder { - println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) - } - histBefore := len(d.frame.history.b) - d.current.err = d.current.d.decodeBuf(&d.frame.history) - - if d.current.err != nil { - println("error after:", d.current.err) - return false - } - d.current.b = d.frame.history.b[histBefore:] - if debugDecoder { - println("history after:", len(d.frame.history.b)) - } - - // Check frame size (before CRC) - d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.syncStream.decodedFrame > d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeExceeded - return false - } - - // Check FCS - if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeMismatch - return false - } - - // Update/Check CRC - if d.frame.HasCheckSum { - if !d.o.ignoreChecksum { - d.frame.crc.Write(d.current.b) - } - if d.current.d.Last { - if !d.o.ignoreChecksum { - d.current.err = d.frame.checkCRC() - } else { - d.current.err = d.frame.consumeCRC() - } - if d.current.err != nil { - println("CRC error:", d.current.err) - return false - } - } - } - d.syncStream.inFrame = !d.current.d.Last - } - return true -} - -func (d *Decoder) stashDecoder() { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. 
-func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.current.cancel != nil { - d.current.cancel() - d.streamWg.Wait() - d.current.cancel = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -func (d *Decoder) startSyncDecoder(r io.Reader) error { - d.frame.history.reset() - d.syncStream.br = readerWrapper{r: r} - d.syncStream.inFrame = false - d.syncStream.enabled = true - d.syncStream.decodedFrame = 0 - return nil -} - -// Create Decoder: -// ASYNC: -// Spawn 3 go routines. -// 0: Read frames and decode block literals. -// 1: Decode sequences. -// 2: Execute sequences, send to output. -func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { - defer d.streamWg.Done() - br := readerWrapper{r: r} - - var seqDecode = make(chan *blockDec, d.o.concurrent) - var seqExecute = make(chan *blockDec, d.o.concurrent) - - // Async 1: Decode sequences... - go func() { - var hist history - var hasErr bool - - for block := range seqDecode { - if hasErr { - if block != nil { - seqExecute <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history, recent:", block.async.newHist.recentOffsets) - } - hist.reset() - hist.decoders = block.async.newHist.decoders - hist.recentOffsets = block.async.newHist.recentOffsets - hist.windowSize = block.async.newHist.windowSize - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqExecute <- block - continue - } - - hist.decoders.literals = block.async.literals - block.err = block.prepareSequences(block.async.seqData, &hist) - if debugDecoder && block.err != nil { - println("prepareSequences returned:", block.err) - } - hasErr = block.err != nil - if block.err == nil { - block.err = block.decodeSequences(&hist) - if debugDecoder && block.err != nil { - println("decodeSequences returned:", block.err) - } - hasErr = block.err != nil - // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] - block.async.seqSize = hist.decoders.seqSize - } - seqExecute <- block - } - close(seqExecute) - hist.reset() - }() - - var wg sync.WaitGroup - wg.Add(1) - - // Async 3: Execute sequences... 
- frameHistCache := d.frame.history.b - go func() { - var hist history - var decodedFrame uint64 - var fcs uint64 - var hasErr bool - for block := range seqExecute { - out := decodeOutput{err: block.err, d: block} - if block.err != nil || hasErr { - hasErr = true - output <- out - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 2: new history") - } - hist.reset() - hist.windowSize = block.async.newHist.windowSize - hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - - if cap(hist.b) < hist.allocFrameBuffer { - if cap(frameHistCache) >= hist.allocFrameBuffer { - hist.b = frameHistCache - } else { - hist.b = make([]byte, 0, hist.allocFrameBuffer) - println("Alloc history sized", hist.allocFrameBuffer) - } - } - hist.b = hist.b[:0] - fcs = block.async.fcs - decodedFrame = 0 - } - do := decodeOutput{err: block.err, d: block} - switch block.Type { - case blockTypeRLE: - if debugDecoder { - println("add rle block length:", block.RLESize) - } - - if cap(block.dst) < int(block.RLESize) { - if block.lowMem { - block.dst = make([]byte, block.RLESize) - } else { - block.dst = make([]byte, maxCompressedBlockSize) - } - } - block.dst = block.dst[:block.RLESize] - v := block.data[0] - for i := range block.dst { - block.dst[i] = v - } - hist.append(block.dst) - do.b = block.dst - case blockTypeRaw: - if debugDecoder { - println("add raw block length:", len(block.data)) - } - hist.append(block.data) - do.b = block.data - case blockTypeCompressed: - if debugDecoder { - println("execute with history length:", len(hist.b), "window:", hist.windowSize) - } - hist.decoders.seqSize = block.async.seqSize - hist.decoders.literals = block.async.literals - do.err = block.executeSequences(&hist) - hasErr = do.err != nil - if debugDecoder && hasErr { - println("executeSequences returned:", do.err) - } - do.b = block.dst - } - if !hasErr { - decodedFrame += uint64(len(do.b)) - if decodedFrame > fcs { - println("fcs exceeded", block.Last, fcs, decodedFrame) - do.err = ErrFrameSizeExceeded - hasErr = true - } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { - do.err = ErrFrameSizeMismatch - hasErr = true - } else { - if debugDecoder { - println("fcs ok", block.Last, fcs, decodedFrame) - } - } - } - output <- do - } - close(output) - frameHistCache = hist.b - wg.Done() - if debugDecoder { - println("decoder goroutines finished") - } - hist.reset() - }() - - var hist history -decodeStream: - for { - var hasErr bool - hist.reset() - decodeBlock := func(block *blockDec) { - if hasErr { - if block != nil { - seqDecode <- block - } - return - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - return - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - frame := d.frame - if debugDecoder { - println("New frame...") - } - var historySent bool - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil { - err = d.setDict(frame) - } - if err == nil && d.frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) - } - - err = ErrDecoderSizeExceeded - } - if err != nil { - select { - case <-ctx.Done(): - case dec := <-d.decoders: - dec.sendErr(err) - decodeBlock(dec) - } - break decodeStream - } - - // Go through all blocks of the frame. - for { - var dec *blockDec - select { - case <-ctx.Done(): - break decodeStream - case dec = <-d.decoders: - // Once we have a decoder, we MUST return it. - } - err := frame.next(dec) - if !historySent { - h := frame.history - if debugDecoder { - println("Alloc History:", h.allocFrameBuffer) - } - hist.reset() - if h.dict != nil { - hist.setDict(h.dict) - } - dec.async.newHist = &h - dec.async.fcs = frame.FrameContentSize - historySent = true - } else { - dec.async.newHist = nil - } - if debugDecoder && err != nil { - println("next block returned error:", err) - } - dec.err = err - dec.hasCRC = false - if dec.Last && frame.HasCheckSum && err == nil { - crc, err := frame.rawInput.readSmall(4) - if len(crc) < 4 { - if err == nil { - err = io.ErrUnexpectedEOF - - } - println("CRC missing?", err) - dec.err = err - } else { - dec.checkCRC = binary.LittleEndian.Uint32(crc) - dec.hasCRC = true - if debugDecoder { - printf("found crc to check: %08x\n", dec.checkCRC) - } - } - } - err = dec.err - last := dec.Last - decodeBlock(dec) - if err != nil { - break decodeStream - } - if last { - break - } - } - } - close(seqDecode) - wg.Wait() - hist.reset() - d.frame.history.b = frameHistCache -} - -func (d *Decoder) setDict(frame *frameDec) (err error) { - dict, ok := d.dicts[frame.DictionaryID] - if ok { - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(dict) - } else if frame.DictionaryID != 0 { - // A zero or missing dictionary id is ambiguous: - // either dictionary zero, or no dictionary. In particular, - // zstd --patch-from uses this id for the source file, - // so only return an error if the dictionary id is not zero. - err = ErrUnknownDictionary - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index 774c5f00f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math/bits" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []*dict - ignoreChecksum bool - limitToCap bool - decodeBufsBelow int -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - decodeBufsBelow: 128 << 10, - } - if o.concurrent > 4 { - o.concurrent = 4 - } - o.maxDecodedSize = 64 << 30 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency sets the number of created decoders. 
-// When decoding block with DecodeAll, this will limit the number -// of possible concurrently running decodes. -// When decoding streams, this will limit the number of -// inflight blocks. -// When decoding streams and setting maximum to 1, -// no async decoding will be done. -// When a value of 0 is provided GOMAXPROCS will be used. -// By default this will be set to 4 or GOMAXPROCS, whatever is lower. -func WithDecoderConcurrency(n int) DOption { - return func(o *decoderOptions) error { - if n < 0 { - return errors.New("concurrency must be at least 1") - } - if n == 0 { - o.concurrent = runtime.GOMAXPROCS(0) - } else { - o.concurrent = n - } - return nil - } -} - -// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory -// non-streaming operations or maximum window size for streaming operations. -// This can be used to control memory usage of potentially hostile content. -// Maximum is 1 << 63 bytes. Default is 64GiB. -func WithDecoderMaxMemory(n uint64) DOption { - return func(o *decoderOptions) error { - if n == 0 { - return errors.New("WithDecoderMaxMemory must be at least 1") - } - if n > 1<<63 { - return errors.New("WithDecoderMaxmemory must be less than 1 << 63") - } - o.maxDecodedSize = n - return nil - } -} - -// WithDecoderDicts allows to register one or more dictionaries for the decoder. -// -// Each slice in dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// If several dictionaries with the same ID are provided, the last one will be used. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithDecoderDicts(dicts ...[]byte) DOption { - return func(o *decoderOptions) error { - for _, b := range dicts { - d, err := loadDict(b) - if err != nil { - return err - } - o.dicts = append(o.dicts, d) - } - return nil - } -} - -// WithDecoderDictRaw registers a dictionary that may be used by the decoder. -// The slice content can be arbitrary data. -func WithDecoderDictRaw(id uint32, content []byte) DOption { - return func(o *decoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) - return nil - } -} - -// WithDecoderMaxWindow allows to set a maximum window size for decodes. -// This allows rejecting packets that will cause big memory usage. -// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. -// If WithDecoderMaxMemory is set to a lower value, that will be used. -// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. -func WithDecoderMaxWindow(size uint64) DOption { - return func(o *decoderOptions) error { - if size < MinWindowSize { - return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") - } - if size > (1<<41)+7*(1<<38) { - return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") - } - o.maxWindowSize = size - return nil - } -} - -// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, -// or any size set in WithDecoderMaxMemory. -// This can be used to limit decoding to a specific maximum output size. -// Disabled by default. 
-func WithDecodeAllCapLimit(b bool) DOption { - return func(o *decoderOptions) error { - o.limitToCap = b - return nil - } -} - -// WithDecodeBuffersBelow will fully decode readers that have a -// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. -// This typically uses less allocations but will have the full decompressed object in memory. -// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. -// Default is 128KiB. -func WithDecodeBuffersBelow(size int) DOption { - return func(o *decoderOptions) error { - o.decodeBufsBelow = size - return nil - } -} - -// IgnoreChecksum allows to forcibly ignore checksum checking. -func IgnoreChecksum(b bool) DOption { - return func(o *decoderOptions) error { - o.ignoreChecksum = b - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index b7b83164b..000000000 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,565 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "sort" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - offsets [3]int - content []byte -} - -const dictMagic = "\x37\xa4\x30\xec" - -// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. -const dictMaxLength = 1 << 31 - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// ContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) ContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Content returns the dictionary content. -func (d *dict) Content() []byte { - if d == nil { - return nil - } - return d.content -} - -// Offsets returns the initial offsets. -func (d *dict) Offsets() [3]int { - if d == nil { - return [3]int{} - } - return d.offsets -} - -// LitEncoder returns the literal encoder. -func (d *dict) LitEncoder() *huff0.Scratch { - if d == nil { - return nil - } - return d.litEnc -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. - if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if string(b[:4]) != dictMagic { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, fmt.Errorf("loading literal table: %w", err) - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. 
- dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} - -// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. -func InspectDictionary(b []byte) (interface { - ID() uint32 - ContentSize() int - Content() []byte - Offsets() [3]int - LitEncoder() *huff0.Scratch -}, error) { - initPredefined() - d, err := loadDict(b) - return d, err -} - -type BuildDictOptions struct { - // Dictionary ID. - ID uint32 - - // Content to use to create dictionary tables. - Contents [][]byte - - // History to use for all blocks. - History []byte - - // Offsets to use. - Offsets [3]int - - // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. - // See https://github.com/facebook/zstd/issues/3724 - CompatV155 bool - - // Use the specified encoder level. - // The dictionary will be built using the specified encoder level, - // which will reflect speed and make the dictionary tailored for that level. - // If not set SpeedBestCompression will be used. - Level EncoderLevel - - // DebugOut will write stats and other details here if set. - DebugOut io.Writer -} - -func BuildDict(o BuildDictOptions) ([]byte, error) { - initPredefined() - hist := o.History - contents := o.Contents - debug := o.DebugOut != nil - println := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintln(o.DebugOut, args...) - } - } - printf := func(s string, args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintf(o.DebugOut, s, args...) - } - } - print := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprint(o.DebugOut, args...) 
- } - } - - if int64(len(hist)) > dictMaxLength { - return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) - } - if len(hist) < 8 { - return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) - } - if len(contents) == 0 { - return nil, errors.New("no content provided") - } - d := dict{ - id: o.ID, - litEnc: nil, - llDec: sequenceDec{}, - ofDec: sequenceDec{}, - mlDec: sequenceDec{}, - offsets: o.Offsets, - content: hist, - } - block := blockEnc{lowMem: false} - block.init() - enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) - if o.Level != 0 { - eOpts := encoderOptions{ - level: o.Level, - blockSize: maxMatchLen, - windowSize: maxMatchLen, - dict: &d, - lowMem: false, - } - enc = eOpts.encoder() - } else { - o.Level = SpeedBestCompression - } - var ( - remain [256]int - ll [256]int - ml [256]int - of [256]int - ) - addValues := func(dst *[256]int, src []byte) { - for _, v := range src { - dst[v]++ - } - } - addHist := func(dst *[256]int, src *[256]uint32) { - for i, v := range src { - dst[i] += int(v) - } - } - seqs := 0 - nUsed := 0 - litTotal := 0 - newOffsets := make(map[uint32]int, 1000) - for _, b := range contents { - block.reset(nil) - if len(b) < 8 { - continue - } - nUsed++ - enc.Reset(&d, true) - enc.Encode(&block, b) - addValues(&remain, block.literals) - litTotal += len(block.literals) - if len(block.sequences) == 0 { - continue - } - seqs += len(block.sequences) - block.genCodes() - addHist(&ll, block.coders.llEnc.Histogram()) - addHist(&ml, block.coders.mlEnc.Histogram()) - addHist(&of, block.coders.ofEnc.Histogram()) - for i, seq := range block.sequences { - if i > 3 { - break - } - offset := seq.offset - if offset == 0 { - continue - } - if int(offset) >= len(o.History) { - continue - } - if offset > 3 { - newOffsets[offset-3]++ - } else { - newOffsets[uint32(o.Offsets[offset-1])]++ - } - } - } - // Find most used offsets. - var sortedOffsets []uint32 - for k := range newOffsets { - sortedOffsets = append(sortedOffsets, k) - } - sort.Slice(sortedOffsets, func(i, j int) bool { - a, b := sortedOffsets[i], sortedOffsets[j] - if a == b { - // Prefer the longer offset - return sortedOffsets[i] > sortedOffsets[j] - } - return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] - }) - if len(sortedOffsets) > 3 { - if debug { - print("Offsets:") - for i, v := range sortedOffsets { - if i > 20 { - break - } - printf("[%d: %d],", v, newOffsets[v]) - } - println("") - } - - sortedOffsets = sortedOffsets[:3] - } - for i, v := range sortedOffsets { - o.Offsets[i] = int(v) - } - if debug { - println("New repeat offsets", o.Offsets) - } - - if nUsed == 0 || seqs == 0 { - return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) - } - if debug { - println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) - } - if seqs/nUsed < 512 { - // Use 512 as minimum. - nUsed = seqs / 512 - if nUsed == 0 { - nUsed = 1 - } - } - copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { - hist := dst.Histogram() - var maxSym uint8 - var maxCount int - var fakeLength int - for i, v := range src { - if v > 0 { - v = v / nUsed - if v == 0 { - v = 1 - } - } - if v > maxCount { - maxCount = v - } - if v != 0 { - maxSym = uint8(i) - } - fakeLength += v - hist[i] = uint32(v) - } - - // Ensure we aren't trying to represent RLE. 
- if maxCount == fakeLength { - for i := range hist { - if uint8(i) == maxSym { - fakeLength++ - maxSym++ - hist[i+1] = 1 - if maxSym > 1 { - break - } - } - if hist[0] == 0 { - fakeLength++ - hist[i] = 1 - if maxSym > 1 { - break - } - } - } - } - - dst.HistogramFinished(maxSym, maxCount) - dst.reUsed = false - dst.useRLE = false - err := dst.normalizeCount(fakeLength) - if err != nil { - return nil, err - } - if debug { - println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) - } - return dst.writeCount(nil) - } - if debug { - print("Literal lengths: ") - } - llTable, err := copyHist(block.coders.llEnc, &ll) - if err != nil { - return nil, err - } - if debug { - print("Match lengths: ") - } - mlTable, err := copyHist(block.coders.mlEnc, &ml) - if err != nil { - return nil, err - } - if debug { - print("Offsets: ") - } - ofTable, err := copyHist(block.coders.ofEnc, &of) - if err != nil { - return nil, err - } - - // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } - huffBuff := make([]byte, 0, avgSize) - // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } - if debug { - println("Huffman weights:") - } - for i, n := range remain[:] { - if n > 0 { - n = n / div - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - if debug { - printf("[%d: %d], ", i, n) - } - } - } - if o.CompatV155 && remain[255]/div == 0 { - huffBuff = append(huffBuff, 255) - } - scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { - scratch = &huff0.Scratch{TableLog: 11} - _, _, err = huff0.Compress1X(huffBuff, scratch) - if err == nil { - break - } - if debug { - printf("Try %d: Huffman error: %v\n", tries+1, err) - } - huffBuff = huffBuff[:0] - if tries == 250 { - if debug { - println("Huffman: Bailing out with predefined table") - } - - // Bail out.... Just generate something - huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { - huffBuff = append(huffBuff, byte(i)) - } - continue - } - if errors.Is(err, huff0.ErrIncompressible) { - // Try truncating least common. - for i, n := range remain[:] { - if n > 0 { - n = n / (div * (i + 1)) - if n > 0 { - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - } - } - } - if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { - huffBuff = append(huffBuff, 255) - } - if len(huffBuff) == 0 { - huffBuff = append(huffBuff, 0, 255) - } - } - if errors.Is(err, huff0.ErrUseRLE) { - for i, n := range remain[:] { - n = n / (div * (i + 1)) - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
- } - } - } - - var out bytes.Buffer - out.Write([]byte(dictMagic)) - out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) - out.Write(scratch.OutTable) - if debug { - println("huff table:", len(scratch.OutTable), "bytes") - println("of table:", len(ofTable), "bytes") - println("ml table:", len(mlTable), "bytes") - println("ll table:", len(llTable), "bytes") - } - out.Write(ofTable) - out.Write(mlTable) - out.Write(llTable) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) - out.Write(hist) - if debug { - _, err := loadDict(out.Bytes()) - if err != nil { - panic(err) - } - i, err := InspectDictionary(out.Bytes()) - if err != nil { - panic(err) - } - println("ID:", i.ID()) - println("Content size:", i.ContentSize()) - println("Encoder:", i.LitEncoder() != nil) - println("Offsets:", i.Offsets()) - var totalSize int - for _, b := range contents { - totalSize += len(b) - } - - encWith := func(opts ...EOption) int { - enc, err := NewWriter(nil, opts...) - if err != nil { - panic(err) - } - defer enc.Close() - var dst []byte - var totalSize int - for _, b := range contents { - dst = enc.EncodeAll(b, dst[:0]) - totalSize += len(dst) - } - return totalSize - } - plain := encWith(WithEncoderLevel(o.Level)) - withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) - println("Input size:", totalSize) - println("Plain Compressed:", plain) - println("Dict Compressed:", withDict) - println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") - } - return out.Bytes(), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 5ca46038a..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,173 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - bufferReset int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. 
-func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > e.bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. -func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - e.blk.dictLitEnc = nil - if d != nil { - low := e.lowMem - if singleBlock { - e.lowMem = true - } - e.ensureHist(d.ContentSize() + maxCompressedBlockSize) - e.lowMem = low - } - - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < e.bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index 4613724e9..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 -} - -const highScore = maxMatchLen * 8 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep) & 3) - } - // Cost, excluding - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [bestShortTableSize]prevEntry{} - e.longTable = [bestLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - // Add block to history - s := e.addBlock(src) - blk.size = len(src) - - // Check RLE first - if len(src) > zstdMinMatch { - ml := matchLen(src[1:], src) - if ml == len(src)-1 { - blk.literals = append(blk.literals, src[0]) - blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) - return - } - } - - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. - bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - const goodEnough = 250 - - cv := load6432(src, s) - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - // Set m to a match at offset if it looks like that will improve compression. - improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - delta := s - offset - if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { - return - } - // Try to quick reject if we already have a long match. - if m.length > 16 { - left := len(src) - int(m.s+m.length) - // If we are too close to the end, keep as is. - if left <= 0 { - return - } - checkLen := m.length - (s - m.s) - 8 - if left > 2 && checkLen > 4 { - // Check 4 bytes, 4 bytes from the end of the current match. - a := load3232(src, offset+checkLen) - b := load3232(src, s+checkLen) - if a != b { - return - } - } - } - l := 4 + e.matchlen(s+4, offset+4, src) - if m.rep <= 0 { - // Extend candidate match backwards as far as possible. - // Do not extend repeats as we can assume they are optimal - // and offsets change if s == nextEmit. 
- tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { - s-- - offset-- - l++ - } - } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { - panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - cand := match{offset: offset, s: s, length: l, rep: rep} - cand.estBits(bitsPerByte) - if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { - *m = cand - } - } - - best := match{s: s, est: highScore} - improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) - - if canRepeat && best.length < goodEnough { - if s == nextEmit { - // Check repeats straight after a match. - improve(&best, s-offset2, s, uint32(cv), 1|4) - improve(&best, s-offset3, s, uint32(cv), 2|4) - if offset1 > 1 { - improve(&best, s-(offset1-1), s, uint32(cv), 3|4) - } - } - - // If either no match or a non-repeat match, check at + 1 - if best.rep <= 0 { - cv32 := uint32(cv >> 8) - spp := s + 1 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - if best.rep < 0 { - cv32 = uint32(cv >> 24) - spp += 2 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - } - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - index0 := s + 1 - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - continue - } - - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s+1) - cv2 := load6432(src, s+2) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) - // Long at s+1, s+2 - improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) - improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) - if false { - // Short at s+3. - // Too often worse... - improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) - } - - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - // We cannot do this if we have already indexed this position. - const skipBeginning = 2 - if best.s > s-skipBeginning { - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. 
- if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - - if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - } - } - } - } - } - - if debugAsserts { - if best.offset >= best.s { - panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) - } - if best.s < nextEmit { - panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) - } - if best.offset < s-e.maxMatchOff { - panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) - } - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - s = best.s - if best.rep > 0 { - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - addLiterals(&seq, best.s) - - // Repeat. If bit 4 is set, this is a non-lit repeat. - seq.offset = uint32(best.rep & 3) - if debugSequences { - println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) - } - blk.sequences = append(blk.sequences, seq) - - // Index old s + 1 -> s - 1 - s = best.s + best.length - nextEmit = s - - // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } - off := index0 + e.cur - for index0 < end { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - - switch best.rep { - case 2, 4 | 1: - offset1, offset2 = offset2, offset1 - case 3, 4 | 2: - offset1, offset2, offset3 = offset3, offset1, offset2 - case 4 | 3: - offset1, offset2, offset3 = offset1-1, offset1, offset2 - } - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Write our sequence - var seq seq - l := best.length - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - - // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } - - off := index0 + e.cur - for index0 < end { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - off++ - } - if s >= sLimit { - break encodeLoop - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index 84a79fde7..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1252 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [betterShortTableSize]tableEntry{} - e.longTable = [betterLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - // Add block to history - s := e.addBlock(src) - blk.size = len(src) - - // Check RLE first - if len(src) > zstdMinMatch { - ml := matchLen(src[1:], src) - if ml == len(src)-1 { - blk.literals = append(blk.literals, src[0]) - blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) - return - } - } - - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - index0 = s + 1 - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += length + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - s += length + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. 
- prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is around 3 bytes, but depends on input. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 3 - - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - s2 := s + skipBeginning - cv := load3232(src, s2) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. 
- matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - off += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. 
-// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - index0 = s + 1 - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. 
- start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - s += length + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - s += length + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. 
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - off += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - 
for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) - e.longTableShardDirty[i] = false - } - } - } - e.cur = e.maxMatchOff - e.allDirty = false -} - -func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/betterLongTableShardSize] = true -} - -func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { - e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go deleted file mode 100644 index d36be7bd8..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ /dev/null @@ -1,1123 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - dFastLongTableBits = 17 // Bits used in the long match table - dFastLongTableSize = 1 << dFastLongTableBits // Size of the table - dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastLongLen = 8 // Bytes used for table hash - - dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard - - dFastShortTableBits = tableBits // Bits used in the short match table - dFastShortTableSize = 1 << dFastShortTableBits // Size of the table - dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - dFastShortLen = 5 // Bytes used for table hash - -) - -type doubleFastEncoder struct { - fastEncoder - longTable [dFastLongTableSize]tableEntry -} - -type doubleFastEncoderDict struct { - fastEncoderDict - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry - longTableShardDirty [dLongTableShardCnt]bool -} - -// Encode mimmics functionality in zstd_dfast.c -func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [dFastShortTableSize]tableEntry{} - e.longTable = [dFastLongTableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. 
- const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - //copy(e.longTable[:], e.dictLongTable) - e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) - - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index f45a3da7d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,891 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
- maxMatchLength = 131074 -) - -type tableEntry struct { - val uint32 - offset int32 -} - -type fastEncoder struct { - fastBase - table [tableSize]tableEntry -} - -type fastEncoderDict struct { - fastEncoder - dictTable []tableEntry - tableShardDirty [tableShardCnt]bool - allDirty bool -} - -// Encode mimmics functionality in zstd_fast.c -func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debugEncoder { - if len(src) > maxCompressedBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 6 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [tableSize]tableEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hashLen(cv, hashLog, tableFastHashLen) - nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+6, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 2 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - //copy(e.table[:], e.dictTable) - e.table = *(*[tableSize]tableEntry)(e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index 8f8223cd3..000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,642 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "math" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if e.o.concurrent > 1 { - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - s.current = s.current[:0] - s.previous = s.previous[:0] - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - s.filling = s.filling[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - if s.eofWritten { - return 0, ErrEncoderClosed - } - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst := fh.appendTo(tmp[:0]) - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // SYNC: - if e.o.concurrent == 1 { - src := s.filling - s.nInput += int64(len(s.filling)) - if debugEncoder { - println("Adding sync block,", len(src), "bytes, final:", final) - } - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - - s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.err != nil { - return s.err - } - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.filling = s.filling[:0] - return s.err - } - - // Move blocks forward. - s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - if final { - s.eofWritten = true - } - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. 
- enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.writeErr != nil { - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - // Ignore Flush after Close. - if errors.Is(s.err, ErrEncoderClosed) { - return nil - } - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - // Ignore Flush after Close. - if errors.Is(s.err, ErrEncoderClosed) { - return nil - } - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. -// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - if errors.Is(s.err, ErrEncoderClosed) { - return nil - } - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. 
- var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - if s.err == nil { - s.err = ErrEncoderClosed - return nil - } - - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. -// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - e.encoders <- enc - }() - return e.encodeAll(enc, src, dst) -} - -func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - - // Use single segments when above minimum window and below window size. - single := len(src) <= e.o.windowSize && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst = fh.appendTo(dst) - - // If we can do everything in one block, prefer that. - if len(src) <= e.o.blockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - oldout := blk.output - // Output directly to dst - blk.output = dst - - err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = blk.output - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = append(dst, blk.output...) 
- blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - var err error - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} - -// MaxEncodedSize returns the expected maximum -// size of an encoded block or stream. -func (e *Encoder) MaxEncodedSize(size int) int { - frameHeader := 4 + 2 // magic + frame header & window descriptor - if e.o.dict != nil { - frameHeader += 4 - } - // Frame content size: - if size < 256 { - frameHeader++ - } else if size < 65536+256 { - frameHeader += 2 - } else if size < math.MaxInt32 { - frameHeader += 4 - } else { - frameHeader += 8 - } - // Final crc - if e.o.crc { - frameHeader += 4 - } - - // Max overhead is 3 bytes/block. - // There cannot be 0 blocks. - blocks := (size + e.o.blockSize) / e.o.blockSize - - // Combine, add padding. - maxSz := frameHeader + 3*blocks + size - if e.o.pad > 1 { - maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) - } - return maxSz -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index 20671dcb9..000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,339 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - customBlockSize bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: maxCompressedBlockSize, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: false, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. 
-func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// For streams, setting a value of 1 will disable async compression. -// By default this will be set to GOMAXPROCS. -func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level and max 8MB. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - o.customBlockSize = true - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. 
-func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - n = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values are very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // The is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The compare is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. -func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - default: - return SpeedBestCompression - } -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - if !o.customBlockSize { - o.blockSize = 1 << 16 - } - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 8 << 20 - case SpeedBestCompression: - o.windowSize = 8 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedDefault - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. 
-// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. -func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. -// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size and the window size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. -// -// The slice dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// The encoder *may* choose to use no dictionary instead for certain payloads. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} - -// WithEncoderDictRaw registers a dictionary that may be used by the encoder. -// -// The slice content may contain arbitrary data. It will be used as an initial -// history. 
-func WithEncoderDictRaw(id uint32, content []byte) EOption { - return func(o *encoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index e47af66e7..000000000 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "io" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc *xxhash.Digest - - WindowSize uint64 - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - - DictionaryID uint32 - HasCheckSum bool - SingleSegment bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -const ( - frameMagic = "\x28\xb5\x2f\xfd" - skippableFrameMagic = "\x2a\x4d\x18" -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicated that the stream contained data, but -// there was a problem. -func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - case nil: - signature[0] = b[0] - default: - return err - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - case nil: - copy(signature[1:], b) - default: - return err - } - - if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) - } - // Break if not skippable frame. 
- break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int64(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if string(signature[:]) != frameMagic { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - if debugDecoder { - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - } - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = 0 - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch len(b) { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, "ID:", id) - } - d.DictionaryID = id - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = fcsUnknown - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch len(b) { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("Read FCS:", d.FrameContentSize) - } - } - - // Move this to shared. - d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize > d.o.maxWindowSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. 
- d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - if d.WindowSize > d.o.maxDecodedSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrDecoderSizeExceeded - } - } - - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or window size below 2MB. - d.history.allocFrameBuffer = d.history.windowSize * 2 - } else { - if d.o.lowMem { - // Alloc with 1MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 - } else { - // Alloc with 2MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize - } - } - - if debugDecoder { - println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) - } - - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - println("decoding new block") - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - block.sendErr(err) - return err - } - return nil -} - -// checkCRC will check the checksum, assuming the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - // We can overwrite upper tmp now - buf, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - want := binary.LittleEndian.Uint32(buf[:4]) - got := uint32(d.crc.Sum64()) - - if got != want { - if debugDecoder { - printf("CRC check failed: got %08x, want %08x\n", got, want) - } - return ErrCRCMismatch - } - if debugDecoder { - printf("CRC ok %08x\n", got) - } - return nil -} - -// consumeCRC skips over the checksum, assuming the frame has one. -func (d *frameDec) consumeCRC() error { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - } - return err -} - -// runDecoder will run the decoder for the remainder of the frame. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. - d.history.b = dst - d.history.ignoreBuffer = len(dst) - // Store input length, so we only check new data. 
- crcStart := len(dst) - d.history.decoders.maxSyncLen = 0 - if d.o.limitToCap { - d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) - } - if d.FrameContentSize != fcsUnknown { - if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { - d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) - } - if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) - } - return dst, ErrDecoderSizeExceeded - } - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen) - } - if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { - // Alloc for output - dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil { - break - } - if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { - println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) - err = ErrDecoderSizeExceeded - break - } - if d.o.limitToCap && len(d.history.b) > cap(dst) { - println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) - err = ErrDecoderSizeExceeded - break - } - if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { - println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) - err = ErrFrameSizeExceeded - break - } - if dec.Last { - break - } - if debugDecoder { - println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) - } - } - dst = d.history.b - if err == nil { - if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { - err = ErrFrameSizeMismatch - } else if d.HasCheckSum { - if d.o.ignoreChecksum { - err = d.consumeCRC() - } else { - d.crc.Write(dst[crcStart:]) - err = d.checkCRC() - } - } - } - d.history.b = saved - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 667ca0679..000000000 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) []byte { - dst = append(dst, frameMagic...) 
- var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, framessizes < 256 are not stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index 2f8860a72..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. -func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { - var ( - charnum uint16 - previous0 bool - ) - if b.remain() < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32NC() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - println("Invalid tablelog:", nbBits) - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 && charnum <= maxSymbol { - if previous0 { - //println("prev0") - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - //println("24 x 0") - n0 += 24 - if r := b.remain(); r > 5 { - b.advance(2) - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - // end of bit stream - bitStream >>= 16 - bitCount += 16 - } - } - //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) - for charnum < n0 { - s.norm[uint8(charnum)] = 0 - charnum++ - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*threshold - 1) - remaining - var count int32 - - if int32(bitStream)&(threshold-1) < max { - count = int32(bitStream) & (threshold - 1) - if debugAsserts && nbBits < 1 { - panic("nbBits underflow") - } - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - // extra accuracy - count-- - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - 
charnum++
- previous0 = count == 0
- for remaining < threshold {
- nbBits--
- threshold >>= 1
- }
-
- if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
- b.advance(bitCount >> 3)
- bitCount &= 7
- // The check above should make sure we can read 32 bits
- bitStream = b.Uint32NC() >> (bitCount & 31)
- } else {
- bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
- b.off = len(b.b) - 4
- bitStream = b.Uint32() >> (bitCount & 31)
- }
- }
- s.symbolLen = charnum
- if s.symbolLen <= 1 {
- return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
- }
- if s.symbolLen > maxSymbolValue+1 {
- return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
- }
- if remaining != 1 {
- return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
- }
- if bitCount > 32 {
- return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
- }
- if gotTotal != 1<<s.actualTableLog {
- return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
- }
- b.advance((bitCount + 7) >> 3)
- return s.buildDtable()
-}
-
-func (s *fseDecoder) mustReadFrom(r io.Reader) {
- fatalErr := func(err error) {
- if err != nil {
- panic(err)
- }
- }
- // dt [maxTablesize]decSymbol // Decompression table.
- // symbolLen uint16 // Length of active part of the symbol table.
- // actualTableLog uint8 // Selected tablelog.
- // maxBits uint8 // Maximum number of additional bits
- // // used for table creation to avoid allocations.
- // stateTable [256]uint16
- // norm [maxSymbolValue + 1]int16
- // preDefined bool
- fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
- fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
- fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
- fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
- fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
- fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
- fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
-}
-
-// decSymbol contains information about a state entry,
-// Including the state offset base, the output symbol and
-// the number of bits to read for the low part of the destination state.
-// Using a composite uint64 is faster than a struct with separate members.
-type decSymbol uint64
-
-func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
- return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
-}
-
-func (d decSymbol) nbBits() uint8 {
- return uint8(d)
-}
-
-func (d decSymbol) addBits() uint8 {
- return uint8(d >> 8)
-}
-
-func (d decSymbol) newState() uint16 {
- return uint16(d >> 16)
-}
-
-func (d decSymbol) baselineInt() int {
- return int(d >> 32)
-}
-
-func (d *decSymbol) setNBits(nBits uint8) {
- const mask = 0xffffffffffffff00
- *d = (*d & mask) | decSymbol(nBits)
-}
-
-func (d *decSymbol) setAddBits(addBits uint8) {
- const mask = 0xffffffffffff00ff
- *d = (*d & mask) | (decSymbol(addBits) << 8)
-}
-
-func (d *decSymbol) setNewState(state uint16) {
- const mask = 0xffffffff0000ffff
- *d = (*d & mask) | decSymbol(state)<<16
-}
-
-func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
- const mask = 0xffff00ff
- *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
-}
-
-// decSymbolValue returns the transformed decSymbol for the given symbol.
-func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
- if int(symb) >= len(t) {
- return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
- }
- lu := t[symb]
- return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
-}
-
-// setRLE will set the decoder til RLE mode.
-func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// final returns the current state symbol without decoding the next. -func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go deleted file mode 100644 index d04a829b0..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" -) - -type buildDtableAsmContext struct { - // inputs - stateTable *uint16 - norm *int16 - dt *uint64 - - // outputs --- set by the procedure in the case of error; - // for interpretation please see the error handling part below - errParam1 uint64 - errParam2 uint64 -} - -// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. -// Function returns non-zero exit code on error. -// -//go:noescape -func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int - -// please keep in sync with _generate/gen_fse.go -const ( - errorCorruptedNormalizedCounter = 1 - errorNewStateTooBig = 2 - errorNewStateNoBits = 3 -) - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - ctx := buildDtableAsmContext{ - stateTable: &s.stateTable[0], - norm: &s.norm[0], - dt: (*uint64)(&s.dt[0]), - } - code := buildDtable_asm(s, &ctx) - - if code != 0 { - switch code { - case errorCorruptedNormalizedCounter: - position := ctx.errParam1 - return fmt.Errorf("corrupted input (position=%d, expected 0)", position) - - case errorNewStateTooBig: - newState := decSymbol(ctx.errParam1) - size := ctx.errParam2 - return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) - - case errorNewStateNoBits: - newState := decSymbol(ctx.errParam1) - oldState := decSymbol(ctx.errParam2) - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) - - default: - return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s deleted file mode 100644 index bcde39869..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int -TEXT ·buildDtable_asm(SB), $0-24 - MOVQ ctx+8(FP), CX - MOVQ s+0(FP), DI - - // Load values - MOVBQZX 4098(DI), DX - XORQ AX, AX - BTSQ DX, AX - MOVQ (CX), BX - MOVQ 16(CX), SI - LEAQ -1(AX), R8 - MOVQ 8(CX), CX - MOVWQZX 4096(DI), DI - - // End load values - // Init, lay down lowprob symbols - XORQ R9, R9 - JMP init_main_loop_condition - -init_main_loop: - MOVWQSX (CX)(R9*2), R10 - CMPW R10, $-1 - JNE do_not_update_high_threshold - MOVB R9, 1(SI)(R8*8) - DECQ R8 - MOVQ $0x0000000000000001, R10 - -do_not_update_high_threshold: - MOVW R10, (BX)(R9*2) - INCQ R9 - -init_main_loop_condition: - CMPQ R9, DI - JL init_main_loop - - // Spread symbols - // Calculate table step - MOVQ AX, R9 - SHRQ $0x01, R9 - MOVQ AX, R10 - SHRQ $0x03, R10 - LEAQ 3(R9)(R10*1), R9 - - // Fill add bits values - LEAQ -1(AX), R10 - XORQ R11, R11 - XORQ R12, R12 - JMP spread_main_loop_condition - -spread_main_loop: - XORQ R13, R13 - MOVWQSX (CX)(R12*2), R14 - JMP spread_inner_loop_condition - -spread_inner_loop: - MOVB R12, 1(SI)(R11*8) - -adjust_position: - ADDQ R9, R11 - ANDQ R10, R11 - CMPQ R11, R8 - JG adjust_position - INCQ R13 - -spread_inner_loop_condition: - CMPQ R13, R14 - JL spread_inner_loop - INCQ R12 - -spread_main_loop_condition: - CMPQ R12, DI - JL spread_main_loop - TESTQ R11, R11 - JZ spread_check_ok - MOVQ ctx+8(FP), AX - MOVQ R11, 24(AX) - MOVQ $+1, ret+16(FP) - RET - -spread_check_ok: - // Build Decoding table - XORQ DI, DI - -build_table_main_table: - MOVBQZX 1(SI)(DI*8), CX - MOVWQZX (BX)(CX*2), R8 - LEAQ 1(R8), R9 - MOVW R9, (BX)(CX*2) - MOVQ R8, R9 - BSRQ R9, R9 - MOVQ DX, CX - SUBQ R9, CX - SHLQ CL, R8 - SUBQ AX, R8 - MOVB CL, (SI)(DI*8) - MOVW R8, 2(SI)(DI*8) - CMPQ R8, AX - JLE build_table_check1_ok - MOVQ ctx+8(FP), CX - MOVQ R8, 24(CX) - MOVQ AX, 32(CX) - MOVQ $+2, ret+16(FP) - RET - -build_table_check1_ok: - TESTB CL, CL - JNZ build_table_check2_ok - CMPW R8, DI - JNE build_table_check2_ok - MOVQ ctx+8(FP), AX - MOVQ R8, 24(AX) - MOVQ DI, 32(AX) - MOVQ $+3, ret+16(FP) - RET - -build_table_check2_ok: - INCQ DI - CMPQ DI, AX - JL build_table_main_table - MOVQ $+0, ret+16(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go deleted file mode 100644 index 8adfebb02..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "errors" - "fmt" -) - -// buildDtable will build the decoding table. 
-func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - v = 1 - } - symbolNext[i] = uint16(v) - } - } - - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - for { - // lowprob area - position = (position + step) & tableMask - if position <= highThreshold { - break - } - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index ab26326a8..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. - reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. - count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human readable string. 
-func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -func (s *fseEncoder) Histogram() *[256]uint32 { - return &s.count -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. 
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. -func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. 
-func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - 
sStart
- )
- if weight < 1 {
- return errors.New("weight < 1")
- }
- s.norm[i] = int16(weight)
- tmpTotal = end
- }
- }
- return nil
-}
-
-// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
-func (s *fseEncoder) optimalTableLog(length int) {
- tableLog := uint8(maxEncTableLog)
- minBitsSrc := highBit(uint32(length)) + 1
- minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
- minBits := uint8(minBitsSymbols)
- if minBitsSrc < minBitsSymbols {
- minBits = uint8(minBitsSrc)
- }
-
- maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
- if maxBitsSrc < tableLog {
- // Accuracy can be reduced
- tableLog = maxBitsSrc
- }
- if minBits > tableLog {
- tableLog = minBits
- }
- // Need a minimum to safely represent all symbol values
- if tableLog < minEncTablelog {
- tableLog = minEncTablelog
- }
- if tableLog > maxEncTableLog {
- tableLog = maxEncTableLog
- }
- s.actualTableLog = tableLog
-}
-
-// validateNorm validates the normalized histogram table.
-func (s *fseEncoder) validateNorm() (err error) {
- var total int
- for _, v := range s.norm[:s.symbolLen] {
- if v >= 0 {
- total += int(v)
- } else {
- total -= int(v)
- }
- }
- defer func() {
- if err == nil {
- return
- }
- fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
- for i, v := range s.norm[:s.symbolLen] {
- fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
- }
- }()
- if total != (1 << s.actualTableLog) {
- return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
- }
- for i, v := range s.norm[s.symbolLen:] {
- if v != 0 {
- return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
- }
- }
- return nil
-}
-
-// writeCount will write the normalized histogram count to header.
-// This is read back by readNCount.
-func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
- if s.useRLE {
- return append(out, s.rleVal), nil
- }
- if s.preDefined || s.reUsed {
- // Never write predefined.
- return out, nil
- }
-
- var (
- tableLog = s.actualTableLog
- tableSize = 1 << tableLog
- previous0 bool
- charnum uint16
-
- // maximum header size plus 2 extra bytes for final output if bitCount == 0.
- maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
-
- // Write Table Size
- bitStream = uint32(tableLog - minEncTablelog)
- bitCount = uint(4)
- remaining = int16(tableSize + 1) /* +1 for extra accuracy */
- threshold = int16(tableSize)
- nbBits = uint(tableLog + 1)
- outP = len(out)
- )
- if cap(out) < outP+maxHeaderSize {
- out = append(out, make([]byte, maxHeaderSize*3)...)
- out = out[:len(out)-maxHeaderSize*3]
- }
- out = out[:outP+maxHeaderSize]
-
- // stops at 1
- for remaining > 1 {
- if previous0 {
- start := charnum
- for s.norm[charnum] == 0 {
- charnum++
- }
- for charnum >= start+24 {
- start += 24
- bitStream += uint32(0xFFFF) << bitCount
- out[outP] = byte(bitStream)
- out[outP+1] = byte(bitStream >> 8)
- outP += 2
- bitStream >>= 16
- }
- for charnum >= start+3 {
- start += 3
- bitStream += 3 << bitCount
- bitCount += 2
- }
- bitStream += uint32(charnum-start) << bitCount
- bitCount += 2
- if bitCount > 16 {
- out[outP] = byte(bitStream)
- out[outP+1] = byte(bitStream >> 8)
- outP += 2
- bitStream >>= 16
- bitCount -= 16
- }
- }
-
- count := s.norm[charnum]
- charnum++
- max := (2*threshold - 1) - remaining
- if count < 0 {
- remaining += count
- } else {
- remaining -= count
- }
- count++ // +1 for extra accuracy
- if count >= threshold {
- count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns an -1 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream. 
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77d2..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 5d73c21eb..000000000 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. 
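initPredefined above funnels the one-time construction of the predefined tables through sync.Once, so concurrent callers never race or rebuild them. The same pattern in isolation, with a placeholder standing in for the real table construction:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	tablesOnce sync.Once
	tables     []int // stands in for the predefined decoder/encoder tables
)

// initTables is safe to call from any number of goroutines; the expensive
// construction runs exactly once and later callers just see the result.
func initTables() []int {
	tablesOnce.Do(func() {
		tables = []int{1, 2, 3} // placeholder for the real table construction
	})
	return tables
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = initTables()
		}()
	}
	wg.Wait()
	fmt.Println(initTables())
}
```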
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index 09164856d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - // Literal decompression - huffTree *huff0.Scratch - - // Sequence decompression - decoders sequenceDecs - recentOffsets [3]int - - // History buffer... - b []byte - - // ignoreBuffer is meant to ignore a number of bytes - // when checking for matches in history - ignoreBuffer int - - windowSize int - allocFrameBuffer int // needed? - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. -// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.ignoreBuffer = 0 - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - h.decoders.freeDecoders() - h.decoders = sequenceDecs{br: h.decoders.br} - h.freeHuffDecoder() - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) freeHuffDecoder() { - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - h.huffTree = nil - } - } -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.decoders.dict = dict.content - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. - if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. 
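hashLen, shown earlier in this hunk, is a multiplicative hash: multiply the low input bytes by a large odd constant and keep only the top length bits. A standalone version of its default 4-byte case, with the constant copied from the table above:

```go
package main

import "fmt"

const prime4bytes = 2654435761

// hash4 keeps the top `length` bits of u * prime, the same shape as the
// default (4-byte) case of hashLen above. length must be < 32.
func hash4(u uint32, length uint8) uint32 {
	return (u * prime4bytes) >> (32 - length)
}

func main() {
	// Two adjacent inputs map to well-separated slots of a 2^16-entry table.
	fmt.Println(hash4(0x12345678, 16), hash4(0x12345679, 16))
}
```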
- // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// ensureBlock will ensure there is space for at least one block... -func (h *history) ensureBlock() { - if cap(h.b) < h.allocFrameBuffer { - h.b = make([]byte, 0, h.allocFrameBuffer) - return - } - - avail := cap(h.b) - len(h.b) - if avail >= h.windowSize || avail > maxCompressedBlockSize { - return - } - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 777290d44..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -The package is written with optimized pure Go and also contains even faster -assembly implementations for amd64 and arm64. If desired, the `purego` build tag -opts into using the Go code even on those architectures. 
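The README above sketches the package's API. A short usage example written against the upstream module it points to, github.com/cespare/xxhash/v2 (the copy deleted here is vendored as a zstd-internal package), assuming that module is available:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))

	// Streaming: Digest implements hash.Hash64, so data can arrive in pieces.
	d := xxhash.New()
	d.WriteString("hel") // Write/WriteString never return an error
	d.WriteString("lo")
	fmt.Printf("%016x\n", d.Sum64()) // same value as the one-shot call
}
```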
- -[xxHash]: http://cyan4973.github.io/xxHash/ - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| ---------- | --------- | --------- | -| 4 B | 1.3 GB/s | 1.2 GB/s | -| 16 B | 2.9 GB/s | 3.5 GB/s | -| 100 B | 6.9 GB/s | 8.1 GB/s | -| 4 KB | 11.7 GB/s | 16.7 GB/s | -| 10 MB | 12.0 GB/s | 17.3 GB/s | - -These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C -CPU using the following commands under Go 1.19.2: - -``` -benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') -benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index fc40c8200..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// Store the primes in an array as well. -// -// The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. -var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - memleft := d.mem[d.n&(len(d.mem)-1):] - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(memleft, b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. 
- c := copy(memleft, b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[c:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - b := d.mem[:d.n&(len(d.mem)-1)] - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
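Sum64 above finishes with xxHash's avalanche step: three xor-shift and multiply rounds that spread every input bit across the 64-bit result. Extracted on its own, with the prime constants copied from the table earlier in this file:

```go
package main

import "fmt"

const (
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
)

// avalanche is the final mixing step from Sum64 above: without it, inputs
// that differ only in their last few bytes would produce correlated hashes.
func avalanche(h uint64) uint64 {
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32
	return h
}

func main() {
	fmt.Printf("%016x\n%016x\n", avalanche(1), avalanche(2))
}
```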
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index ddb63aa91..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,210 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define h AX -#define d AX -#define p SI // pointer to advance through b -#define n DX -#define end BX // loop end -#define v1 R8 -#define v2 R9 -#define v3 R10 -#define v4 R11 -#define x R12 -#define prime1 R13 -#define prime2 R14 -#define prime4 DI - -#define round(acc, x) \ - IMULQ prime2, x \ - ADDQ x, acc \ - ROLQ $31, acc \ - IMULQ prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - IMULQ prime2, x \ - ROLQ $31, x \ - IMULQ prime1, x - -// mergeRound applies a merge round on the two registers acc and x. -// It assumes that prime1, prime2, and prime4 have been loaded. -#define mergeRound(acc, x) \ - round0(x) \ - XORQ x, acc \ - IMULQ prime1, acc \ - ADDQ prime4, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that there is at least one block -// to process. -#define blockLoop() \ -loop: \ - MOVQ +0(p), x \ - round(v1, x) \ - MOVQ +8(p), x \ - round(v2, x) \ - MOVQ +16(p), x \ - round(v3, x) \ - MOVQ +24(p), x \ - round(v4, x) \ - ADDQ $32, p \ - CMPQ p, end \ - JLE loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - // Load fixed primes. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - MOVQ ·primes+24(SB), prime4 - - // Load slice. - MOVQ b_base+0(FP), p - MOVQ b_len+8(FP), n - LEAQ (p)(n*1), end - - // The first loop limit will be len(b)-32. - SUBQ $32, end - - // Check whether we have at least one block. - CMPQ n, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). 
- MOVQ prime1, v1 - ADDQ prime2, v1 - MOVQ prime2, v2 - XORQ v3, v3 - XORQ v4, v4 - SUBQ prime1, v4 - - blockLoop() - - MOVQ v1, h - ROLQ $1, h - MOVQ v2, x - ROLQ $7, x - ADDQ x, h - MOVQ v3, x - ROLQ $12, x - ADDQ x, h - MOVQ v4, x - ROLQ $18, x - ADDQ x, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - - JMP afterBlocks - -noBlocks: - MOVQ ·primes+32(SB), h - -afterBlocks: - ADDQ n, h - - ADDQ $24, end - CMPQ p, end - JG try4 - -loop8: - MOVQ (p), x - ADDQ $8, p - round0(x) - XORQ x, h - ROLQ $27, h - IMULQ prime1, h - ADDQ prime4, h - - CMPQ p, end - JLE loop8 - -try4: - ADDQ $4, end - CMPQ p, end - JG try1 - - MOVL (p), x - ADDQ $4, p - IMULQ prime1, x - XORQ x, h - - ROLQ $23, h - IMULQ prime2, h - ADDQ ·primes+16(SB), h - -try1: - ADDQ $4, end - CMPQ p, end - JGE finalize - -loop1: - MOVBQZX (p), x - ADDQ $1, p - IMULQ ·primes+32(SB), x - XORQ x, h - ROLQ $11, h - IMULQ prime1, h - - CMPQ p, end - JL loop1 - -finalize: - MOVQ h, x - SHRQ $33, x - XORQ x, h - IMULQ prime2, h - MOVQ h, x - SHRQ $29, x - XORQ x, h - IMULQ ·primes+16(SB), h - MOVQ h, x - SHRQ $32, x - XORQ x, h - - MOVQ h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - // Load fixed primes needed for round. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - - // Load slice. - MOVQ b_base+8(FP), p - MOVQ b_len+16(FP), n - LEAQ (p)(n*1), end - SUBQ $32, end - - // Load vN from d. - MOVQ s+0(FP), d - MOVQ 0(d), v1 - MOVQ 8(d), v2 - MOVQ 16(d), v3 - MOVQ 24(d), v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. - blockLoop() - - // Copy vN back to d. - MOVQ v1, 0(d) - MOVQ v2, 8(d) - MOVQ v3, 16(d) - MOVQ v4, 24(d) - - // The number of bytes written is p minus the old base pointer. - SUBQ b_base+8(FP), p - MOVQ p, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s deleted file mode 100644 index ae7d4d329..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ /dev/null @@ -1,184 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define digest R1 -#define h R2 // return value -#define p R3 // input pointer -#define n R4 // input length -#define nblocks R5 // n / 32 -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x - -#define mergeRound(acc, x) \ - round0(x) \ - EOR x, acc \ - MADD acc, prime4, prime1, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that n >= 32. 
-#define blockLoop() \ - LSR $5, n, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 16(p), (x1, x2) \ - LDP.P 16(p), (x3, x4) \ - round(v1, x1) \ - round(v2, x2) \ - round(v3, x3) \ - round(v4, x4) \ - SUB $1, nblocks \ - CBNZ nblocks, loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - LDP b_base+0(FP), (p, n) - - LDP ·primes+0(SB), (prime1, prime2) - LDP ·primes+16(SB), (prime3, prime4) - MOVD ·primes+32(SB), prime5 - - CMP $32, n - CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } - BLT afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blockLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - -afterLoop: - ADD n, h - - TBZ $4, n, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - - // NOTE: here and below, sequencing the EOR after the ROR (using a - // rotated register) is worth a small but measurable speedup for small - // inputs. - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, n, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, n, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, n, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h, h - MUL prime1, h - -try1: - TBZ $0, n, finalize - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h, h - MUL prime1, h - -finalize: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(s *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - LDP ·primes+0(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD s+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, n) - - blockLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, n - MOVD n, ret+32(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go deleted file mode 100644 index d4221edf4..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 0be16cefc..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := primes[0] + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -primes[0] - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb10..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go deleted file mode 100644 index f41932b7a..000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
- -package zstd - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s deleted file mode 100644 index 0782b86e3..000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SHRL $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go deleted file mode 100644 index 57b9c31c0..000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index d7fe6d82d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. 
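The generic matchLen above compares eight bytes at a time: it XORs two little-endian words and, when they differ, TrailingZeros64 of the difference divided by eight gives the number of bytes that still matched. A freestanding demo of that step:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// commonPrefix8 returns how many of the first 8 bytes of a and b are equal.
// Both slices must be at least 8 bytes long.
func commonPrefix8(a, b []byte) int {
	diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	if diff == 0 {
		return 8
	}
	// The first differing byte is the lowest non-zero byte of diff.
	return bits.TrailingZeros64(diff) >> 3
}

func main() {
	a := []byte("abcdefgh")
	b := []byte("abcdXfgh")
	fmt.Println(commonPrefix8(a, b)) // 4
}
```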
- llCode, mlCode, ofCode uint8 -} - -type seqVals struct { - ll, ml, mo int -} - -func (s seq) String() string { - if s.offset <= 3 { - if s.offset == 0 { - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") -} - -type seqCompMode uint8 - -const ( - compModePredefined seqCompMode = iota - compModeRLE - compModeFSE - compModeRepeat -) - -type sequenceDec struct { - // decoder keeps track of the current state and updates it from the bitstream. - fse *fseDecoder - state fseState - repeat bool -} - -// init the state of the decoder with input from stream. -func (s *sequenceDec) init(br *bitReader) error { - if s.fse == nil { - return errors.New("sequence decoder not defined") - } - s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1< cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Copy from dictionary... - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // we may be in dictionary. - dictO := len(s.dict) - (seq.mo - (t + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) - } - end := dictO + seq.ml - if end > len(s.dict) { - n := len(s.dict) - dictO - copy(out[t:], s.dict[dictO:]) - t += n - seq.ml -= n - } else { - copy(out[t:], s.dict[dictO:end]) - t += end - dictO - continue - } - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into current block. - // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - // We must be in current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - continue - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} - -// decode sequences from the stream with the provided history. 
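The overlap branch of execute above copies a match byte by byte because the source and destination ranges can intersect; an offset smaller than the match length deliberately replays bytes written moments earlier, producing a run. A small sketch of that behaviour, with illustrative names:

```go
package main

import "fmt"

// appendMatch appends length bytes copied from `offset` bytes back in out.
// When length > offset the ranges overlap, so the copy proceeds one byte at
// a time to pick up bytes written earlier in this same match.
func appendMatch(out []byte, offset, length int) []byte {
	start := len(out) - offset
	for i := 0; i < length; i++ {
		out = append(out, out[start+i])
	}
	return out
}

func main() {
	out := []byte("ab")
	out = appendMatch(out, 1, 5) // offset 1, length 5 repeats the last byte
	fmt.Println(string(out))     // "abbbbbb"
}
```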
-func (s *sequenceDecs) decodeSync(hist []byte) error { - supported, err := s.decodeSyncSimple(hist) - if supported { - return err - } - - br := s.br - seqs := s.nSeqs - startSize := len(s.out) - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - if debugDecoder { - println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") - } - for i := seqs - 1; i >= 0; i-- { - if br.overread() { - printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) - return io.ErrUnexpectedEOF - } - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - - if ll > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) - } - size := ll + ml + len(out) - if size-startSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if size > cap(out) { - // Not enough size, which can happen under high volume block streaming conditions - // but could be if destination slice is too small for sync operations. - // over-allocating here can create a large amount of GC pressure so we try to keep - // it as contained as possible - used := len(out) - startSize - addBytes := 256 + ll + ml + used>>2 - // Clamp to max block size. - if used+addBytes > maxBlockSize { - addBytes = maxBlockSize - used - } - out = append(out, make([]byte, addBytes)...) - out = out[:len(out)-addBytes] - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - - // Add literals - out = append(out, s.literals[:ll]...) 
- s.literals = s.literals[ll:] - - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - - if mo > len(out)+len(hist) || mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - - // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(out) + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) - } - end := dictO + ml - if end > len(s.dict) { - out = append(out, s.dict[dictO:]...) - ml -= len(s.dict) - dictO - } else { - out = append(out, s.dict[dictO:end]...) - mo = 0 - ml = 0 - } - } - - // Copy from history. - // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(out); v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if ml > v { - // Some goes into current block. - // Copy remainder of history - out = append(out, hist[start:]...) - ml -= v - } else { - out = append(out, hist[start:start+ml]...) - ml = 0 - } - } - // We must be in current buffer now - if ml > 0 { - start := len(out) - mo - if ml <= len(out)-start { - // No overlap - out = append(out, out[start:start+ml]...) - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - out = out[:len(out)+ml] - src := out[start : start+ml] - // Destination is the space we just added. - dst := out[len(out)-ml:] - dst = dst[:len(src)] - for i := range src { - dst[i] = src[i] - } - } - } - if i == 0 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - - if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - - // Add final literals - s.out = append(out, s.literals...) - return br.close() -} - -var bitMask [16]uint16 - -func init() { - for i := range bitMask[:] { - bitMask[i] = uint16((1 << uint(i)) - 1) - } -} - -func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. 
- br.fill() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fill() - } - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go deleted file mode 100644 index c59f17e07..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ /dev/null @@ -1,394 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" - "io" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -type decodeSyncAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - litRemain int - out []byte - outPosition int - literals []byte - litPosition int - history []byte - windowSize int - ll int // set on error (not for all errors, please refer to _generate/gen.go) - ml int // set on error (not for all errors, please refer to _generate/gen.go) - mo int // set on error (not for all errors, please refer to _generate/gen.go) -} - -// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. -// -//go:noescape -func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int - -// decode sequences from the stream with the provided history but without a dictionary. 
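adjustOffset above implements zstd's repeat-offset convention: small offset codes index a three-entry history of recent offsets, the indices shift by one when the sequence carries no literals, and the shifted last code means the previous offset minus one. A simplified, runnable version of that bookkeeping (the real code additionally guards against a resolved offset of zero from corrupted input):

```go
package main

import "fmt"

// recentOffsets holds the three most recently used match offsets, newest first.
type recentOffsets [3]int

func (r *recentOffsets) push(off int) {
	r[2], r[1], r[0] = r[1], r[0], off
}

// resolve maps a repeat code (1, 2 or 3) to a concrete offset: with a literal
// length of zero the codes shift by one, and the shifted code 3 becomes
// "most recent offset minus one".
func (r *recentOffsets) resolve(code, litLen int) int {
	if litLen == 0 {
		code++
	}
	switch code {
	case 1:
		return r[0] // most recent offset, history unchanged
	case 2:
		r[0], r[1] = r[1], r[0] // swap the two most recent
		return r[0]
	case 3:
		off := r[2]
		r.push(off)
		return off
	default: // shifted code 3: previous offset minus one
		off := r[0] - 1
		r.push(off)
		return off
	}
}

func main() {
	r := recentOffsets{1, 4, 8}
	off := r.resolve(2, 5)
	fmt.Println(off, r) // 4 [4 1 8]
	off = r.resolve(1, 0)
	fmt.Println(off, r) // 1 [1 4 8]: with no literals, code 1 selects the old second slot
}
```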
-func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - if len(s.dict) > 0 { - return false, nil - } - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { - return false, nil - } - - // FIXME: Using unsafe memory copies leads to rare, random crashes - // with fuzz testing. It is therefore disabled for now. - const useSafe = true - /* - useSafe := false - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { - useSafe = true - } - if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { - useSafe = true - } - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - useSafe = true - } - */ - - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeSyncAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - iteration: s.nSeqs - 1, - litRemain: len(s.literals), - out: s.out, - outPosition: len(s.out), - literals: s.literals, - windowSize: s.windowSize, - history: hist, - } - - s.seqSize = 0 - startSize := len(s.out) - - var errCode int - if cpuinfo.HasBMI2() { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) - } - } else { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) - } - } - switch errCode { - case noError: - break - - case errorMatchLenOfsMismatch: - return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) - - case errorMatchLenTooBig: - return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) - - case errorMatchOffTooBig: - return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", - ctx.mo, ctx.outPosition+len(hist)-startSize) - - case errorNotEnoughLiterals: - return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", - ctx.ll, ctx.litRemain+ctx.ll) - - case errorOverread: - return true, io.ErrUnexpectedEOF - - case errorNotEnoughSpace: - size := ctx.outPosition + ctx.ll + ctx.ml - if debugDecoder { - println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) - } - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - - default: - return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - return true, err - } - - s.literals = s.literals[ctx.litPosition:] - t := ctx.outPosition - s.out = s.out[:t] - - // Add final literals - s.out = append(s.out, s.literals...) 
- if debugDecoder { - t += len(s.literals) - if t != len(s.out) { - panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) - } - } - - return true, nil -} - -// -------------------------------------------------------------------------------- - -type decodeAsmContext struct { - llTable []decSymbol - mlTable []decSymbol - ofTable []decSymbol - llState uint64 - mlState uint64 - ofState uint64 - iteration int - seqs []seqVals - litRemain int -} - -const noError = 0 - -// error reported when mo == 0 && ml > 0 -const errorMatchLenOfsMismatch = 1 - -// error reported when ml > maxMatchLen -const errorMatchLenTooBig = 2 - -// error reported when mo > available history or mo > s.windowSize -const errorMatchOffTooBig = 3 - -// error reported when the sum of literal lengths exeeceds the literal buffer size -const errorNotEnoughLiterals = 4 - -// error reported when capacity of `out` is too small -const errorNotEnoughSpace = 5 - -// error reported when bits are overread. -const errorOverread = 6 - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. -// -//go:noescape -func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int - -// decode sequences from the stream without the provided history. 
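decodeSyncSimple and decode above pick between the BMI2 and baseline assembly routines at run time via cpuinfo.HasBMI2, an internal helper. A generic sketch of that capability-dispatch shape, with the feature probe stubbed out so it runs anywhere:

```go
package main

import "fmt"

// Two interchangeable implementations of the same step; in the decoder above
// these are the BMI2 and baseline amd64 assembly routines.
func decodeBaseline(n int) int { return n + 1 }
func decodeFast(n int) int     { return n + 1 }

// hasBMI2 stands in for the real CPU-feature probe; hard-coded here so the
// sketch runs on any machine.
func hasBMI2() bool { return false }

// decode branches on the capability per call, the same shape as the dispatch
// in the functions above.
func decode(n int) int {
	if hasBMI2() {
		return decodeFast(n)
	}
	return decodeBaseline(n)
}

func main() {
	fmt.Println(decode(41)) // 42 either way; only the code path differs
}
```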
-func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - seqs: seqs, - iteration: len(seqs) - 1, - litRemain: len(s.literals), - } - - if debugDecoder { - println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") - } - - s.seqSize = 0 - lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 - var errCode int - if cpuinfo.HasBMI2() { - if lte56bits { - errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_bmi2(s, br, &ctx) - } - } else { - if lte56bits { - errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_amd64(s, br, &ctx) - } - } - if errCode != 0 { - i := len(seqs) - ctx.iteration - 1 - switch errCode { - case errorMatchLenOfsMismatch: - ml := ctx.seqs[i].ml - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - - case errorMatchLenTooBig: - ml := ctx.seqs[i].ml - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - - case errorNotEnoughLiterals: - ll := ctx.seqs[i].ll - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) - case errorOverread: - return io.ErrUnexpectedEOF - } - - return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) - } - - if ctx.litRemain < 0 { - return fmt.Errorf("literal count is too big: total available %d, total requested %d", - len(s.literals), len(s.literals)-ctx.litRemain) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if debugDecoder { - println("decode: ", br.remain(), "bits remain on stream. code:", errCode) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// -------------------------------------------------------------------------------- - -type executeAsmContext struct { - seqs []seqVals - seqIndex int - out []byte - history []byte - literals []byte - outPosition int - litPosition int - windowSize int -} - -// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. -// -// Returns false if a match offset is too big. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool - -// Same as above, but with safe memcopies -// -//go:noescape -func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool - -// executeSimple handles cases when dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { - addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc - s.out = append(s.out, make([]byte, addBytes)...) 
- s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - ctx := executeAsmContext{ - seqs: seqs, - seqIndex: 0, - out: out, - history: hist, - outPosition: t, - litPosition: 0, - literals: s.literals, - windowSize: s.windowSize, - } - var ok bool - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - ok = sequenceDecs_executeSimple_safe_amd64(&ctx) - } else { - ok = sequenceDecs_executeSimple_amd64(&ctx) - } - if !ok { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", - seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) - } - s.literals = s.literals[ctx.litPosition:] - t = ctx.outPosition - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s deleted file mode 100644 index f5591fa1e..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ /dev/null @@ -1,4151 +0,0 @@ -// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. 
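Both execute and executeSimple above grow s.out with append(s.out, make([]byte, addBytes)...) followed by a reslice back to the old length: the capacity is reserved up front so later copies into the block cannot trigger a reallocation. The same idiom in isolation:

```go
package main

import "fmt"

// growCap makes sure b can hold at least extra more bytes without changing
// its visible length, mirroring the append/reslice pair used above.
func growCap(b []byte, extra int) []byte {
	if cap(b)-len(b) >= extra {
		return b
	}
	b = append(b, make([]byte, extra)...)
	return b[:len(b)-extra]
}

func main() {
	b := []byte("abc")
	b = growCap(b, 100)
	fmt.Println(len(b), cap(b) >= 103) // 3 true: length unchanged, room reserved
}
```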
- CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_end - -sequenceDecs_decode_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_byte_by_byte - -sequenceDecs_decode_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_2_end - -sequenceDecs_decode_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte - -sequenceDecs_decode_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRL $0x10, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRL $0x10, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRL $0x10, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - 
MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_amd64_adjust_offset_nonzero - -sequenceDecs_decode_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_amd64_adjust_zero - JEQ sequenceDecs_decode_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_amd64_adjust_three - JMP sequenceDecs_decode_amd64_adjust_two - -sequenceDecs_decode_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 
152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_56_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decode_56_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_56_amd64_fill_end - -sequenceDecs_decode_56_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_56_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte - -sequenceDecs_decode_56_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_56_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRL $0x10, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRL $0x10, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRL $0x10, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_56_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_56_amd64_after_adjust - 
-sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero - -sequenceDecs_decode_56_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_amd64_adjust_zero - JEQ sequenceDecs_decode_56_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_amd64_adjust_three - JMP sequenceDecs_decode_56_amd64_adjust_two - -sequenceDecs_decode_56_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_56_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_56_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_56_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_56_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_56_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_end - -sequenceDecs_decode_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_byte_by_byte - -sequenceDecs_decode_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_2_end - -sequenceDecs_decode_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte - -sequenceDecs_decode_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_bmi2_after_adjust - 
-sequenceDecs_decode_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_bmi2_adjust_zero - JEQ sequenceDecs_decode_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_bmi2_adjust_three - JMP sequenceDecs_decode_bmi2_adjust_two - -sequenceDecs_decode_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_56_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_56_bmi2_fill_end - -sequenceDecs_decode_56_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_56_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte - -sequenceDecs_decode_56_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_56_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_56_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_bmi2_adjust_zero - JEQ sequenceDecs_decode_56_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_bmi2_adjust_three - JMP sequenceDecs_decode_56_bmi2_adjust_two - -sequenceDecs_decode_56_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_three: - LEAQ -1(R10), R13 - 
-sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_56_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_56_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_56_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_56_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (SI)(R14*1), X0 - MOVUPS X0, (BX)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, R11 - JB copy_1 - ADDQ R11, SI - ADDQ R11, BX - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - 
-copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ BX, R12 - ADDQ R13, BX - -copy_2: - MOVUPS (R11), X0 - MOVUPS X0, (R12) - ADDQ $0x10, R11 - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - MOVQ R11, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (SI), X0 - MOVUPS X0, (BX) - ADDQ $0x10, SI - ADDQ $0x10, BX - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(SI)(R14*1), SI - LEAQ 16(BX)(R14*1), BX - MOVUPS -16(SI), X0 - MOVUPS X0, -16(BX) - JMP copy_1_end - -copy_1_small: - CMPQ R11, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ R11, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (SI), R14 - MOVB -1(SI)(R11*1), R15 - MOVB 
R14, (BX) - MOVB R15, -1(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_3: - MOVW (SI), R14 - MOVB 2(SI), R15 - MOVW R14, (BX) - MOVB R15, 2(BX) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_4through7: - MOVL (SI), R14 - MOVL -4(SI)(R11*1), R15 - MOVL R14, (BX) - MOVL R15, -4(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (SI), R14 - MOVQ -8(SI)(R11*1), R15 - MOVQ R14, (BX) - MOVQ R15, -8(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - -copy_1_end: - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (R11), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R11 - ADDQ $0x10, BX - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(R11)(R12*1), R11 - LEAQ 16(BX)(R12*1), BX - MOVUPS -16(R11), X0 - MOVUPS X0, -16(BX) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 
- JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (R11), R12 - MOVB -1(R11)(R13*1), R14 - MOVB R12, (BX) - MOVB R14, -1(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_3: - MOVW (R11), R12 - MOVB 2(R11), R14 - MOVW R12, (BX) - MOVB R14, 2(BX) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_4through7: - MOVL (R11), R12 - MOVL -4(R11)(R13*1), R14 - MOVL R12, (BX) - MOVL R14, -4(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (R11), R12 - MOVQ -8(R11)(R13*1), R14 - MOVQ R12, (BX) - MOVQ R14, -8(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_end - -sequenceDecs_decodeSync_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_2_end - -sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRL $0x10, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRL $0x10, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - 
MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRL $0x10, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R11)(R14*1), X0 - MOVUPS X0, (R10)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, AX - JB copy_1 - ADDQ AX, R11 - ADDQ AX, R10 - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - 
ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R10, CX - ADDQ R13, R10 - -copy_2: - MOVUPS (AX), X0 - MOVUPS X0, (CX) - ADDQ $0x10, AX - ADDQ $0x10, CX - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 
32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_end - -sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_2_end - -sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal 
Length State - BZHIQ SI, R14, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R10)(R14*1), X0 - MOVUPS X0, (R9)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, CX - JB copy_1 - ADDQ CX, R10 - ADDQ CX, R9 - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - 
-copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R9, R12 - ADDQ R13, R9 - -copy_2: - MOVUPS (CX), X0 - MOVUPS X0, (R12) - ADDQ $0x10, CX - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 
24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_safe_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_end - -sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end - -sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRL $0x10, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRL $0x10, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRL $0x10, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_safe_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_safe_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - MOVQ AX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R11), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R11 - ADDQ $0x10, R10 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R11)(R14*1), R11 - LEAQ 16(R10)(R14*1), R10 - MOVUPS -16(R11), X0 - MOVUPS X0, -16(R10) - JMP copy_1_end - -copy_1_small: - CMPQ AX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ AX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - 
-copy_1_move_1or2: - MOVB (R11), R14 - MOVB -1(R11)(AX*1), R15 - MOVB R14, (R10) - MOVB R15, -1(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_3: - MOVW (R11), R14 - MOVB 2(R11), R15 - MOVW R14, (R10) - MOVB R15, 2(R10) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R11), R14 - MOVL -4(R11)(AX*1), R15 - MOVL R14, (R10) - MOVL R15, -4(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R11), R14 - MOVQ -8(R11)(AX*1), R15 - MOVQ R14, (R10) - MOVQ R15, -8(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - -copy_1_end: - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_2_small - -copy_2_loop: - MOVUPS (AX), X0 - MOVUPS X0, (R10) - ADDQ $0x10, AX - ADDQ $0x10, R10 - SUBQ $0x10, CX - JAE copy_2_loop - LEAQ 16(AX)(CX*1), AX - LEAQ 16(R10)(CX*1), R10 - MOVUPS -16(AX), X0 - MOVUPS X0, -16(R10) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 
- JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (AX), CL - MOVB -1(AX)(R13*1), R14 - MOVB CL, (R10) - MOVB R14, -1(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_3: - MOVW (AX), CX - MOVB 2(AX), R14 - MOVW CX, (R10) - MOVB R14, 2(R10) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (AX), CX - MOVL -4(AX)(R13*1), R14 - MOVL CX, (R10) - MOVL R14, -4(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (AX), CX - MOVQ -8(AX)(R13*1), R14 - MOVQ CX, (R10) - MOVQ R14, -8(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_safe_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_safe_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_end - -sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end - -sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_safe_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero - INCQ R13 - JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_safe_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - MOVQ CX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R10), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R10 - ADDQ $0x10, R9 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R10)(R14*1), R10 - LEAQ 16(R9)(R14*1), R9 - MOVUPS -16(R10), X0 - MOVUPS X0, -16(R9) - JMP copy_1_end - -copy_1_small: - CMPQ CX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ CX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R10), R14 - MOVB -1(R10)(CX*1), R15 - MOVB R14, (R9) - MOVB R15, -1(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_3: - MOVW (R10), R14 - MOVB 2(R10), R15 - MOVW R14, (R9) - MOVB R15, 2(R9) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R10), R14 - MOVL -4(R10)(CX*1), R15 - MOVL R14, (R9) - MOVL R15, -4(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R10), R14 - MOVQ -8(R10)(CX*1), R15 - MOVQ R14, (R9) - MOVQ R15, -8(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - -copy_1_end: - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 
2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (CX), X0 - MOVUPS X0, (R9) - ADDQ $0x10, CX - ADDQ $0x10, R9 - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(CX)(R12*1), CX - LEAQ 16(R9)(R12*1), R9 - MOVUPS -16(CX), X0 - MOVUPS X0, -16(R9) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (CX), R12 - MOVB -1(CX)(R13*1), R14 - MOVB R12, (R9) - MOVB R14, -1(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_3: - MOVW (CX), R12 - MOVB 2(CX), R14 - MOVW R12, (R9) - MOVB R14, 2(R9) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (CX), R12 - MOVL -4(CX)(R13*1), R14 - MOVL R12, (R9) - MOVL R14, -4(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (CX), R12 - MOVQ -8(CX)(R13*1), R14 - MOVQ R12, (R9) - MOVQ R14, -8(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_safe_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error 
-sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go deleted file mode 100644 index 2fb35b788..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "fmt" - "io" -) - -// decode sequences from the stream with the provided history but without dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - return false, nil -} - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
- mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// executeSimple handles cases when a dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Malformed input - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into the current block. 
- // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - - // We must be in the current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174a7..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index ec13594e8..000000000 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - var n int - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 29c15c8c4..000000000 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. -// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -// zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { - z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) - if err != nil { - panic(err) - } - return z -}} - -// newZipReader creates a pooled zip decompressor. -func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { - pool := &zipReaderPool - if len(opts) > 0 { - opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) - // Force concurrency 1 - opts = append(opts, WithDecoderConcurrency(1)) - // Create our own pool - pool = &sync.Pool{} - } - return func(r io.Reader) io.ReadCloser { - dec, ok := pool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, opts...) 
- if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec, pool: pool} - } -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - pool *sync.Pool - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("read after close or EOF") - } - dec, err := r.dec.Read(p) - if err == io.EOF { - r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -// Options can be specified. WithDecoderConcurrency(1) is forced, -// and by default a 128MB maximum decompression window is specified. -// The window size can be overridden if required. -func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { - return newZipReader(opts...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 066bef2a4..000000000 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,125 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// fcsUnknown is used for unknown frame content size. -const fcsUnknown = math.MaxUint64 - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. 
- ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrUnexpectedBlockSize is returned when a block has unexpected size. - // Typically returned on invalid input. - ErrUnexpectedBlockSize = errors.New("unexpected block size") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrEncoderClosed will be returned if the Encoder was used after - // Close has been called. - ErrEncoderClosed = errors.New("encoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) 
- } -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/klauspost/pgzip/.gitignore b/vendor/github.com/klauspost/pgzip/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/klauspost/pgzip/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/pgzip/.travis.yml b/vendor/github.com/klauspost/pgzip/.travis.yml deleted file mode 100644 index 34704000e..000000000 --- a/vendor/github.com/klauspost/pgzip/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ - -arch: - - amd64 - - ppc64le -language: go - -os: - - linux - - osx - -go: - - 1.13.x - - 1.14.x - - 1.15.x - - master - -env: - - GO111MODULE=off - -script: - - diff <(gofmt -d .) <(printf "") - - go test -v -cpu=1,2,4 . - - go test -v -cpu=2 -race -short . - -matrix: - allow_failures: - - go: 'master' - fast_finish: true diff --git a/vendor/github.com/klauspost/pgzip/GO_LICENSE b/vendor/github.com/klauspost/pgzip/GO_LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/github.com/klauspost/pgzip/GO_LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE deleted file mode 100644 index 2bdc0d751..000000000 --- a/vendor/github.com/klauspost/pgzip/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/klauspost/pgzip/README.md b/vendor/github.com/klauspost/pgzip/README.md deleted file mode 100644 index ecc8726fa..000000000 --- a/vendor/github.com/klauspost/pgzip/README.md +++ /dev/null @@ -1,134 +0,0 @@ -pgzip -===== - -Go parallel gzip compression/decompression. This is a fully gzip compatible drop in replacement for "compress/gzip". - -This will split compression into blocks that are compressed in parallel. -This can be useful for compressing big amounts of data. The output is a standard gzip file. - -The gzip decompression is modified so it decompresses ahead of the current reader. -This means that reads will be non-blocking if the decompressor can keep ahead of your code reading from it. -CRC calculation also takes place in a separate goroutine. - -You should only use this if you are (de)compressing big amounts of data, -say **more than 1MB** at the time, otherwise you will not see any benefit, -and it will likely be faster to use the internal gzip library -or [this package](https://github.com/klauspost/compress). - -It is important to note that this library creates and reads *standard gzip files*. -You do not have to match the compressor/decompressor to get the described speedups, -and the gzip files are fully compatible with other gzip readers/writers. - -A golang variant of this is [bgzf](https://godoc.org/github.com/biogo/hts/bgzf), -which has the same feature, as well as seeking in the resulting file. -The only drawback is a slightly bigger overhead compared to this and pure gzip. -See a comparison below. - -[![GoDoc][1]][2] [![Build Status][3]][4] - -[1]: https://godoc.org/github.com/klauspost/pgzip?status.svg -[2]: https://godoc.org/github.com/klauspost/pgzip -[3]: https://travis-ci.org/klauspost/pgzip.svg -[4]: https://travis-ci.org/klauspost/pgzip - -Installation -==== -```go get github.com/klauspost/pgzip/...``` - -You might need to get/update the dependencies: - -``` -go get -u github.com/klauspost/compress -``` - -Usage -==== -[Godoc Doumentation](https://godoc.org/github.com/klauspost/pgzip) - -To use as a replacement for gzip, exchange - -```import "compress/gzip"``` -with -```import gzip "github.com/klauspost/pgzip"```. 
- -# Changes - -* Oct 6, 2016: Fixed an issue if the destination writer returned an error. -* Oct 6, 2016: Better buffer reuse, should now generate less garbage. -* Oct 6, 2016: Output does not change based on write sizes. -* Dec 8, 2015: Decoder now supports the io.WriterTo interface, giving a speedup and less GC pressure. -* Oct 9, 2015: Reduced allocations by ~35 by using sync.Pool. ~15% overall speedup. - -Changes in [github.com/klauspost/compress](https://github.com/klauspost/compress#changelog) are also carried over, so see that for more changes. - -## Compression -The simplest way to use this is to simply do the same as you would when using [compress/gzip](http://golang.org/pkg/compress/gzip). - -To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel. Default values for this is SetConcurrency(1MB, runtime.GOMAXPROCS(0)), meaning blocks are split at 1 MB and up to the number of CPU threads blocks can be processing at once before the writer blocks. - - -Example: -``` -var b bytes.Buffer -w := gzip.NewWriter(&b) -w.SetConcurrency(100000, 10) -w.Write([]byte("hello, world\n")) -w.Close() -``` - -To get any performance gains, you should at least be compressing more than 1 megabyte of data at the time. - -You should at least have a block size of 100k and at least a number of blocks that match the number of cores your would like to utilize, but about twice the number of blocks would be the best. - -Another side effect of this is, that it is likely to speed up your other code, since writes to the compressor only blocks if the compressor is already compressing the number of blocks you have specified. This also means you don't have worry about buffering input to the compressor. - -## Decompression - -Decompression works similar to compression. That means that you simply call pgzip the same way as you would call [compress/gzip](http://golang.org/pkg/compress/gzip). - -The only difference is that if you want to specify your own readahead, you have to use `pgzip.NewReaderN(r io.Reader, blockSize, blocks int)` to get a reader with your custom blocksizes. The `blockSize` is the size of each block decoded, and `blocks` is the maximum number of blocks that is decoded ahead. - -See [Example on playground](http://play.golang.org/p/uHv1B5NbDh) - -Performance -==== -## Compression - -See my blog post in [Benchmarks of Golang Gzip](https://blog.klauspost.com/go-gzipdeflate-benchmarks/). - -Compression cost is usually about 0.2% with default settings with a block size of 250k. - -Example with GOMAXPROC set to 32 (16 core CPU) - -Content is [Matt Mahoneys 10GB corpus](http://mattmahoney.net/dc/10gb.html). Compression level 6. 
- -Compressor | MB/sec | speedup | size | size overhead (lower=better) -------------|----------|---------|------|--------- -[gzip](http://golang.org/pkg/compress/gzip) (golang) | 16.91MB/s (1 thread) | 1.0x | 4781329307 | 0% -[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 127.10MB/s (1 thread) | 7.52x | 4885366806 | +2.17% -[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 2085.35MB/s| 123.34x | 4886132566 | +2.19% -[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 334.04MB/s | 19.76x | 4786890417 | +0.12% - -pgzip also contains a [huffman only compression](https://github.com/klauspost/compress#linear-time-compression-huffman-only) mode, that will allow compression at ~450MB per core per second, largely independent of the content. - -See the [complete sheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) for different content types and compression settings. - -## Decompression - -The decompression speedup is there because it allows you to do other work while the decompression is taking place. - -In the example above, the numbers are as follows on a 4 CPU machine: - -Decompressor | Time | Speedup --------------|------|-------- -[gzip](http://golang.org/pkg/compress/gzip) (golang) | 1m28.85s | 0% -[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 43.48s | 104% - -But wait, since gzip decompression is inherently singlethreaded (aside from CRC calculation) how can it be more than 100% faster? Because pgzip due to its design also acts as a buffer. When using unbuffered gzip, you are also waiting for io when you are decompressing. If the gzip decoder can keep up, it will always have data ready for your reader, and you will not be waiting for input to the gzip decompressor to complete. - -This is pretty much an optimal situation for pgzip, but it reflects most common usecases for CPU intensive gzip usage. - -I haven't included [bgzf](https://godoc.org/github.com/biogo/hts/bgzf) in this comparison, since it only can decompress files created by a compatible encoder, and therefore cannot be considered a generic gzip decompressor. But if you are able to compress your files with a bgzf compatible program, you can expect it to scale beyond 100%. - -# License -This contains large portions of code from the go repository - see GO_LICENSE for more information. The changes are released under MIT License. See LICENSE for more information. diff --git a/vendor/github.com/klauspost/pgzip/gunzip.go b/vendor/github.com/klauspost/pgzip/gunzip.go deleted file mode 100644 index 3c4b32f16..000000000 --- a/vendor/github.com/klauspost/pgzip/gunzip.go +++ /dev/null @@ -1,597 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pgzip implements reading and writing of gzip format compressed files, -// as specified in RFC 1952. -// -// This is a drop in replacement for "compress/gzip". -// This will split compression into blocks that are compressed in parallel. -// This can be useful for compressing big amounts of data. -// The gzip decompression has not been modified, but remains in the package, -// so you can use it as a complete replacement for "compress/gzip". 
-// -// See more at https://github.com/klauspost/pgzip -package pgzip - -import ( - "bufio" - "errors" - "hash" - "hash/crc32" - "io" - "sync" - "time" - - "github.com/klauspost/compress/flate" -) - -const ( - gzipID1 = 0x1f - gzipID2 = 0x8b - gzipDeflate = 8 - flagText = 1 << 0 - flagHdrCrc = 1 << 1 - flagExtra = 1 << 2 - flagName = 1 << 3 - flagComment = 1 << 4 -) - -func makeReader(r io.Reader) flate.Reader { - if rr, ok := r.(flate.Reader); ok { - return rr - } - return bufio.NewReader(r) -} - -var ( - // ErrChecksum is returned when reading GZIP data that has an invalid checksum. - ErrChecksum = errors.New("gzip: invalid checksum") - // ErrHeader is returned when reading GZIP data that has an invalid header. - ErrHeader = errors.New("gzip: invalid header") -) - -// The gzip file stores a header giving metadata about the compressed file. -// That header is exposed as the fields of the Writer and Reader structs. -type Header struct { - Comment string // comment - Extra []byte // "extra data" - ModTime time.Time // modification time - Name string // file name - OS byte // operating system type -} - -// A Reader is an io.Reader that can be read to retrieve -// uncompressed data from a gzip-format compressed file. -// -// In general, a gzip file can be a concatenation of gzip files, -// each with its own header. Reads from the Reader -// return the concatenation of the uncompressed data of each. -// Only the first header is recorded in the Reader fields. -// -// Gzip files store a length and checksum of the uncompressed data. -// The Reader will return a ErrChecksum when Read -// reaches the end of the uncompressed data if it does not -// have the expected length or checksum. Clients should treat data -// returned by Read as tentative until they receive the io.EOF -// marking the end of the data. -type Reader struct { - Header - r flate.Reader - decompressor io.ReadCloser - digest hash.Hash32 - size uint32 - flg byte - buf [512]byte - err error - closeErr chan error - multistream bool - - readAhead chan read - roff int // read offset - current []byte - closeReader chan struct{} - lastBlock bool - blockSize int - blocks int - - activeRA bool // Indication if readahead is active - mu sync.Mutex // Lock for above - - blockPool chan []byte -} - -type read struct { - b []byte - err error -} - -// NewReader creates a new Reader reading the given reader. -// The implementation buffers input and may read more data than necessary from r. -// It is the caller's responsibility to call Close on the Reader when done. -func NewReader(r io.Reader) (*Reader, error) { - z := new(Reader) - z.blocks = defaultBlocks - z.blockSize = defaultBlockSize - z.r = makeReader(r) - z.digest = crc32.NewIEEE() - z.multistream = true - z.blockPool = make(chan []byte, z.blocks) - for i := 0; i < z.blocks; i++ { - z.blockPool <- make([]byte, z.blockSize) - } - if err := z.readHeader(true); err != nil { - return nil, err - } - return z, nil -} - -// NewReaderN creates a new Reader reading the given reader. -// The implementation buffers input and may read more data than necessary from r. -// It is the caller's responsibility to call Close on the Reader when done. -// -// With this you can control the approximate size of your blocks, -// as well as how many blocks you want to have prefetched. -// -// Default values for this is blockSize = 250000, blocks = 16, -// meaning up to 16 blocks of maximum 250000 bytes will be -// prefetched. 
-func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) { - z := new(Reader) - z.blocks = blocks - z.blockSize = blockSize - z.r = makeReader(r) - z.digest = crc32.NewIEEE() - z.multistream = true - - // Account for too small values - if z.blocks <= 0 { - z.blocks = defaultBlocks - } - if z.blockSize <= 512 { - z.blockSize = defaultBlockSize - } - z.blockPool = make(chan []byte, z.blocks) - for i := 0; i < z.blocks; i++ { - z.blockPool <- make([]byte, z.blockSize) - } - if err := z.readHeader(true); err != nil { - return nil, err - } - return z, nil -} - -// Reset discards the Reader z's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *Reader) Reset(r io.Reader) error { - z.killReadAhead() - z.r = makeReader(r) - z.digest = crc32.NewIEEE() - z.size = 0 - z.err = nil - z.multistream = true - - // Account for uninitialized values - if z.blocks <= 0 { - z.blocks = defaultBlocks - } - if z.blockSize <= 512 { - z.blockSize = defaultBlockSize - } - - if z.blockPool == nil { - z.blockPool = make(chan []byte, z.blocks) - for i := 0; i < z.blocks; i++ { - z.blockPool <- make([]byte, z.blockSize) - } - } - - return z.readHeader(true) -} - -// Multistream controls whether the reader supports multistream files. -// -// If enabled (the default), the Reader expects the input to be a sequence -// of individually gzipped data streams, each with its own header and -// trailer, ending at EOF. The effect is that the concatenation of a sequence -// of gzipped files is treated as equivalent to the gzip of the concatenation -// of the sequence. This is standard behavior for gzip readers. -// -// Calling Multistream(false) disables this behavior; disabling the behavior -// can be useful when reading file formats that distinguish individual gzip -// data streams or mix gzip data streams with other data streams. -// In this mode, when the Reader reaches the end of the data stream, -// Read returns io.EOF. If the underlying reader implements io.ByteReader, -// it will be left positioned just after the gzip stream. -// To start the next stream, call z.Reset(r) followed by z.Multistream(false). -// If there is no next stream, z.Reset(r) will return io.EOF. -func (z *Reader) Multistream(ok bool) { - z.multistream = ok -} - -// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). -func get4(p []byte) uint32 { - return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 -} - -func (z *Reader) readString() (string, error) { - var err error - needconv := false - for i := 0; ; i++ { - if i >= len(z.buf) { - return "", ErrHeader - } - z.buf[i], err = z.r.ReadByte() - if err != nil { - return "", err - } - if z.buf[i] > 0x7f { - needconv = true - } - if z.buf[i] == 0 { - // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). 
- if needconv { - s := make([]rune, 0, i) - for _, v := range z.buf[0:i] { - s = append(s, rune(v)) - } - return string(s), nil - } - return string(z.buf[0:i]), nil - } - } -} - -func (z *Reader) read2() (uint32, error) { - _, err := io.ReadFull(z.r, z.buf[0:2]) - if err != nil { - return 0, err - } - return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil -} - -func (z *Reader) readHeader(save bool) error { - z.killReadAhead() - - _, err := io.ReadFull(z.r, z.buf[0:10]) - if err != nil { - return err - } - if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { - return ErrHeader - } - z.flg = z.buf[3] - if save { - z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0) - // z.buf[8] is xfl, ignored - z.OS = z.buf[9] - } - z.digest.Reset() - z.digest.Write(z.buf[0:10]) - - if z.flg&flagExtra != 0 { - n, err := z.read2() - if err != nil { - return err - } - data := make([]byte, n) - if _, err = io.ReadFull(z.r, data); err != nil { - return err - } - if save { - z.Extra = data - } - } - - var s string - if z.flg&flagName != 0 { - if s, err = z.readString(); err != nil { - return err - } - if save { - z.Name = s - } - } - - if z.flg&flagComment != 0 { - if s, err = z.readString(); err != nil { - return err - } - if save { - z.Comment = s - } - } - - if z.flg&flagHdrCrc != 0 { - n, err := z.read2() - if err != nil { - return err - } - sum := z.digest.Sum32() & 0xFFFF - if n != sum { - return ErrHeader - } - } - - z.digest.Reset() - z.decompressor = flate.NewReader(z.r) - z.doReadAhead() - return nil -} - -func (z *Reader) killReadAhead() error { - z.mu.Lock() - defer z.mu.Unlock() - if z.activeRA { - if z.closeReader != nil { - close(z.closeReader) - } - - // Wait for decompressor to be closed and return error, if any. - e, ok := <-z.closeErr - z.activeRA = false - - for blk := range z.readAhead { - if blk.b != nil { - z.blockPool <- blk.b - } - } - if cap(z.current) > 0 { - z.blockPool <- z.current - z.current = nil - } - if !ok { - // Channel is closed, so if there was any error it has already been returned. - return nil - } - return e - } - return nil -} - -// Starts readahead. -// Will return on error (including io.EOF) -// or when z.closeReader is closed. -func (z *Reader) doReadAhead() { - z.mu.Lock() - defer z.mu.Unlock() - z.activeRA = true - - if z.blocks <= 0 { - z.blocks = defaultBlocks - } - if z.blockSize <= 512 { - z.blockSize = defaultBlockSize - } - ra := make(chan read, z.blocks) - z.readAhead = ra - closeReader := make(chan struct{}, 0) - z.closeReader = closeReader - z.lastBlock = false - closeErr := make(chan error, 1) - z.closeErr = closeErr - z.size = 0 - z.roff = 0 - z.current = nil - decomp := z.decompressor - - go func() { - defer func() { - closeErr <- decomp.Close() - close(closeErr) - close(ra) - }() - - // We hold a local reference to digest, since - // it way be changed by reset. - digest := z.digest - var wg sync.WaitGroup - for { - var buf []byte - select { - case buf = <-z.blockPool: - case <-closeReader: - return - } - buf = buf[0:z.blockSize] - // Try to fill the buffer - n, err := io.ReadFull(decomp, buf) - if err == io.ErrUnexpectedEOF { - if n > 0 { - err = nil - } else { - // If we got zero bytes, we need to establish if - // we reached end of stream or truncated stream. 
- _, err = decomp.Read([]byte{}) - if err == io.EOF { - err = nil - } - } - } - if n < len(buf) { - buf = buf[0:n] - } - wg.Wait() - wg.Add(1) - go func() { - digest.Write(buf) - wg.Done() - }() - z.size += uint32(n) - - // If we return any error, out digest must be ready - if err != nil { - wg.Wait() - } - select { - case z.readAhead <- read{b: buf, err: err}: - case <-closeReader: - // Sent on close, we don't care about the next results - z.blockPool <- buf - return - } - if err != nil { - return - } - } - }() -} - -func (z *Reader) Read(p []byte) (n int, err error) { - if z.err != nil { - return 0, z.err - } - if len(p) == 0 { - return 0, nil - } - - for { - if len(z.current) == 0 && !z.lastBlock { - read := <-z.readAhead - - if read.err != nil { - // If not nil, the reader will have exited - z.closeReader = nil - - if read.err != io.EOF { - z.err = read.err - return - } - if read.err == io.EOF { - z.lastBlock = true - err = nil - } - } - z.current = read.b - z.roff = 0 - } - avail := z.current[z.roff:] - if len(p) >= len(avail) { - // If len(p) >= len(current), return all content of current - n = copy(p, avail) - z.blockPool <- z.current - z.current = nil - if z.lastBlock { - err = io.EOF - break - } - } else { - // We copy as much as there is space for - n = copy(p, avail) - z.roff += n - } - return - } - - // Finished file; check checksum + size. - if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { - z.err = err - return 0, err - } - crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) - sum := z.digest.Sum32() - if sum != crc32 || isize != z.size { - z.err = ErrChecksum - return 0, z.err - } - - // File is ok; should we attempt reading one more? - if !z.multistream { - return 0, io.EOF - } - - // Is there another? - if err = z.readHeader(false); err != nil { - z.err = err - return - } - - // Yes. Reset and read from it. - return z.Read(p) -} - -func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { - total := int64(0) - avail := z.current[z.roff:] - if len(avail) != 0 { - n, err := w.Write(avail) - if n != len(avail) { - return total, io.ErrShortWrite - } - total += int64(n) - if err != nil { - return total, err - } - z.blockPool <- z.current - z.current = nil - } - for { - if z.err != nil { - return total, z.err - } - // We write both to output and digest. - for { - // Read from input - read := <-z.readAhead - if read.err != nil { - // If not nil, the reader will have exited - z.closeReader = nil - - if read.err != io.EOF { - z.err = read.err - return total, z.err - } - if read.err == io.EOF { - z.lastBlock = true - err = nil - } - } - // Write what we got - n, err := w.Write(read.b) - if n != len(read.b) { - return total, io.ErrShortWrite - } - total += int64(n) - if err != nil { - return total, err - } - // Put block back - z.blockPool <- read.b - if z.lastBlock { - break - } - } - - // Finished file; check checksum + size. - if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { - z.err = err - return total, err - } - crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) - sum := z.digest.Sum32() - if sum != crc32 || isize != z.size { - z.err = ErrChecksum - return total, z.err - } - // File is ok; should we attempt reading one more? - if !z.multistream { - return total, nil - } - - // Is there another? - err = z.readHeader(false) - if err == io.EOF { - return total, nil - } - if err != nil { - z.err = err - return total, err - } - } -} - -// Close closes the Reader. It does not close the underlying io.Reader. 
-func (z *Reader) Close() error { - return z.killReadAhead() -} diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go deleted file mode 100644 index 257c4d299..000000000 --- a/vendor/github.com/klauspost/pgzip/gzip.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pgzip - -import ( - "bytes" - "errors" - "fmt" - "hash" - "hash/crc32" - "io" - "runtime" - "sync" - "time" - - "github.com/klauspost/compress/flate" -) - -const ( - defaultBlockSize = 1 << 20 - tailSize = 16384 - defaultBlocks = 4 -) - -// These constants are copied from the flate package, so that code that imports -// "compress/gzip" does not also have to import "compress/flate". -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression - ConstantCompression = flate.ConstantCompression - HuffmanOnly = flate.HuffmanOnly -) - -// A Writer is an io.WriteCloser. -// Writes to a Writer are compressed and written to w. -type Writer struct { - Header - w io.Writer - level int - wroteHeader bool - blockSize int - blocks int - currentBuffer []byte - prevTail []byte - digest hash.Hash32 - size int - closed bool - buf [10]byte - errMu sync.RWMutex - err error - pushedErr chan struct{} - results chan result - dictFlatePool sync.Pool - dstPool sync.Pool - wg sync.WaitGroup -} - -type result struct { - result chan []byte - notifyWritten chan struct{} -} - -// Use SetConcurrency to finetune the concurrency level if needed. -// -// With this you can control the approximate size of your blocks, -// as well as how many you want to be processing in parallel. -// -// Default values for this is SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0)), -// meaning blocks are split at 1 MB and up to the number of CPU threads -// can be processing at once before the writer blocks. -func (z *Writer) SetConcurrency(blockSize, blocks int) error { - if blockSize <= tailSize { - return fmt.Errorf("gzip: block size cannot be less than or equal to %d", tailSize) - } - if blocks <= 0 { - return errors.New("gzip: blocks cannot be zero or less") - } - if blockSize == z.blockSize && blocks == z.blocks { - return nil - } - z.blockSize = blockSize - z.results = make(chan result, blocks) - z.blocks = blocks - z.dstPool.New = func() interface{} { return make([]byte, 0, blockSize+(blockSize)>>4) } - return nil -} - -// NewWriter returns a new Writer. -// Writes to the returned writer are compressed and written to w. -// -// It is the caller's responsibility to call Close on the WriteCloser when done. -// Writes may be buffered and not flushed until Close. -// -// Callers that wish to set the fields in Writer.Header must do so before -// the first call to Write or Close. The Comment and Name header fields are -// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO -// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an -// error on Write. -func NewWriter(w io.Writer) *Writer { - z, _ := NewWriterLevel(w, DefaultCompression) - return z -} - -// NewWriterLevel is like NewWriter but specifies the compression level instead -// of assuming DefaultCompression. -// -// The compression level can be DefaultCompression, NoCompression, or any -// integer value between BestSpeed and BestCompression inclusive. 
The error -// returned will be nil if the level is valid. -func NewWriterLevel(w io.Writer, level int) (*Writer, error) { - if level < ConstantCompression || level > BestCompression { - return nil, fmt.Errorf("gzip: invalid compression level: %d", level) - } - z := new(Writer) - z.SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0)) - z.init(w, level) - return z, nil -} - -// This function must be used by goroutines to set an -// error condition, since z.err access is restricted -// to the callers goruotine. -func (z *Writer) pushError(err error) { - z.errMu.Lock() - if z.err != nil { - z.errMu.Unlock() - return - } - z.err = err - close(z.pushedErr) - z.errMu.Unlock() -} - -func (z *Writer) init(w io.Writer, level int) { - z.wg.Wait() - digest := z.digest - if digest != nil { - digest.Reset() - } else { - digest = crc32.NewIEEE() - } - z.Header = Header{OS: 255} - z.w = w - z.level = level - z.digest = digest - z.pushedErr = make(chan struct{}, 0) - z.results = make(chan result, z.blocks) - z.err = nil - z.closed = false - z.Comment = "" - z.Extra = nil - z.ModTime = time.Time{} - z.wroteHeader = false - z.currentBuffer = nil - z.buf = [10]byte{} - z.prevTail = nil - z.size = 0 - if z.dictFlatePool.New == nil { - z.dictFlatePool.New = func() interface{} { - f, _ := flate.NewWriterDict(w, level, nil) - return f - } - } -} - -// Reset discards the Writer z's state and makes it equivalent to the -// result of its original state from NewWriter or NewWriterLevel, but -// writing to w instead. This permits reusing a Writer rather than -// allocating a new one. -func (z *Writer) Reset(w io.Writer) { - if z.results != nil && !z.closed { - close(z.results) - } - z.SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0)) - z.init(w, z.level) -} - -// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). -func put2(p []byte, v uint16) { - p[0] = uint8(v >> 0) - p[1] = uint8(v >> 8) -} - -func put4(p []byte, v uint32) { - p[0] = uint8(v >> 0) - p[1] = uint8(v >> 8) - p[2] = uint8(v >> 16) - p[3] = uint8(v >> 24) -} - -// writeBytes writes a length-prefixed byte slice to z.w. -func (z *Writer) writeBytes(b []byte) error { - if len(b) > 0xffff { - return errors.New("gzip.Write: Extra data is too large") - } - put2(z.buf[0:2], uint16(len(b))) - _, err := z.w.Write(z.buf[0:2]) - if err != nil { - return err - } - _, err = z.w.Write(b) - return err -} - -// writeString writes a UTF-8 string s in GZIP's format to z.w. -// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). -func (z *Writer) writeString(s string) (err error) { - // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. - needconv := false - for _, v := range s { - if v == 0 || v > 0xff { - return errors.New("gzip.Write: non-Latin-1 header string") - } - if v > 0x7f { - needconv = true - } - } - if needconv { - b := make([]byte, 0, len(s)) - for _, v := range s { - b = append(b, byte(v)) - } - _, err = z.w.Write(b) - } else { - _, err = io.WriteString(z.w, s) - } - if err != nil { - return err - } - // GZIP strings are NUL-terminated. - z.buf[0] = 0 - _, err = z.w.Write(z.buf[0:1]) - return err -} - -// compressCurrent will compress the data currently buffered -// This should only be called from the main writer/flush/closer -func (z *Writer) compressCurrent(flush bool) { - c := z.currentBuffer - if len(c) > z.blockSize { - // This can never happen through the public interface. 
- panic("len(z.currentBuffer) > z.blockSize (most likely due to concurrent Write race)") - } - - r := result{} - r.result = make(chan []byte, 1) - r.notifyWritten = make(chan struct{}, 0) - // Reserve a result slot - select { - case z.results <- r: - case <-z.pushedErr: - return - } - - z.wg.Add(1) - tail := z.prevTail - if len(c) > tailSize { - buf := z.dstPool.Get().([]byte) // Put in .compressBlock - // Copy tail from current buffer before handing the buffer over to the - // compressBlock goroutine. - buf = append(buf[:0], c[len(c)-tailSize:]...) - z.prevTail = buf - } else { - z.prevTail = nil - } - go z.compressBlock(c, tail, r, z.closed) - - z.currentBuffer = z.dstPool.Get().([]byte) // Put in .compressBlock - z.currentBuffer = z.currentBuffer[:0] - - // Wait if flushing - if flush { - <-r.notifyWritten - } -} - -// Returns an error if it has been set. -// Cannot be used by functions that are from internal goroutines. -func (z *Writer) checkError() error { - z.errMu.RLock() - err := z.err - z.errMu.RUnlock() - return err -} - -// Write writes a compressed form of p to the underlying io.Writer. The -// compressed bytes are not necessarily flushed to output until -// the Writer is closed or Flush() is called. -// -// The function will return quickly, if there are unused buffers. -// The sent slice (p) is copied, and the caller is free to re-use the buffer -// when the function returns. -// -// Errors that occur during compression will be reported later, and a nil error -// does not signify that the compression succeeded (since it is most likely still running) -// That means that the call that returns an error may not be the call that caused it. -// Only Flush and Close functions are guaranteed to return any errors up to that point. -func (z *Writer) Write(p []byte) (int, error) { - if err := z.checkError(); err != nil { - return 0, err - } - // Write the GZIP header lazily. - if !z.wroteHeader { - z.wroteHeader = true - z.buf[0] = gzipID1 - z.buf[1] = gzipID2 - z.buf[2] = gzipDeflate - z.buf[3] = 0 - if z.Extra != nil { - z.buf[3] |= 0x04 - } - if z.Name != "" { - z.buf[3] |= 0x08 - } - if z.Comment != "" { - z.buf[3] |= 0x10 - } - put4(z.buf[4:8], uint32(z.ModTime.Unix())) - if z.level == BestCompression { - z.buf[8] = 2 - } else if z.level == BestSpeed { - z.buf[8] = 4 - } else { - z.buf[8] = 0 - } - z.buf[9] = z.OS - var n int - var err error - n, err = z.w.Write(z.buf[0:10]) - if err != nil { - z.pushError(err) - return n, err - } - if z.Extra != nil { - err = z.writeBytes(z.Extra) - if err != nil { - z.pushError(err) - return n, err - } - } - if z.Name != "" { - err = z.writeString(z.Name) - if err != nil { - z.pushError(err) - return n, err - } - } - if z.Comment != "" { - err = z.writeString(z.Comment) - if err != nil { - z.pushError(err) - return n, err - } - } - // Start receiving data from compressors - go func() { - listen := z.results - var failed bool - for { - r, ok := <-listen - // If closed, we are finished. 
- if !ok { - return - } - if failed { - close(r.notifyWritten) - continue - } - buf := <-r.result - n, err := z.w.Write(buf) - if err != nil { - z.pushError(err) - close(r.notifyWritten) - failed = true - continue - } - if n != len(buf) { - z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf))) - failed = true - close(r.notifyWritten) - continue - } - z.dstPool.Put(buf) - close(r.notifyWritten) - } - }() - z.currentBuffer = z.dstPool.Get().([]byte) - z.currentBuffer = z.currentBuffer[:0] - } - q := p - for len(q) > 0 { - length := len(q) - if length+len(z.currentBuffer) > z.blockSize { - length = z.blockSize - len(z.currentBuffer) - } - z.digest.Write(q[:length]) - z.currentBuffer = append(z.currentBuffer, q[:length]...) - if len(z.currentBuffer) > z.blockSize { - panic("z.currentBuffer too large (most likely due to concurrent Write race)") - } - if len(z.currentBuffer) == z.blockSize { - z.compressCurrent(false) - if err := z.checkError(); err != nil { - return len(p) - len(q), err - } - } - z.size += length - q = q[length:] - } - return len(p), z.checkError() -} - -// Step 1: compresses buffer to buffer -// Step 2: send writer to channel -// Step 3: Close result channel to indicate we are done -func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) { - defer func() { - close(r.result) - z.wg.Done() - }() - buf := z.dstPool.Get().([]byte) // Corresponding Put in .Write's result writer - dest := bytes.NewBuffer(buf[:0]) - - compressor := z.dictFlatePool.Get().(*flate.Writer) // Put below - compressor.ResetDict(dest, prevTail) - compressor.Write(p) - z.dstPool.Put(p) // Corresponding Get in .Write and .compressCurrent - - err := compressor.Flush() - if err != nil { - z.pushError(err) - return - } - if closed { - err = compressor.Close() - if err != nil { - z.pushError(err) - return - } - } - z.dictFlatePool.Put(compressor) // Get above - - if prevTail != nil { - z.dstPool.Put(prevTail) // Get in .compressCurrent - } - - // Read back buffer - buf = dest.Bytes() - r.result <- buf -} - -// Flush flushes any pending compressed data to the underlying writer. -// -// It is useful mainly in compressed network protocols, to ensure that -// a remote reader has enough data to reconstruct a packet. Flush does -// not return until the data has been written. If the underlying -// writer returns an error, Flush returns that error. -// -// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. -func (z *Writer) Flush() error { - if err := z.checkError(); err != nil { - return err - } - if z.closed { - return nil - } - if !z.wroteHeader { - _, err := z.Write(nil) - if err != nil { - return err - } - } - // We send current block to compression - z.compressCurrent(true) - - return z.checkError() -} - -// UncompressedSize will return the number of bytes written. -// pgzip only, not a function in the official gzip package. -func (z *Writer) UncompressedSize() int { - return z.size -} - -// Close closes the Writer, flushing any unwritten data to the underlying -// io.Writer, but does not close the underlying io.Writer. 
-func (z *Writer) Close() error { - if err := z.checkError(); err != nil { - return err - } - if z.closed { - return nil - } - - z.closed = true - if !z.wroteHeader { - z.Write(nil) - if err := z.checkError(); err != nil { - return err - } - } - z.compressCurrent(true) - if err := z.checkError(); err != nil { - return err - } - close(z.results) - put4(z.buf[0:4], z.digest.Sum32()) - put4(z.buf[4:8], uint32(z.size)) - _, err := z.w.Write(z.buf[0:8]) - if err != nil { - z.pushError(err) - return err - } - return nil -} diff --git a/vendor/github.com/lufia/plan9stats/.gitignore b/vendor/github.com/lufia/plan9stats/.gitignore deleted file mode 100644 index f1c181ec9..000000000 --- a/vendor/github.com/lufia/plan9stats/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out diff --git a/vendor/github.com/lufia/plan9stats/LICENSE b/vendor/github.com/lufia/plan9stats/LICENSE deleted file mode 100644 index a6d47e807..000000000 --- a/vendor/github.com/lufia/plan9stats/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2019, KADOTA, Kyohei -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/lufia/plan9stats/README.md b/vendor/github.com/lufia/plan9stats/README.md deleted file mode 100644 index a21700c0c..000000000 --- a/vendor/github.com/lufia/plan9stats/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# plan9stats -A module for retrieving statistics of Plan 9 diff --git a/vendor/github.com/lufia/plan9stats/cpu.go b/vendor/github.com/lufia/plan9stats/cpu.go deleted file mode 100644 index a101b9119..000000000 --- a/vendor/github.com/lufia/plan9stats/cpu.go +++ /dev/null @@ -1,288 +0,0 @@ -package stats - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" -) - -// CPUType represents /dev/cputype. 
-type CPUType struct { - Name string - Clock int // clock rate in MHz -} - -func ReadCPUType(ctx context.Context, opts ...Option) (*CPUType, error) { - cfg := newConfig(opts...) - var c CPUType - if err := readCPUType(cfg.rootdir, &c); err != nil { - return nil, err - } - return &c, nil -} - -type SysStats struct { - ID int - NumCtxSwitch int64 - NumInterrupt int64 - NumSyscall int64 - NumFault int64 - NumTLBFault int64 - NumTLBPurge int64 - LoadAvg int64 // in units of milli-CPUs and is decayed over time - Idle int // percentage - Interrupt int // percentage -} - -// ReadSysStats reads system statistics from /dev/sysstat. -func ReadSysStats(ctx context.Context, opts ...Option) ([]*SysStats, error) { - cfg := newConfig(opts...) - file := filepath.Join(cfg.rootdir, "/dev/sysstat") - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - var stats []*SysStats - for scanner.Scan() { - a := strings.Fields(scanner.Text()) - if len(a) != 10 { - continue - } - var ( - p intParser - stat SysStats - ) - stat.ID = p.ParseInt(a[0], 10) - stat.NumCtxSwitch = p.ParseInt64(a[1], 10) - stat.NumInterrupt = p.ParseInt64(a[2], 10) - stat.NumSyscall = p.ParseInt64(a[3], 10) - stat.NumFault = p.ParseInt64(a[4], 10) - stat.NumTLBFault = p.ParseInt64(a[5], 10) - stat.NumTLBPurge = p.ParseInt64(a[6], 10) - stat.LoadAvg = p.ParseInt64(a[7], 10) - stat.Idle = p.ParseInt(a[8], 10) - stat.Interrupt = p.ParseInt(a[9], 10) - if err := p.Err(); err != nil { - return nil, err - } - stats = append(stats, &stat) - } - if err := scanner.Err(); err != nil { - return nil, err - } - return stats, nil -} - -func readCPUType(rootdir string, c *CPUType) error { - file := filepath.Join(rootdir, "/dev/cputype") - b, err := ioutil.ReadFile(file) - if err != nil { - return err - } - b = bytes.TrimSpace(b) - i := bytes.LastIndexByte(b, ' ') - if i < 0 { - return fmt.Errorf("%s: invalid format", file) - } - clock, err := strconv.Atoi(string(b[i+1:])) - if err != nil { - return err - } - c.Name = string(b[:i]) - c.Clock = clock - return nil -} - -// Time represents /dev/time. -type Time struct { - Unix time.Duration - UnixNano time.Duration - Ticks int64 // clock ticks - Freq int64 //cloc frequency -} - -// Uptime returns uptime. -func (t *Time) Uptime() time.Duration { - v := float64(t.Ticks) / float64(t.Freq) - return time.Duration(v*1000_000_000) * time.Nanosecond -} - -func ReadTime(ctx context.Context, opts ...Option) (*Time, error) { - cfg := newConfig(opts...) - file := filepath.Join(cfg.rootdir, "/dev/time") - var t Time - if err := readTime(file, &t); err != nil { - return nil, err - } - return &t, nil -} - -// ProcStatus represents a /proc/n/status. -type ProcStatus struct { - Name string - User string - State string - Times CPUTime - MemUsed int64 // in units of 1024 bytes - BasePriority uint32 // 0(low) to 19(high) - Priority uint32 // 0(low) to 19(high) -} - -// CPUTime represents /dev/cputime or a part of /proc/n/status. -type CPUTime struct { - User time.Duration // the time in user mode (millisecconds) - Sys time.Duration - Real time.Duration - ChildUser time.Duration // exited children and descendants time in user mode - ChildSys time.Duration - ChildReal time.Duration -} - -// CPUStats emulates Linux's /proc/stat. -type CPUStats struct { - User time.Duration - Sys time.Duration - Idle time.Duration -} - -func ReadCPUStats(ctx context.Context, opts ...Option) (*CPUStats, error) { - cfg := newConfig(opts...) - a, err := ReadSysStats(ctx, opts...) 
- if err != nil { - return nil, err - } - - dir := filepath.Join(cfg.rootdir, "/proc") - d, err := os.Open(dir) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(0) - if err != nil { - return nil, err - } - var up uint32parser - pids := make([]uint32, len(names)) - for i, s := range names { - pids[i] = up.Parse(s) - } - if up.err != nil { - return nil, err - } - sort.Slice(pids, func(i, j int) bool { - return pids[i] < pids[j] - }) - - var stat CPUStats - for _, pid := range pids { - s := strconv.FormatUint(uint64(pid), 10) - file := filepath.Join(dir, s, "status") - var p ProcStatus - if err := readProcStatus(file, &p); err != nil { - return nil, err - } - stat.User += p.Times.User - stat.Sys += p.Times.Sys - } - - var t Time - file := filepath.Join(cfg.rootdir, "/dev/time") - if err := readTime(file, &t); err != nil { - return nil, err - } - // In multi-processor host, Idle should multiple by number of cores. - u := t.Uptime() * time.Duration(len(a)) - stat.Idle = u - stat.User - stat.Sys - return &stat, nil -} - -func readProcStatus(file string, p *ProcStatus) error { - b, err := ioutil.ReadFile(file) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - fields := strings.Fields(string(b)) - if len(fields) != 12 { - return errors.New("invalid format") - } - p.Name = string(fields[0]) - p.User = string(fields[1]) - p.State = string(fields[2]) - var up uint32parser - p.Times.User = time.Duration(up.Parse(fields[3])) * time.Millisecond - p.Times.Sys = time.Duration(up.Parse(fields[4])) * time.Millisecond - p.Times.Real = time.Duration(up.Parse(fields[5])) * time.Millisecond - p.Times.ChildUser = time.Duration(up.Parse(fields[6])) * time.Millisecond - p.Times.ChildSys = time.Duration(up.Parse(fields[7])) * time.Millisecond - p.Times.ChildReal = time.Duration(up.Parse(fields[8])) * time.Millisecond - p.MemUsed, err = strconv.ParseInt(fields[9], 10, 64) - if err != nil { - return err - } - p.BasePriority = up.Parse(fields[10]) - p.Priority = up.Parse(fields[11]) - return up.err -} - -func readTime(file string, t *Time) error { - b, err := ioutil.ReadFile(file) - if err != nil { - return err - } - fields := strings.Fields(string(b)) - if len(fields) != 4 { - return errors.New("invalid format") - } - n, err := strconv.ParseInt(fields[0], 10, 32) - if err != nil { - return err - } - t.Unix = time.Duration(n) * time.Second - v, err := strconv.ParseInt(fields[1], 10, 64) - if err != nil { - return err - } - t.UnixNano = time.Duration(v) * time.Nanosecond - t.Ticks, err = strconv.ParseInt(fields[2], 10, 64) - if err != nil { - return err - } - t.Freq, err = strconv.ParseInt(fields[3], 10, 64) - if err != nil { - return err - } - return nil -} - -type uint32parser struct { - err error -} - -func (p *uint32parser) Parse(s string) uint32 { - if p.err != nil { - return 0 - } - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - p.err = err - return 0 - } - return uint32(n) -} diff --git a/vendor/github.com/lufia/plan9stats/doc.go b/vendor/github.com/lufia/plan9stats/doc.go deleted file mode 100644 index 10e398e7a..000000000 --- a/vendor/github.com/lufia/plan9stats/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package stats provides statistic utilities for Plan 9. 
-package stats diff --git a/vendor/github.com/lufia/plan9stats/host.go b/vendor/github.com/lufia/plan9stats/host.go deleted file mode 100644 index 957e90348..000000000 --- a/vendor/github.com/lufia/plan9stats/host.go +++ /dev/null @@ -1,303 +0,0 @@ -package stats - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "strconv" - "strings" -) - -var ( - delim = []byte{' '} -) - -// Host represents host status. -type Host struct { - Sysname string - Storages []*Storage - Interfaces []*Interface -} - -// MemStats represents the memory statistics. -type MemStats struct { - Total int64 // total memory in byte - PageSize int64 // a page size in byte - KernelPages int64 - UserPages Gauge - SwapPages Gauge - - Malloced Gauge // kernel malloced data in byte - Graphics Gauge // kernel graphics data in byte -} - -// Gauge is used/available gauge. -type Gauge struct { - Used int64 - Avail int64 -} - -func (g Gauge) Free() int64 { - return g.Avail - g.Used -} - -// ReadMemStats reads memory statistics from /dev/swap. -func ReadMemStats(ctx context.Context, opts ...Option) (*MemStats, error) { - cfg := newConfig(opts...) - swap := filepath.Join(cfg.rootdir, "/dev/swap") - f, err := os.Open(swap) - if err != nil { - return nil, err - } - defer f.Close() - - var stat MemStats - m := map[string]interface{}{ - "memory": &stat.Total, - "pagesize": &stat.PageSize, - "kernel": &stat.KernelPages, - "user": &stat.UserPages, - "swap": &stat.SwapPages, - "kernel malloc": &stat.Malloced, - "kernel draw": &stat.Graphics, - } - scanner := bufio.NewScanner(f) - for scanner.Scan() { - fields := bytes.SplitN(scanner.Bytes(), delim, 2) - if len(fields) < 2 { - continue - } - switch key := string(fields[1]); key { - case "memory", "pagesize", "kernel": - v := m[key].(*int64) - n, err := strconv.ParseInt(string(fields[0]), 10, 64) - if err != nil { - return nil, err - } - *v = n - case "user", "swap", "kernel malloc", "kernel draw": - v := m[key].(*Gauge) - if err := parseGauge(string(fields[0]), v); err != nil { - return nil, err - } - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return &stat, nil -} - -func parseGauge(s string, r *Gauge) error { - a := strings.SplitN(s, "/", 2) - if len(a) != 2 { - return fmt.Errorf("can't parse ratio: %s", s) - } - var p intParser - u := p.ParseInt64(a[0], 10) - n := p.ParseInt64(a[1], 10) - if err := p.Err(); err != nil { - return err - } - r.Used = u - r.Avail = n - return nil -} - -type Storage struct { - Name string - Model string - Capacity int64 -} - -type Interface struct { - Name string - Addr string -} - -const ( - numEther = 8 // see ether(3) - numIpifc = 16 // see ip(3) -) - -// ReadInterfaces reads network interfaces from etherN. -func ReadInterfaces(ctx context.Context, opts ...Option) ([]*Interface, error) { - cfg := newConfig(opts...) 
- var a []*Interface - for i := 0; i < numEther; i++ { - p, err := readInterface(cfg.rootdir, i) - if os.IsNotExist(err) { - continue - } - if err != nil { - return nil, err - } - a = append(a, p) - } - return a, nil -} - -func readInterface(netroot string, i int) (*Interface, error) { - ether := fmt.Sprintf("ether%d", i) - dir := filepath.Join(netroot, ether) - info, err := os.Stat(dir) - if err != nil { - return nil, err - } - if !info.IsDir() { - return nil, fmt.Errorf("%s: is not directory", dir) - } - - addr, err := ioutil.ReadFile(filepath.Join(dir, "addr")) - if err != nil { - return nil, err - } - return &Interface{ - Name: ether, - Addr: string(addr), - }, nil -} - -var ( - netdirs = []string{"/net", "/net.alt"} -) - -// ReadHost reads host status. -func ReadHost(ctx context.Context, opts ...Option) (*Host, error) { - cfg := newConfig(opts...) - var h Host - name, err := readSysname(cfg.rootdir) - if err != nil { - return nil, err - } - h.Sysname = name - - a, err := readStorages(cfg.rootdir) - if err != nil { - return nil, err - } - h.Storages = a - - for _, s := range netdirs { - netroot := filepath.Join(cfg.rootdir, s) - ifaces, err := ReadInterfaces(ctx, WithRootDir(netroot)) - if err != nil { - return nil, err - } - h.Interfaces = append(h.Interfaces, ifaces...) - } - return &h, nil -} - -func readSysname(rootdir string) (string, error) { - file := filepath.Join(rootdir, "/dev/sysname") - b, err := ioutil.ReadFile(file) - if err != nil { - return "", err - } - return string(bytes.TrimSpace(b)), nil -} - -func readStorages(rootdir string) ([]*Storage, error) { - sdctl := filepath.Join(rootdir, "/dev/sdctl") - f, err := os.Open(sdctl) - if err != nil { - return nil, err - } - defer f.Close() - - var a []*Storage - scanner := bufio.NewScanner(f) - for scanner.Scan() { - fields := bytes.Split(scanner.Bytes(), delim) - if len(fields) == 0 { - continue - } - exp := string(fields[0]) + "*" - if !strings.HasPrefix(exp, "sd") { - continue - } - dir := filepath.Join(rootdir, "/dev", exp) - m, err := filepath.Glob(dir) - if err != nil { - return nil, err - } - for _, dir := range m { - s, err := readStorage(dir) - if err != nil { - return nil, err - } - a = append(a, s) - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return a, nil -} - -func readStorage(dir string) (*Storage, error) { - ctl := filepath.Join(dir, "ctl") - f, err := os.Open(ctl) - if err != nil { - return nil, err - } - defer f.Close() - - var s Storage - s.Name = filepath.Base(dir) - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Bytes() - switch { - case bytes.HasPrefix(line, []byte("inquiry")): - s.Model = string(bytes.TrimSpace(line[7:])) - case bytes.HasPrefix(line, []byte("geometry")): - fields := bytes.Split(line, delim) - if len(fields) < 3 { - continue - } - var p intParser - sec := p.ParseInt64(string(fields[1]), 10) - size := p.ParseInt64(string(fields[2]), 10) - if err := p.Err(); err != nil { - return nil, err - } - s.Capacity = sec * size - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return &s, nil -} - -type IPStats struct { - ID int // number of interface in ipifc dir - Device string // associated physical device - MTU int // max transfer unit - Sendra6 uint8 // on == send router adv - Recvra6 uint8 // on == recv router adv - - Pktin int64 // packets read - Pktout int64 // packets written - Errin int64 // read errors - Errout int64 // write errors -} - -type Iplifc struct { - IP net.IP - Mask net.IPMask - Net net.IP // ip & mask - 
PerfLifetime int64 // preferred lifetime - ValidLifetime int64 // valid lifetime -} - -type Ipv6rp struct { - // TODO(lufia): see ip(2) -} diff --git a/vendor/github.com/lufia/plan9stats/int.go b/vendor/github.com/lufia/plan9stats/int.go deleted file mode 100644 index db133c43e..000000000 --- a/vendor/github.com/lufia/plan9stats/int.go +++ /dev/null @@ -1,31 +0,0 @@ -package stats - -import ( - "strconv" -) - -type intParser struct { - err error -} - -func (p *intParser) ParseInt(s string, base int) int { - if p.err != nil { - return 0 - } - var n int64 - n, p.err = strconv.ParseInt(s, base, 0) - return int(n) -} - -func (p *intParser) ParseInt64(s string, base int) int64 { - if p.err != nil { - return 0 - } - var n int64 - n, p.err = strconv.ParseInt(s, base, 64) - return n -} - -func (p *intParser) Err() error { - return p.err -} diff --git a/vendor/github.com/lufia/plan9stats/opts.go b/vendor/github.com/lufia/plan9stats/opts.go deleted file mode 100644 index 05b7d036a..000000000 --- a/vendor/github.com/lufia/plan9stats/opts.go +++ /dev/null @@ -1,21 +0,0 @@ -package stats - -type Config struct { - rootdir string -} - -type Option func(*Config) - -func newConfig(opts ...Option) *Config { - var cfg Config - for _, opt := range opts { - opt(&cfg) - } - return &cfg -} - -func WithRootDir(dir string) Option { - return func(cfg *Config) { - cfg.rootdir = dir - } -} diff --git a/vendor/github.com/lufia/plan9stats/stats.go b/vendor/github.com/lufia/plan9stats/stats.go deleted file mode 100644 index d4ecdcfa0..000000000 --- a/vendor/github.com/lufia/plan9stats/stats.go +++ /dev/null @@ -1,88 +0,0 @@ -package stats - -import ( - "bufio" - "context" - "os" - "path/filepath" - "strings" -) - -type InterfaceStats struct { - PacketsReceived int64 // in packets - Link int // link status - PacketsSent int64 // out packets - NumCRCErr int // input CRC errors - NumOverflows int // packet overflows - NumSoftOverflows int // software overflow - NumFramingErr int // framing errors - NumBufferingErr int // buffering errors - NumOutputErr int // output errors - Promiscuous int // number of promiscuous opens - Mbps int // megabits per sec - Addr string -} - -func ReadInterfaceStats(ctx context.Context, opts ...Option) (*InterfaceStats, error) { - cfg := newConfig(opts...) 
- file := filepath.Join(cfg.rootdir, "stats") - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - var stats InterfaceStats - scanner := bufio.NewScanner(f) - for scanner.Scan() { - s := strings.TrimSpace(scanner.Text()) - a := strings.SplitN(s, ":", 2) - if len(a) != 2 { - continue - } - var p intParser - v := strings.TrimSpace(a[1]) - switch a[0] { - case "in": - stats.PacketsReceived = p.ParseInt64(v, 10) - case "link": - stats.Link = p.ParseInt(v, 10) - case "out": - stats.PacketsSent = p.ParseInt64(v, 10) - case "crc": - stats.NumCRCErr = p.ParseInt(v, 10) - case "overflows": - stats.NumOverflows = p.ParseInt(v, 10) - case "soft overflows": - stats.NumSoftOverflows = p.ParseInt(v, 10) - case "framing errs": - stats.NumFramingErr = p.ParseInt(v, 10) - case "buffer errs": - stats.NumBufferingErr = p.ParseInt(v, 10) - case "output errs": - stats.NumOutputErr = p.ParseInt(v, 10) - case "prom": - stats.Promiscuous = p.ParseInt(v, 10) - case "mbps": - stats.Mbps = p.ParseInt(v, 10) - case "addr": - stats.Addr = v - } - if err := p.Err(); err != nil { - return nil, err - } - } - if err := scanner.Err(); err != nil { - return nil, err - } - return &stats, nil -} - -type TCPStats struct { - MaxConn int - MaxSegment int - ActiveOpens int - PassiveOpens int - EstablishedResets int - CurrentEstablished int -} diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore deleted file mode 100644 index e7081ff52..000000000 --- a/vendor/github.com/magiconair/properties/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*.sublime-project -*.sublime-workspace -*.un~ -*.swp -.idea/ -*.iml diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index 842e8e24f..000000000 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,205 +0,0 @@ -## Changelog - -### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 - - * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge - - Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. - - * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions - -### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 - - * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error - - Thanks to [@ellie](https://github.com/ellie) for the patch. - - * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible - - This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the - author happy until it affects real users. - - Thanks to [@maage](https://github.com/maage) for the patch. - -### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 - - * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments - - When reading comments \ are loaded correctly, but when writing they are then - replaced by \\. This leads to wrong comments when writing and reading multiple times. - - Thanks to [@doxsch](https://github.com/doxsch) for the patch. - -### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 - - * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references - - Thanks to [@sriv](https://github.com/sriv) for the patch. 
- -### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 - - * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference - - The change is include the key in the error message which is causing the circular - reference when parsing/loading the properties files. - - Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. - -### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 - - * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write - - This patch ensures that backslashes are escaped on write. Existing applications which - rely on the old behavior may need to be updated. - - Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. - - * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() - - Thanks to [@aliras1](https://github.com/aliras1) for the patch. - - * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - - * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - -### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - - * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request - - This patch ensures that in `LoadURL` the response body is always closed. - - Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. - -### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 - - * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading - - This adds the option to disable property expansion during loading. - - Thanks to [@kmala](https://github.com/kmala) for the patch. - -### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 - - * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. - - See PR for an example. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 - - * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value - - Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail - with a `circular reference error`. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - - Thanks to [@mgurov](https://github.com/mgurov) for the fix. 
- -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. - * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) - -### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 - - * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. 
- -### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 - - * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) - -### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 - - * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty - * Add clickable links to README - -### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 - - * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with - [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). - -### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 - - * Added support for single and multi-line comments (reading, writing and updating) - * The order of keys is now preserved - * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry - * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method - * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) - -### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 - - * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one - -### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 - - * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string - -### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 - - * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys - * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties - -### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 - -* Added support for time.Duration -* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom) -* Changed default of MustXXX() failure from panic to log.Fatal - -### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014 - -* Added MustGet...
functions -* Added support for int and uint with range checks on 32 bit platforms - -### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 - -* Renamed from goproperties to properties -* Added support for expansion of environment vars in - filenames and value expressions -* Fixed bug where value expressions were not at the - start of the string - -### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 - -* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md deleted file mode 100644 index 79c87e3e6..000000000 --- a/vendor/github.com/magiconair/properties/LICENSE.md +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013-2020, Frank Schroeder - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md deleted file mode 100644 index e2edda025..000000000 --- a/vendor/github.com/magiconair/properties/README.md +++ /dev/null @@ -1,128 +0,0 @@ -[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) -[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) -[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -# Overview - -#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. - -properties is a Go library for reading and writing properties files. - -It supports reading from multiple files or URLs and Spring style recursive -property expansion of expressions like `${key}` to their corresponding value. -Value expressions can refer to other keys like in `${key}` or to environment -variables like in `${USER}`. Filenames can also contain environment variables -like in `/home/${USER}/myapp.properties`. 
- -Properties can be decoded into structs, maps, arrays and values through -struct tags. - -Comments and the order of keys are preserved. Comments can be modified -and can be written to the output. - -The properties library supports both ISO-8859-1 and UTF-8 encoded data. - -Starting from version 1.3.0 the behavior of the MustXXX() functions is -configurable by providing a custom `ErrorHandler` function. The default has -changed from `panic` to `log.Fatal` but this is configurable and custom -error handling functions can be provided. See the package documentation for -details. - -Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -## Getting Started - -```go -import ( - "flag" - "github.com/magiconair/properties" -) - -func main() { - // init from a file - p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) - - // or multiple files - p = properties.MustLoadFiles([]string{ - "${HOME}/config.properties", - "${HOME}/config-${USER}.properties", - }, properties.UTF8, true) - - // or from a map - p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) - - // or from a string - p = properties.MustLoadString("key=value\nabc=def") - - // or from a URL - p = properties.MustLoadURL("http://host/path") - - // or from multiple URLs - p = properties.MustLoadURL([]string{ - "http://host/config", - "http://host/config-${USER}", - }, true) - - // or from flags - p.MustFlag(flag.CommandLine) - - // get values through getters - host := p.MustGetString("host") - port := p.GetInt("port", 8080) - - // or through Decode - type Config struct { - Host string `properties:"host"` - Port int `properties:"port,default=9000"` - Accept []string `properties:"accept,default=image/png;image;gif"` - Timeout time.Duration `properties:"timeout,default=5s"` - } - var cfg Config - if err := p.Decode(&cfg); err != nil { - log.Fatal(err) - } -} - -``` - -## Installation and Upgrade - -``` -$ go get -u github.com/magiconair/properties -``` - -## License - -2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. - -## ToDo - -* Dump contents with passwords and secrets obscured - -## Updated Git tags - -#### 13 Feb 2018 - -I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags -and I've only recently learned that this doesn't play well with `git describe` 😞 - -I have replaced all lightweight tags with signed tags using this script which should -retain the commit date, name and email address. Please run `git pull --tags` to update them. - -Worst case you have to reclone the repo. - -```shell -#!/bin/bash -tag=$1 -echo "Updating $tag" -date=$(git show ${tag}^0 --format=%aD | head -1) -email=$(git show ${tag}^0 --format=%aE | head -1) -name=$(git show ${tag}^0 --format=%aN | head -1) -GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} -``` - -I apologize for the inconvenience. - -Frank - diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go deleted file mode 100644 index 8e6aa441d..000000000 --- a/vendor/github.com/magiconair/properties/decode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package properties - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// Decode assigns property values to exported fields of a struct. -// -// Decode traverses v recursively and returns an error if a value cannot be -// converted to the field type or a required value is missing for a field. -// -// The following type dependent decodings are used: -// -// String, boolean, numeric fields have the value of the property key assigned. -// The property key name is the name of the field. A different key and a default -// value can be set in the field's tag. Fields without default value are -// required. If the value cannot be converted to the field type an error is -// returned. -// -// time.Duration fields have the result of time.ParseDuration() assigned. -// -// time.Time fields have the vaule of time.Parse() assigned. The default layout -// is time.RFC3339 but can be set in the field's tag. -// -// Arrays and slices of string, boolean, numeric, time.Duration and time.Time -// fields have the value interpreted as a comma separated list of values. The -// individual values are trimmed of whitespace and empty values are ignored. A -// default value can be provided as a semicolon separated list in the field's -// tag. -// -// Struct fields are decoded recursively using the field name plus "." as -// prefix. The prefix (without dot) can be overridden in the field's tag. -// Default values are not supported in the field's tag. Specify them on the -// fields of the inner struct instead. -// -// Map fields must have a key of type string and are decoded recursively by -// using the field's name plus ".' as prefix and the next element of the key -// name as map key. The prefix (without dot) can be overridden in the field's -// tag. Default values are not supported. -// -// Examples: -// -// // Field is ignored. -// Field int `properties:"-"` -// -// // Field is assigned value of 'Field'. -// Field int -// -// // Field is assigned value of 'myName'. -// Field int `properties:"myName"` -// -// // Field is assigned value of key 'myName' and has a default -// // value 15 if the key does not exist. -// Field int `properties:"myName,default=15"` -// -// // Field is assigned value of key 'Field' and has a default -// // value 15 if the key does not exist. -// Field int `properties:",default=15"` -// -// // Field is assigned value of key 'date' and the date -// // is in format 2006-01-02 -// Field time.Time `properties:"date,layout=2006-01-02"` -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas. -// Field []string -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas and has a default -// // value ["a", "b", "c"] if the key does not exist. -// Field []string `properties:",default=a;b;c"` -// -// // Field is decoded recursively with "Field." as key prefix. -// Field SomeStruct -// -// // Field is decoded recursively with "myName." as key prefix. -// Field SomeStruct `properties:"myName"` -// -// // Field is decoded recursively with "Field." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string -// -// // Field is decoded recursively with "myName." as key prefix -// // and the next dotted element of the key as map key. 
-// Field map[string]string `properties:"myName"` -func (p *Properties) Decode(x interface{}) error { - t, v := reflect.TypeOf(x), reflect.ValueOf(x) - if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { - return fmt.Errorf("not a pointer to struct: %s", t) - } - if err := dec(p, "", nil, nil, v); err != nil { - return err - } - return nil -} - -func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { - t := v.Type() - - // value returns the property value for key or the default if provided. - value := func() (string, error) { - if val, ok := p.Get(key); ok { - return val, nil - } - if def != nil { - return *def, nil - } - return "", fmt.Errorf("missing required key %s", key) - } - - // conv converts a string to a value of the given type. - conv := func(s string, t reflect.Type) (val reflect.Value, err error) { - var v interface{} - - switch { - case isDuration(t): - v, err = time.ParseDuration(s) - - case isTime(t): - layout := opts["layout"] - if layout == "" { - layout = time.RFC3339 - } - v, err = time.Parse(layout, s) - - case isBool(t): - v, err = boolVal(s), nil - - case isString(t): - v, err = s, nil - - case isFloat(t): - v, err = strconv.ParseFloat(s, 64) - - case isInt(t): - v, err = strconv.ParseInt(s, 10, 64) - - case isUint(t): - v, err = strconv.ParseUint(s, 10, 64) - - default: - return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) - } - if err != nil { - return reflect.Zero(t), err - } - return reflect.ValueOf(v).Convert(t), nil - } - - // keydef returns the property key and the default value based on the - // name of the struct field and the options in the tag. - keydef := func(f reflect.StructField) (string, *string, map[string]string) { - _key, _opts := parseTag(f.Tag.Get("properties")) - - var _def *string - if d, ok := _opts["default"]; ok { - _def = &d - } - if _key != "" { - return _key, _def, _opts - } - return f.Name, _def, _opts - } - - switch { - case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): - s, err := value() - if err != nil { - return err - } - val, err := conv(s, t) - if err != nil { - return err - } - v.Set(val) - - case isPtr(t): - return dec(p, key, def, opts, v.Elem()) - - case isStruct(t): - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } - if key != "" { - fk = key + "." 
+ fk - } - if err := dec(p, fk, def, opts, fv); err != nil { - return err - } - } - return nil - - case isArray(t): - val, err := value() - if err != nil { - return err - } - vals := split(val, ";") - a := reflect.MakeSlice(t, 0, len(vals)) - for _, s := range vals { - val, err := conv(s, t.Elem()) - if err != nil { - return err - } - a = reflect.Append(a, val) - } - v.Set(a) - - case isMap(t): - valT := t.Elem() - m := reflect.MakeMap(t) - for postfix := range p.FilterStripPrefix(key + ".").m { - pp := strings.SplitN(postfix, ".", 2) - mk, mv := pp[0], reflect.New(valT) - if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) - } - v.Set(m) - - default: - return fmt.Errorf("unsupported type %s", t) - } - return nil -} - -// split splits a string on sep, trims whitespace of elements -// and omits empty elements -func split(s string, sep string) []string { - var a []string - for _, v := range strings.Split(s, sep) { - if v = strings.TrimSpace(v); v != "" { - a = append(a, v) - } - } - return a -} - -// parseTag parses a "key,k=v,k=v,..." -func parseTag(tag string) (key string, opts map[string]string) { - opts = map[string]string{} - for i, s := range strings.Split(tag, ",") { - if i == 0 { - key = s - continue - } - - pp := strings.SplitN(s, "=", 2) - if len(pp) == 1 { - opts[pp[0]] = "" - } else { - opts[pp[0]] = pp[1] - } - } - return key, opts -} - -func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } -func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } -func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } -func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } -func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } -func isString(t reflect.Type) bool { return t.Kind() == reflect.String } -func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } -func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } -func isFloat(t reflect.Type) bool { - return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 -} -func isInt(t reflect.Type) bool { - return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 -} -func isUint(t reflect.Type) bool { - return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 -} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go deleted file mode 100644 index 7c7979315..000000000 --- a/vendor/github.com/magiconair/properties/doc.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package properties provides functions for reading and writing -// ISO-8859-1 and UTF-8 encoded .properties files and has -// support for recursive property expansion. -// -// Java properties files are ISO-8859-1 encoded and use Unicode -// literals for characters outside the ISO character set. Unicode -// literals can be used in UTF-8 encoded properties files but -// aren't necessary. 
-// -// To load a single properties file use MustLoadFile(): -// -// p := properties.MustLoadFile(filename, properties.UTF8) -// -// To load multiple properties files use MustLoadFiles() -// which loads the files in the given order and merges the -// result. Missing properties files can be ignored if the -// 'ignoreMissing' flag is set to true. -// -// Filenames can contain environment variables which are expanded -// before loading. -// -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) -// -// All of the different key/value delimiters ' ', ':' and '=' are -// supported as well as the comment characters '!' and '#' and -// multi-line values. -// -// ! this is a comment -// # and so is this -// -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue -// -// Properties stores all comments preceding a key and provides -// GetComments() and SetComments() methods to retrieve and -// update them. The convenience functions GetComment() and -// SetComment() allow access to the last comment. The -// WriteComment() method writes properties files including -// the comments and with the keys in the original order. -// This can be used for sanitizing properties files. -// -// Property expansion is recursive and circular references -// and malformed expressions are not allowed and cause an -// error. Expansion of environment variables is supported. -// -// # standard property -// key = value -// -// # property expansion: key2 = value -// key2 = ${key} -// -// # recursive expansion: key3 = value -// key3 = ${key2} -// -// # circular reference (error) -// key = ${key} -// -// # malformed expression (error) -// key = ${ke -// -// # refers to the users' home dir -// home = ${HOME} -// -// # local key takes precedence over env var: u = foo -// USER = foo -// u = ${USER} -// -// The default property expansion format is ${key} but can be -// changed by setting different pre- and postfix values on the -// Properties object. -// -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" -// -// Properties provides convenience functions for getting typed -// values with default values if the key does not exist or the -// type conversion failed. -// -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) -// -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. -// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) -// -// As an alternative properties may be applied with the standard -// library's flag implementation at any time. -// -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() -// -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) -// -// Properties provides several MustXXX() convenience functions -// which will terminate the app if an error occurs. The behavior -// of the failure is configurable and the default is to call -// log.Fatal(err). To have the MustXXX() functions panic instead -// of logging the error set a different ErrorHandler before -// you use the Properties package. 
-// -// properties.ErrorHandler = properties.PanicHandler -// -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") -// -// You can also provide your own ErrorHandler function. The only requirement -// is that the error handler function must exit after handling the error. -// -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } -// -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") -// -// Properties can also be loaded into a struct via the `Decode` -// method, e.g. -// -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } -// -// See `Decode()` method for the full documentation. -// -// The following documents provide a description of the properties -// file format. -// -// http://en.wikipedia.org/wiki/.properties -// -// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go deleted file mode 100644 index 35d0ae97b..000000000 --- a/vendor/github.com/magiconair/properties/integrate.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import "flag" - -// MustFlag sets flags that are skipped by dst.Parse when p contains -// the respective key for flag.Flag.Name. -// -// It's use is recommended with command line arguments as in: -// -// flag.Parse() -// p.MustFlag(flag.CommandLine) -func (p *Properties) MustFlag(dst *flag.FlagSet) { - m := make(map[string]*flag.Flag) - dst.VisitAll(func(f *flag.Flag) { - m[f.Name] = f - }) - dst.Visit(func(f *flag.Flag) { - delete(m, f.Name) // overridden - }) - - for name, f := range m { - v, ok := p.Get(name) - if !ok { - continue - } - - if err := f.Value.Set(v); err != nil { - ErrorHandler(err) - } - } -} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go deleted file mode 100644 index 3d15a1f6e..000000000 --- a/vendor/github.com/magiconair/properties/lex.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Parts of the lexer are from the template/text/parser package -// For these parts the following applies: -// -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file of the go 1.2 -// distribution. - -package properties - -import ( - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos int // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. 
-type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemEOF - itemKey // a key - itemValue // a value - itemComment // a comment -) - -// defines a constant for EOF -const eof = -1 - -// permitted whitespace characters space, FF and TAB -const whitespace = " \f\t" - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - input string // the string being scanned - state stateFn // the next lexing function to enter - pos int // current position in the input - start int // start position of this item - width int // width of last rune read from input - lastPos int // position of most recent item returned by nextItem - runes []rune // scanned runes for this item - items chan item // channel of scanned items -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - i := item{t, l.start, string(l.runes)} - l.items <- i - l.start = l.pos - l.runes = l.runes[:0] -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// appends the rune to the current value -func (l *lexer) appendRune(r rune) { - l.runes = append(l.runes, r) -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - i := <-l.items - l.lastPos = i.pos - return i -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - runes: make([]rune, 0, 32), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexBeforeKey(l); l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -// lexBeforeKey scans until a key begins. 
-func lexBeforeKey(l *lexer) stateFn { - switch r := l.next(); { - case isEOF(r): - l.emit(itemEOF) - return nil - - case isEOL(r): - l.ignore() - return lexBeforeKey - - case isComment(r): - return lexComment - - case isWhitespace(r): - l.ignore() - return lexBeforeKey - - default: - l.backup() - return lexKey - } -} - -// lexComment scans a comment line. The comment character has already been scanned. -func lexComment(l *lexer) stateFn { - l.acceptRun(whitespace) - l.ignore() - for { - switch r := l.next(); { - case isEOF(r): - l.ignore() - l.emit(itemEOF) - return nil - case isEOL(r): - l.emit(itemComment) - return lexBeforeKey - default: - l.appendRune(r) - } - } -} - -// lexKey scans the key up to a delimiter -func lexKey(l *lexer) stateFn { - var r rune - -Loop: - for { - switch r = l.next(); { - - case isEscape(r): - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - - case isEndOfKey(r): - l.backup() - break Loop - - case isEOF(r): - break Loop - - default: - l.appendRune(r) - } - } - - if len(l.runes) > 0 { - l.emit(itemKey) - } - - if isEOF(r) { - l.emit(itemEOF) - return nil - } - - return lexBeforeValue -} - -// lexBeforeValue scans the delimiter between key and value. -// Leading and trailing whitespace is ignored. -// We expect to be just after the key. -func lexBeforeValue(l *lexer) stateFn { - l.acceptRun(whitespace) - l.accept(":=") - l.acceptRun(whitespace) - l.ignore() - return lexValue -} - -// lexValue scans text until the end of the line. We expect to be just after the delimiter. -func lexValue(l *lexer) stateFn { - for { - switch r := l.next(); { - case isEscape(r): - if isEOL(l.peek()) { - l.next() - l.acceptRun(whitespace) - } else { - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - } - - case isEOL(r): - l.emit(itemValue) - l.ignore() - return lexBeforeKey - - case isEOF(r): - l.emit(itemValue) - l.emit(itemEOF) - return nil - - default: - l.appendRune(r) - } - } -} - -// scanEscapeSequence scans either one of the escaped characters -// or a unicode literal. We expect to be after the escape character. -func (l *lexer) scanEscapeSequence() error { - switch r := l.next(); { - - case isEscapedCharacter(r): - l.appendRune(decodeEscapedCharacter(r)) - return nil - - case atUnicodeLiteral(r): - return l.scanUnicodeLiteral() - - case isEOF(r): - return fmt.Errorf("premature EOF") - - // silently drop the escape character and append the rune as is - default: - l.appendRune(r) - return nil - } -} - -// scans a unicode literal in the form \uXXXX. We expect to be after the \u. -func (l *lexer) scanUnicodeLiteral() error { - // scan the digits - d := make([]rune, 4) - for i := 0; i < 4; i++ { - d[i] = l.next() - if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { - return fmt.Errorf("invalid unicode literal") - } - } - - // decode the digits into a rune - r, err := strconv.ParseInt(string(d), 16, 0) - if err != nil { - return err - } - - l.appendRune(rune(r)) - return nil -} - -// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. -func decodeEscapedCharacter(r rune) rune { - switch r { - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - default: - return r - } -} - -// atUnicodeLiteral reports whether we are at a unicode literal. -// The escape character has already been consumed. 
-func atUnicodeLiteral(r rune) bool { - return r == 'u' -} - -// isComment reports whether we are at the start of a comment. -func isComment(r rune) bool { - return r == '#' || r == '!' -} - -// isEndOfKey reports whether the rune terminates the current key. -func isEndOfKey(r rune) bool { - return strings.ContainsRune(" \f\t\r\n:=", r) -} - -// isEOF reports whether we are at EOF. -func isEOF(r rune) bool { - return r == eof -} - -// isEOL reports whether we are at a new line character. -func isEOL(r rune) bool { - return r == '\n' || r == '\r' -} - -// isEscape reports whether the rune is the escape character which -// prefixes unicode literals and other escaped characters. -func isEscape(r rune) bool { - return r == '\\' -} - -// isEscapedCharacter reports whether we are at one of the characters that need escaping. -// The escape character has already been consumed. -func isEscapedCharacter(r rune) bool { - return strings.ContainsRune(" :=fnrt", r) -} - -// isWhitespace reports whether the rune is a whitespace character. -func isWhitespace(r rune) bool { - return strings.ContainsRune(whitespace, r) -} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go deleted file mode 100644 index 635368dc8..000000000 --- a/vendor/github.com/magiconair/properties/load.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" -) - -// Encoding specifies encoding of the input data. -type Encoding uint - -const ( - // utf8Default is a private placeholder for the zero value of Encoding to - // ensure that it has the correct meaning. UTF8 is the default encoding but - // was assigned a non-zero value which cannot be changed without breaking - // existing code. Clients should continue to use the public constants. - utf8Default Encoding = iota - - // UTF8 interprets the input data as UTF-8. - UTF8 - - // ISO_8859_1 interprets the input data as ISO-8859-1. - ISO_8859_1 -) - -type Loader struct { - // Encoding determines how the data from files and byte buffers - // is interpreted. For URLs the Content-Type header is used - // to determine the encoding of the data. - Encoding Encoding - - // DisableExpansion configures the property expansion of the - // returned property object. When set to true, the property values - // will not be expanded and the Property object will not be checked - // for invalid expansion expressions. - DisableExpansion bool - - // IgnoreMissing configures whether missing files or URLs which return - // 404 are reported as errors. When set to true, missing files and 404 - // status codes are not reported as errors. - IgnoreMissing bool -} - -// Load reads a buffer into a Properties struct. -func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { - return l.loadBytes(buf, l.Encoding) -} - -// LoadAll reads the content of multiple URLs or files in the given order into -// a Properties struct. If IgnoreMissing is true then a 404 status code or -// missing file will not be reported as error. Encoding sets the encoding for -// files. For the URLs see LoadURL for the Content-Type header and the -// encoding. 
-func (l *Loader) LoadAll(names []string) (*Properties, error) { - all := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - - var p *Properties - switch { - case strings.HasPrefix(n, "http://"): - p, err = l.LoadURL(n) - case strings.HasPrefix(n, "https://"): - p, err = l.LoadURL(n) - default: - p, err = l.LoadFile(n) - } - if err != nil { - return nil, err - } - all.Merge(p) - } - - all.DisableExpansion = l.DisableExpansion - if all.DisableExpansion { - return all, nil - } - return all, all.check() -} - -// LoadFile reads a file into a Properties struct. -// If IgnoreMissing is true then a missing file will not be -// reported as error. -func (l *Loader) LoadFile(filename string) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if l.IgnoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. skipping", filename) - return NewProperties(), nil - } - return nil, err - } - return l.loadBytes(data, l.Encoding) -} - -// LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. -func (l *Loader) LoadURL(url string) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 && l.IgnoreMissing { - LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) - return NewProperties(), nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - ct = strings.Join(strings.Fields(ct), "") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": - enc = ISO_8859_1 - case "", "text/plain;charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - return l.loadBytes(body, enc) -} - -func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - p.DisableExpansion = l.DisableExpansion - if p.DisableExpansion { - return p, nil - } - return p, p.check() -} - -// Load reads a buffer into a Properties struct. -func Load(buf []byte, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadBytes(buf) -} - -// LoadString reads an UTF8 string into a properties struct. -func LoadString(s string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadBytes([]byte(s)) -} - -// LoadMap creates a new Properties struct from a string map. -func LoadMap(m map[string]string) *Properties { - p := NewProperties() - for k, v := range m { - p.Set(k, v) - } - return p -} - -// LoadFile reads a file into a Properties struct. 
-func LoadFile(filename string, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadAll([]string{filename}) -} - -// LoadFiles reads multiple files in the given order into -// a Properties struct. If 'ignoreMissing' is true then -// non-existent files will not be reported as error. -func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(filenames) -} - -// LoadURL reads the content of the URL into a Properties struct. -// See Loader#LoadURL for details. -func LoadURL(url string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadAll([]string{url}) -} - -// LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If IgnoreMissing is true then a 404 status code will -// not be reported as error. See Loader#LoadURL for the Content-Type header -// and the encoding. -func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} - return l.LoadAll(urls) -} - -// LoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. -func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(names) -} - -// MustLoadString reads an UTF8 string into a Properties struct and -// panics on error. -func MustLoadString(s string) *Properties { - return must(LoadString(s)) -} - -// MustLoadFile reads a file into a Properties struct and -// panics on error. -func MustLoadFile(filename string, enc Encoding) *Properties { - return must(LoadFile(filename, enc)) -} - -// MustLoadFiles reads multiple files in the given order into -// a Properties struct and panics on error. If 'ignoreMissing' -// is true then non-existent files will not be reported as error. -func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadFiles(filenames, enc, ignoreMissing)) -} - -// MustLoadURL reads the content of a URL into a Properties struct and -// panics on error. -func MustLoadURL(url string) *Properties { - return must(LoadURL(url)) -} - -// MustLoadURLs reads the content of multiple URLs in the given order into a -// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 -// status code will not be reported as error. -func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { - return must(LoadURLs(urls, ignoreMissing)) -} - -// MustLoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. It panics on error. -func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadAll(names, enc, ignoreMissing)) -} - -func must(p *Properties, err error) *Properties { - if err != nil { - ErrorHandler(err) - } - return p -} - -// expandName expands ${ENV_VAR} expressions in a name. 
-// If the environment variable does not exist then it will be replaced -// with an empty string. Malformed expressions like "${ENV_VAR" will -// be reported as error. -func expandName(name string) (string, error) { - return expand(name, []string{}, "${", "}", make(map[string]string)) -} - -// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. -// For ISO-8859-1 we can convert each byte straight into a rune since the -// first 256 unicode code points cover ISO-8859-1. -func convert(buf []byte, enc Encoding) string { - switch enc { - case utf8Default, UTF8: - return string(buf) - case ISO_8859_1: - runes := make([]rune, len(buf)) - for i, b := range buf { - runes[i] = rune(b) - } - return string(runes) - default: - ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) - } - panic("ErrorHandler should exit") -} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go deleted file mode 100644 index fccfd39f6..000000000 --- a/vendor/github.com/magiconair/properties/parser.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "runtime" -) - -type parser struct { - lex *lexer -} - -func parse(input string) (properties *Properties, err error) { - p := &parser{lex: lex(input)} - defer p.recover(&err) - - properties = NewProperties() - key := "" - comments := []string{} - - for { - token := p.expectOneOf(itemComment, itemKey, itemEOF) - switch token.typ { - case itemEOF: - goto done - case itemComment: - comments = append(comments, token.val) - continue - case itemKey: - key = token.val - if _, ok := properties.m[key]; !ok { - properties.k = append(properties.k, key) - } - } - - token = p.expectOneOf(itemValue, itemEOF) - if len(comments) > 0 { - properties.c[key] = comments - comments = []string{} - } - switch token.typ { - case itemEOF: - properties.m[key] = "" - goto done - case itemValue: - properties.m[key] = token.val - } - } - -done: - return properties, nil -} - -func (p *parser) errorf(format string, args ...interface{}) { - format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -func (p *parser) expectOneOf(expected ...itemType) (token item) { - token = p.lex.nextItem() - for _, v := range expected { - if token.typ == v { - return token - } - } - p.unexpected(token) - panic("unexpected token") -} - -func (p *parser) unexpected(token item) { - p.errorf(token.String()) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - *errp = e.(error) - } -} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go deleted file mode 100644 index fb2f7b404..000000000 --- a/vendor/github.com/magiconair/properties/properties.go +++ /dev/null @@ -1,848 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. -// BUG(frank): Write() does not allow to configure the newline character. 
Therefore, on Windows LF is used. - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -const maxExpansionDepth = 64 - -// ErrorHandlerFunc defines the type of function which handles failures -// of the MustXXX() functions. An error handler function must exit -// the application after handling the error. -type ErrorHandlerFunc func(error) - -// ErrorHandler is the function which handles failures of the MustXXX() -// functions. The default is LogFatalHandler. -var ErrorHandler ErrorHandlerFunc = LogFatalHandler - -// LogHandlerFunc defines the function prototype for logging errors. -type LogHandlerFunc func(fmt string, args ...interface{}) - -// LogPrintf defines a log handler which uses log.Printf. -var LogPrintf LogHandlerFunc = log.Printf - -// LogFatalHandler handles the error by logging a fatal error and exiting. -func LogFatalHandler(err error) { - log.Fatal(err) -} - -// PanicHandler handles the error by panicking. -func PanicHandler(err error) { - panic(err) -} - -// ----------------------------------------------------------------------------- - -// A Properties contains the key/value pairs from the properties input. -// All values are stored in unexpanded form and are expanded at runtime -type Properties struct { - // Pre-/Postfix for property expansion. - Prefix string - Postfix string - - // DisableExpansion controls the expansion of properties on Get() - // and the check for circular references on Set(). When set to - // true Properties behaves like a simple key/value store and does - // not check for circular references on Get() or on Set(). - DisableExpansion bool - - // Stores the key/value pairs - m map[string]string - - // Stores the comments per key. - c map[string][]string - - // Stores the keys in order of appearance. - k []string - - // WriteSeparator specifies the separator of key and value while writing the properties. - WriteSeparator string -} - -// NewProperties creates a new Properties struct with the default -// configuration for "${key}" expressions. -func NewProperties() *Properties { - return &Properties{ - Prefix: "${", - Postfix: "}", - m: map[string]string{}, - c: map[string][]string{}, - k: []string{}, - } -} - -// Load reads a buffer into the given Properties struct. -func (p *Properties) Load(buf []byte, enc Encoding) error { - l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} - newProperties, err := l.LoadBytes(buf) - if err != nil { - return err - } - p.Merge(newProperties) - return nil -} - -// Get returns the expanded value for the given key if exists. -// Otherwise, ok is false. -func (p *Properties) Get(key string) (value string, ok bool) { - v, ok := p.m[key] - if p.DisableExpansion { - return v, ok - } - if !ok { - return "", false - } - - expanded, err := p.expand(key, v) - - // we guarantee that the expanded value is free of - // circular references and malformed expressions - // so we panic if we still get an error here. - if err != nil { - ErrorHandler(err) - } - - return expanded, true -} - -// MustGet returns the expanded value for the given key if exists. -// Otherwise, it panics. -func (p *Properties) MustGet(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// ClearComments removes the comments for all keys. 
-func (p *Properties) ClearComments() { - p.c = map[string][]string{} -} - -// ---------------------------------------------------------------------------- - -// GetComment returns the last comment before the given key or an empty string. -func (p *Properties) GetComment(key string) string { - comments, ok := p.c[key] - if !ok || len(comments) == 0 { - return "" - } - return comments[len(comments)-1] -} - -// ---------------------------------------------------------------------------- - -// GetComments returns all comments that appeared before the given key or nil. -func (p *Properties) GetComments(key string) []string { - if comments, ok := p.c[key]; ok { - return comments - } - return nil -} - -// ---------------------------------------------------------------------------- - -// SetComment sets the comment for the key. -func (p *Properties) SetComment(key, comment string) { - p.c[key] = []string{comment} -} - -// ---------------------------------------------------------------------------- - -// SetComments sets the comments for the key. If the comments are nil then -// all comments for this key are deleted. -func (p *Properties) SetComments(key string, comments []string) { - if comments == nil { - delete(p.c, key) - return - } - p.c[key] = comments -} - -// ---------------------------------------------------------------------------- - -// GetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the default value is returned. -func (p *Properties) GetBool(key string, def bool) bool { - v, err := p.getBool(key) - if err != nil { - return def - } - return v -} - -// MustGetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the function panics. -func (p *Properties) MustGetBool(key string) bool { - v, err := p.getBool(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getBool(key string) (value bool, err error) { - if v, ok := p.Get(key); ok { - return boolVal(v), nil - } - return false, invalidKeyError(key) -} - -func boolVal(v string) bool { - v = strings.ToLower(v) - return v == "1" || v == "true" || v == "yes" || v == "on" -} - -// ---------------------------------------------------------------------------- - -// GetDuration parses the expanded value as an time.Duration (in ns) if the -// key exists. If key does not exist or the value cannot be parsed the default -// value is returned. In almost all cases you want to use GetParsedDuration(). -func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { - v, err := p.getInt64(key) - if err != nil { - return def - } - return time.Duration(v) -} - -// MustGetDuration parses the expanded value as an time.Duration (in ns) if -// the key exists. If key does not exist or the value cannot be parsed the -// function panics. In almost all cases you want to use MustGetParsedDuration(). -func (p *Properties) MustGetDuration(key string) time.Duration { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return time.Duration(v) -} - -// ---------------------------------------------------------------------------- - -// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. 
-func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { - s, ok := p.Get(key) - if !ok { - return def - } - v, err := time.ParseDuration(s) - if err != nil { - return def - } - return v -} - -// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetParsedDuration(key string) time.Duration { - s, ok := p.Get(key) - if !ok { - ErrorHandler(invalidKeyError(key)) - } - v, err := time.ParseDuration(s) - if err != nil { - ErrorHandler(err) - } - return v -} - -// ---------------------------------------------------------------------------- - -// GetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetFloat64(key string, def float64) float64 { - v, err := p.getFloat64(key) - if err != nil { - return def - } - return v -} - -// MustGetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetFloat64(key string) float64 { - v, err := p.getFloat64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getFloat64(key string) (value float64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseFloat(v, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetInt(key string, def int) int { - v, err := p.getInt64(key) - if err != nil { - return def - } - return intRangeCheck(key, v) -} - -// MustGetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetInt(key string) int { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return intRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetInt64 parses the expanded value as an int64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetInt64(key string, def int64) int64 { - v, err := p.getInt64(key) - if err != nil { - return def - } - return v -} - -// MustGetInt64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetInt64(key string) int64 { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getInt64(key string) (value int64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetUint parses the expanded value as an uint if the key exists. 
-// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetUint(key string, def uint) uint { - v, err := p.getUint64(key) - if err != nil { - return def - } - return uintRangeCheck(key, v) -} - -// MustGetUint parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetUint(key string) uint { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return uintRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetUint64 parses the expanded value as an uint64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetUint64(key string, def uint64) uint64 { - v, err := p.getUint64(key) - if err != nil { - return def - } - return v -} - -// MustGetUint64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetUint64(key string) uint64 { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getUint64(key string) (value uint64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseUint(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetString returns the expanded value for the given key if exists or -// the default value otherwise. -func (p *Properties) GetString(key, def string) string { - if v, ok := p.Get(key); ok { - return v - } - return def -} - -// MustGetString returns the expanded value for the given key if exists or -// panics otherwise. -func (p *Properties) MustGetString(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// Filter returns a new properties object which contains all properties -// for which the key matches the pattern. -func (p *Properties) Filter(pattern string) (*Properties, error) { - re, err := regexp.Compile(pattern) - if err != nil { - return nil, err - } - - return p.FilterRegexp(re), nil -} - -// FilterRegexp returns a new properties object which contains all properties -// for which the key matches the regular expression. -func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { - pp := NewProperties() - for _, k := range p.k { - if re.MatchString(k) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterPrefix returns a new properties object with a subset of all keys -// with the given prefix. -func (p *Properties) FilterPrefix(prefix string) *Properties { - pp := NewProperties() - for _, k := range p.k { - if strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. 
- // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterStripPrefix returns a new properties object with a subset of all keys -// with the given prefix and the prefix removed from the keys. -func (p *Properties) FilterStripPrefix(prefix string) *Properties { - pp := NewProperties() - n := len(prefix) - for _, k := range p.k { - if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference - // TODO(fs): this function should probably return an error but the signature is fixed - pp.Set(k[n:], p.m[k]) - } - } - return pp -} - -// Len returns the number of keys. -func (p *Properties) Len() int { - return len(p.m) -} - -// Keys returns all keys in the same order as in the input. -func (p *Properties) Keys() []string { - keys := make([]string, len(p.k)) - copy(keys, p.k) - return keys -} - -// Set sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. If the value contains a -// circular reference or a malformed expression then -// an error is returned. -// An empty key is silently ignored. -func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { - if key == "" { - return "", false, nil - } - - // if expansion is disabled we allow circular references - if p.DisableExpansion { - prev, ok = p.Get(key) - p.m[key] = value - if !ok { - p.k = append(p.k, key) - } - return prev, ok, nil - } - - // to check for a circular reference we temporarily need - // to set the new value. If there is an error then revert - // to the previous state. Only if all tests are successful - // then we add the key to the p.k list. - prev, ok = p.Get(key) - p.m[key] = value - - // now check for a circular reference - _, err = p.expand(key, value) - if err != nil { - - // revert to the previous state - if ok { - p.m[key] = prev - } else { - delete(p.m, key) - } - - return "", false, err - } - - if !ok { - p.k = append(p.k, key) - } - - return prev, ok, nil -} - -// SetValue sets property key to the default string value -// as defined by fmt.Sprintf("%v"). -func (p *Properties) SetValue(key string, value interface{}) error { - _, _, err := p.Set(key, fmt.Sprintf("%v", value)) - return err -} - -// MustSet sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. An empty key is silently ignored. -func (p *Properties) MustSet(key, value string) (prev string, ok bool) { - prev, ok, err := p.Set(key, value) - if err != nil { - ErrorHandler(err) - } - return prev, ok -} - -// String returns a string of all expanded 'key = value' pairs. -func (p *Properties) String() string { - var s string - for _, key := range p.k { - value, _ := p.Get(key) - s = fmt.Sprintf("%s%s = %s\n", s, key, value) - } - return s -} - -// Sort sorts the properties keys in alphabetical order. -// This is helpfully before writing the properties. -func (p *Properties) Sort() { - sort.Strings(p.k) -} - -// Write writes all unexpanded 'key = value' pairs to the given writer. -// Write returns the number of bytes written and any write error encountered. 
-func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { - return p.WriteComment(w, "", enc) -} - -// WriteComment writes all unexpanced 'key = value' pairs to the given writer. -// If prefix is not empty then comments are written with a blank line and the -// given prefix. The prefix should be either "# " or "! " to be compatible with -// the properties file format. Otherwise, the properties parser will not be -// able to read the file back in. It returns the number of bytes written and -// any write error encountered. -func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { - var x int - - for _, key := range p.k { - value := p.m[key] - - if prefix != "" { - if comments, ok := p.c[key]; ok { - // don't print comments if they are all empty - allEmpty := true - for _, c := range comments { - if c != "" { - allEmpty = false - break - } - } - - if !allEmpty { - // add a blank line between entries but not at the top - if len(comments) > 0 && n > 0 { - x, err = fmt.Fprintln(w) - if err != nil { - return - } - n += x - } - - for _, c := range comments { - x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) - if err != nil { - return - } - n += x - } - } - } - } - sep := " = " - if p.WriteSeparator != "" { - sep = p.WriteSeparator - } - x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) - if err != nil { - return - } - n += x - } - return -} - -// Map returns a copy of the properties as a map. -func (p *Properties) Map() map[string]string { - m := make(map[string]string) - for k, v := range p.m { - m[k] = v - } - return m -} - -// FilterFunc returns a copy of the properties which includes the values which passed all filters. -func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { - pp := NewProperties() -outer: - for k, v := range p.m { - for _, f := range filters { - if !f(k, v) { - continue outer - } - pp.Set(k, v) - } - } - return pp -} - -// ---------------------------------------------------------------------------- - -// Delete removes the key and its comments. -func (p *Properties) Delete(key string) { - delete(p.m, key) - delete(p.c, key) - newKeys := []string{} - for _, k := range p.k { - if k != key { - newKeys = append(newKeys, k) - } - } - p.k = newKeys -} - -// Merge merges properties, comments and keys from other *Properties into p -func (p *Properties) Merge(other *Properties) { - for _, k := range other.k { - if _, ok := p.m[k]; !ok { - p.k = append(p.k, k) - } - } - for k, v := range other.m { - p.m[k] = v - } - for k, v := range other.c { - p.c[k] = v - } -} - -// ---------------------------------------------------------------------------- - -// check expands all values and returns an error if a circular reference or -// a malformed expression was found. -func (p *Properties) check() error { - for key, value := range p.m { - if _, err := p.expand(key, value); err != nil { - return err - } - } - return nil -} - -func (p *Properties) expand(key, input string) (string, error) { - // no pre/postfix -> nothing to expand - if p.Prefix == "" && p.Postfix == "" { - return input, nil - } - - return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) -} - -// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. -// The function keeps track of the keys that were already expanded and stops if it -// detects a circular reference or a malformed expression of the form '(prefix)key'. 
-func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { - if len(keys) > maxExpansionDepth { - return "", fmt.Errorf("expansion too deep") - } - - for { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") - } - - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] - - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - - for _, k := range keys { - if key == k { - var b bytes.Buffer - b.WriteString("circular reference in:\n") - for _, k1 := range keys { - fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) - } - return "", fmt.Errorf(b.String()) - } - } - - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } - new_val, err := expand(val, append(keys, key), prefix, postfix, values) - if err != nil { - return "", err - } - s = s[:start] + new_val + s[end+1:] - } -} - -// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. -func encode(s string, special string, enc Encoding) string { - switch enc { - case UTF8: - return encodeUtf8(s, special) - case ISO_8859_1: - return encodeIso(s, special) - default: - panic(fmt.Sprintf("unsupported encoding %v", enc)) - } -} - -func encodeUtf8(s string, special string) string { - v := "" - for pos := 0; pos < len(s); { - r, w := utf8.DecodeRuneInString(s[pos:]) - pos += w - v += escape(r, special) - } - return v -} - -func encodeIso(s string, special string) string { - var r rune - var w int - var v string - for pos := 0; pos < len(s); { - switch r, w = utf8.DecodeRuneInString(s[pos:]); { - case r < 1<<8: // single byte rune -> escape special chars only - v += escape(r, special) - case r < 1<<16: // two byte rune -> unicode literal - v += fmt.Sprintf("\\u%04x", r) - default: // more than two bytes per rune -> can't encode - v += "?" - } - pos += w - } - return v -} - -func escape(r rune, special string) string { - switch r { - case '\f': - return "\\f" - case '\n': - return "\\n" - case '\r': - return "\\r" - case '\t': - return "\\t" - case '\\': - return "\\\\" - default: - if strings.ContainsRune(special, r) { - return "\\" + string(r) - } - return string(r) - } -} - -func invalidKeyError(key string) error { - return fmt.Errorf("unknown property: %s", key) -} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go deleted file mode 100644 index dbd60b36e..000000000 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "math" -) - -// make this a var to overwrite it in a test -var is32Bit = ^uint(0) == math.MaxUint32 - -// intRangeCheck checks if the value fits into the int type and -// panics if it does not. -func intRangeCheck(key string, v int64) int { - if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return int(v) -} - -// uintRangeCheck checks if the value fits into the uint type and -// panics if it does not. 
-func uintRangeCheck(key string, v uint64) uint { - if is32Bit && v > math.MaxUint32 { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return uint(v) -} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index c75823490..000000000 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -## 1.5.0 - -* New option `IgnoreUntaggedFields` to ignore decoding to any fields - without `mapstructure` (or the configured tag name) set [GH-277] -* New option `ErrorUnset` which makes it an error if any fields - in a target struct are not set by the decoding process. [GH-225] -* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] -* Decoding to slice from array no longer crashes [GH-265] -* Decode nested struct pointers to map [GH-271] -* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] -* Fix issue where fields with `,omitempty` would sometimes decode - into a map with an empty string key [GH-281] - -## 1.4.3 - -* Fix cases where `json.Number` didn't decode properly [GH-261] - -## 1.4.2 - -* Custom name matchers to support any sort of casing, formatting, etc. for - field names. [GH-250] -* Fix possible panic in ComposeDecodeHookFunc [GH-251] - -## 1.4.1 - -* Fix regression where `*time.Time` value would be set to empty and not be sent - to decode hooks properly [GH-232] - -## 1.4.0 - -* A new decode hook type `DecodeHookFuncValue` has been added that has - access to the full values. [GH-183] -* Squash is now supported with embedded fields that are struct pointers [GH-205] -* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] - -## 1.3.3 - -* Decoding maps from maps creates a settable value for decode hooks [GH-203] - -## 1.3.2 - -* Decode into interface type with a struct value is supported [GH-187] - -## 1.3.1 - -* Squash should only squash embedded structs. [GH-194] - -## 1.3.0 - -* Added `",omitempty"` support. This will ignore zero values in the source - structure when encoding. [GH-145] - -## 1.2.3 - -* Fix duplicate entries in Keys list with pointer values. [GH-185] - -## 1.2.2 - -* Do not add unsettable (unexported) values to the unused metadata key - or "remain" value. [GH-150] - -## 1.2.1 - -* Go modules checksum mismatch fix - -## 1.2.0 - -* Added support to capture unused values in a field using the `",remain"` value - in the mapstructure tag. There is an example to showcase usage. -* Added `DecoderConfig` option to always squash embedded structs -* `json.Number` can decode into `uint` types -* Empty slices are preserved and not replaced with nil slices -* Fix panic that can occur in when decoding a map into a nil slice of structs -* Improved package documentation for godoc - -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. 
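For reference only (not part of this diff): the removed CHANGELOG above mentions decode-hook plumbing such as `ComposeDecodeHookFunc`, and the hook constructors themselves appear further down in the deleted `decode_hooks.go`. A minimal sketch of how a caller might wire them into a `DecoderConfig` now that the package is resolved from the module cache instead of `vendor/`; the `Config` struct, field names, and input map are hypothetical and exist only for illustration.

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

// Config is a hypothetical target struct used only for this sketch.
type Config struct {
	Timeout time.Duration `mapstructure:"timeout"`
	Tags    []string      `mapstructure:"tags"`
}

func main() {
	input := map[string]interface{}{
		"timeout": "30s",
		"tags":    "a,b,c",
	}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &cfg,
		// Hooks run in order; each one sees the output of the previous hook.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Timeout, cfg.Tags) // 30s [a b c]
}
```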
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9..000000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. 
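The removed README above motivates the library with a two-pass decode: read the payload into a `map[string]interface{}`, inspect the "type" key, then decode the same map into the matching struct. A hedged, self-contained sketch of that pattern follows (again for illustration only, not part of this diff); the `Person` struct mirrors the README's JSON example.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Person matches the README's example payload.
type Person struct {
	Name string `mapstructure:"name"`
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map so the "type" key can be inspected.
	var generic map[string]interface{}
	if err := json.Unmarshal(raw, &generic); err != nil {
		panic(err)
	}

	// Second pass: choose the concrete struct based on "type" and let
	// mapstructure populate it from the same map.
	switch generic["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(generic, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```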
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca72..000000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. -func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. 
-func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5af..000000000 --- a/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) - default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go deleted file mode 100644 index 1efb22ac3..000000000 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ /dev/null @@ -1,1540 +0,0 @@ -// Package mapstructure exposes functionality to convert one arbitrary -// Go type into another, typically to convert a map[string]interface{} -// into a native Go structure. 
-// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -// -// The simplest function to start with is Decode. -// -// Field Tags -// -// When decoding to a struct, mapstructure will use the field name by -// default to perform the mapping. For example, if a struct has a field -// "Username" then mapstructure will look for a key in the source value -// of "username" (case insensitive). -// -// type User struct { -// Username string -// } -// -// You can change the behavior of mapstructure by using struct tags. -// The default struct tag that mapstructure looks for is "mapstructure" -// but you can customize it using DecoderConfig. -// -// Renaming Fields -// -// To rename the key that mapstructure looks for, use the "mapstructure" -// tag and set a value directly. For example, to change the "username" example -// above to "user": -// -// type User struct { -// Username string `mapstructure:"user"` -// } -// -// Embedded Structs and Squashing -// -// Embedded structs are treated as if they're another field with that name. -// By default, the two structs below are equivalent when decoding with -// mapstructure: -// -// type Person struct { -// Name string -// } -// -// type Friend struct { -// Person -// } -// -// type Friend struct { -// Person Person -// } -// -// This would require an input that looks like below: -// -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } -// -// If your "person" value is NOT nested, then you can append ",squash" to -// your tag value and mapstructure will treat it as if the embedded struct -// were part of the struct directly. Example: -// -// type Friend struct { -// Person `mapstructure:",squash"` -// } -// -// Now the following input would be accepted: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// When decoding from a struct to a map, the squash tag squashes the struct -// fields into a single map. Using the example structs from above: -// -// Friend{Person: Person{Name: "alice"}} -// -// Will be decoded into a map: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// DecoderConfig has a field that changes the behavior of mapstructure -// to always squash embedded structs. -// -// Remainder Values -// -// If there are any unmapped keys in the source value, mapstructure by -// default will silently ignore them. You can error by setting ErrorUnused -// in DecoderConfig. If you're using Metadata you can also maintain a slice -// of the unused keys. -// -// You can also use the ",remain" suffix on your tag to collect all unused -// values in a map. The field with this tag MUST be a map type and should -// probably be a "map[string]interface{}" or "map[interface{}]interface{}". -// See example below: -// -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } -// -// Given the input below, Other would be populated with the other -// values that weren't used (everything but "name"): -// -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } -// -// Omit Empty Values -// -// When decoding from a struct to any other value, you may use the -// ",omitempty" suffix on your tag to omit that value if it equates to -// the zero value. The zero value of all types is specified in the Go -// specification. 
-// -// For example, the zero type of a numeric type is zero ("0"). If the struct -// field value is zero and a numeric type, the field is empty, and it won't -// be encoded into the destination type. -// -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } -// -// Unexported fields -// -// Since unexported (private) struct fields cannot be set outside the package -// where they are defined, the decoder will simply skip them. -// -// For this output type definition: -// -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } -// -// Using this map as input: -// -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } -// -// The following struct will be decoded: -// -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } -// -// Other Configuration -// -// mapstructure is highly configurable. See the DecoderConfig struct -// for other features and options that are supported. -package mapstructure - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" -) - -// DecodeHookFunc is the callback function that can be used for -// data transformations. See "DecodeHook" in the DecoderConfig -// struct. -// -// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or -// DecodeHookFuncValue. -// Values are a superset of Types (Values can return types), and Types are a -// superset of Kinds (Types can return Kinds) and are generally a richer thing -// to use, but Kinds are simpler if you only need those. -// -// The reason DecodeHookFunc is multi-typed is for backwards compatibility: -// we started with Kinds and then realized Types were the better solution, -// but have a promise to not break backwards compat so we now support -// both. -type DecodeHookFunc interface{} - -// DecodeHookFuncType is a DecodeHookFunc which has complete information about -// the source and target types. -type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) - -// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the -// source and target types. -type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) - -// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target -// values. -type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) - -// DecoderConfig is the configuration that is used to create a new decoder -// and allows customization of various aspects of decoding. -type DecoderConfig struct { - // DecodeHook, if set, will be called before any decoding and any - // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. The - // DecodeHook is called for every map and value in the input. This means - // that if a struct has embedded fields with squash tags the decode hook - // is called only once with all of the input data, not once for each - // embedded struct. - // - // If an error is returned, the entire decode will fail with that error. - DecodeHook DecodeHookFunc - - // If ErrorUnused is true, then it is an error for there to exist - // keys in the original map that were unused in the decoding process - // (extra keys). 
- ErrorUnused bool - - // If ErrorUnset is true, then it is an error for there to exist - // fields in the result that were not set in the decoding process - // (extra fields). This only applies to decoding to a struct. This - // will affect all nested structs as well. - ErrorUnset bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Squash will squash embedded structs. A squash tag may also be - // added to an individual struct field using a tag. For example: - // - // type Parent struct { - // Child `mapstructure:",squash"` - // } - Squash bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string - - // IgnoreUntaggedFields ignores all struct fields without explicit - // TagName, comparable to `mapstructure:"-"` as default behaviour. - IgnoreUntaggedFields bool - - // MatchName is the function used to match the map key to the struct - // field name or tag. Defaults to `strings.EqualFold`. This can be used - // to implement case-sensitive tag values, support snake casing, etc. - MatchName func(mapKey, fieldName string) bool -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string - - // Unset is a slice of field names that were found in the result interface - // but weren't set in the decoding process since there was no matching value - // in the input - Unset []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. 
-func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. -func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - - if config.Metadata.Unset == nil { - config.Metadata.Unset = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - if config.MatchName == nil { - config.MatchName = strings.EqualFold - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } - } - - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. 
- if d.config.ZeroFields { - outVal.Set(reflect.Zero(outVal.Type())) - - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - } - return nil - } - - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - return nil - } - - if d.config.DecodeHook != nil { - // We have a DecodeHook, so let's pre-process the input. - var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) - if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) - } - } - - var err error - outputKind := getKind(outVal) - addMetaKey := true - switch outputKind { - case reflect.Bool: - err = d.decodeBool(name, input, outVal) - case reflect.Interface: - err = d.decodeBasic(name, input, outVal) - case reflect.String: - err = d.decodeString(name, input, outVal) - case reflect.Int: - err = d.decodeInt(name, input, outVal) - case reflect.Uint: - err = d.decodeUint(name, input, outVal) - case reflect.Float32: - err = d.decodeFloat(name, input, outVal) - case reflect.Struct: - err = d.decodeStruct(name, input, outVal) - case reflect.Map: - err = d.decodeMap(name, input, outVal) - case reflect.Ptr: - addMetaKey, err = d.decodePtr(name, input, outVal) - case reflect.Slice: - err = d.decodeSlice(name, input, outVal) - case reflect.Array: - err = d.decodeArray(name, input, outVal) - case reflect.Func: - err = d.decodeFunc(name, input, outVal) - default: - // If we reached this point then we weren't able to decode it - return fmt.Errorf("%s: unsupported type: %s", name, outputKind) - } - - // If we reached here, then we successfully decoded SOMETHING, so - // mark the key as used if we're tracking metainput. - if addMetaKey && d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) - } - - return err -} - -// This decodes a basic type (bool, int, string, etc.) and sets the -// value to "data" of that type. -func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { - if val.IsValid() && val.Elem().IsValid() { - elem := val.Elem() - - // If we can't address this element, then its not writable. Instead, - // we make a copy of the value (which is a pointer and therefore - // writable), decode into that, and replace the whole value. - copied := false - if !elem.CanAddr() { - copied = true - - // Make *T - copy := reflect.New(elem.Type()) - - // *T = elem - copy.Elem().Set(elem) - - // Set elem so we decode into it - elem = copy - } - - // Decode. If we have an error then return. We also return right - // away if we're not a copy because that means we decoded directly. - if err := d.decode(name, data, elem); err != nil || !copied { - return err - } - - // If we're a copy, we need to set te final result - val.Set(elem.Elem()) - return nil - } - - dataVal := reflect.ValueOf(data) - - // If the input data is a pointer, and the assigned type is the dereference - // of that exact pointer, then indirect it so that we can assign it. 
- // Example: *string to string - if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { - dataVal = reflect.Indirect(dataVal) - } - - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseInt(str, 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot 
parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseUint(str, 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := strconv.ParseUint(string(jn), 0, 64) - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetUint(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - f, err := strconv.ParseFloat(str, val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - name+"["+strconv.Itoa(i)+"]", - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - // If the input data is empty, then we just match what the input data is. - if dataVal.Len() == 0 { - if dataVal.IsNil() { - if !val.IsNil() { - val.Set(dataVal) - } - } else { - // Set to empty allocated value - val.Set(valMap) - } - - return nil - } - - for _, k := range dataVal.MapKeys() { - fieldName := name + "[" + k.String() + "]" - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. 
- v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name - - if tagValue == "" && d.config.IgnoreUntaggedFields { - continue - } - - // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous - - v = dereferencePtrToStructIfNeeded(v, d.config.TagName) - - // Determine the name of the key in the map - if index := strings.Index(tagValue, ","); index != -1 { - if tagValue[:index] == "-" { - continue - } - // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { - continue - } - - // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 - if squash { - // When squashing, the embedded type can be a pointer to a struct. - if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { - v = v.Elem() - } - - // The final type must be a struct - if v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - } - if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { - keyName = keyNameTagValue - } - } else if len(tagValue) > 0 { - if tagValue == "-" { - continue - } - keyName = tagValue - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(vMap.Type()) - reflect.Indirect(addrVal).Set(vMap) - - err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) - if err != nil { - return err - } - - // the underlying map may have been completely overwritten so pull - // it indirectly out of the enclosing value. - vMap = reflect.Indirect(addrVal) - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { - // If the input data is nil, then we want to just set the output - // pointer to be nil as well. - isNil := data == nil - if !isNil { - switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { - case reflect.Chan, - reflect.Func, - reflect.Interface, - reflect.Map, - reflect.Ptr, - reflect.Slice: - isNil = v.IsNil() - } - } - if isNil { - if !val.IsNil() && val.CanSet() { - nilValue := reflect.New(val.Type()).Elem() - val.Set(nilValue) - } - - return true, nil - } - - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- valType := val.Type() - valElemType := valType.Elem() - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return false, err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return false, err - } - } - return false, nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // If we have a non array/slice type then we first attempt to convert. - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Slice and array we use the normal logic - case dataValKind == reflect.Slice, dataValKind == reflect.Array: - break - - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - - // If the input value is nil, then don't allocate since empty != nil - if dataValKind != reflect.Array && dataVal.IsNil() { - return nil - } - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Make a new slice to hold our result, same size as the original data. 
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. - return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - switch dataValKind { - case reflect.Map: - return d.decodeStructFromMap(name, dataVal, val) - - case reflect.Struct: - // Not the most efficient way to do this but we can optimize later if - // we want to. To convert from struct to struct we go to map first - // as an intermediary. 
- - // Make a new map to hold our result - mapType := reflect.TypeOf((map[string]interface{})(nil)) - mval := reflect.MakeMap(mapType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(mval.Type()) - - reflect.Indirect(addrVal).Set(mval) - if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { - return err - } - - result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) - return result - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - - // remainField is set to a valid field set with the "remain" tag if - // we are keeping track of remaining values. - var remainField *field - - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldVal := structVal.Field(i) - if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { - // Handle embedded struct pointers as embedded structs. - fieldVal = fieldVal.Elem() - } - - // If "squash" is specified in the tag, we squash the field down. 
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous - remain := false - - // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - - if tag == "remain" { - remain = true - break - } - } - - if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { - structs = append(structs, fieldVal) - } - continue - } - - // Build our field - if remain { - remainField = &field{fieldType, fieldVal} - } else { - // Normal struct field, store it away - fields = append(fields, field{fieldType, fieldVal}) - } - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if d.config.MatchName(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Remember it for potential errors and metadata. - targetValKeysUnused[fieldName] = struct{}{} - continue - } - } - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. - if name != "" { - fieldName = name + "." + fieldName - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - // If we have a "remain"-tagged field and we have unused keys then - // we put the unused keys directly into the remain field. - if remainField != nil && len(dataValKeysUnused) > 0 { - // Build a map of only the unused values - remain := map[interface{}]interface{}{} - for key := range dataValKeysUnused { - remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() - } - - // Decode it as-if we were just decoding this map onto our map. 
- if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) - } - - // Set the map to nil so we have none so that the next check will - // not error (ErrorUnused) - dataValKeysUnused = nil - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { - keys := make([]string, 0, len(targetValKeysUnused)) - for rawKey := range targetValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - for rawKey := range targetValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) - } - } - - return nil -} - -func isEmptyValue(v reflect.Value) bool { - switch getKind(v) { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} - -func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields - return true - } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside - return true - } - } - return false -} - -func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return v - } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref - } - return v -} diff --git a/vendor/github.com/moby/docker-image-spec/LICENSE b/vendor/github.com/moby/docker-image-spec/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/moby/docker-image-spec/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go b/vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go deleted file mode 100644 index 167261763..000000000 --- a/vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go +++ /dev/null @@ -1,54 +0,0 @@ -package v1 - -import ( - "time" - - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -const DockerOCIImageMediaType = "application/vnd.docker.container.image.v1+json" - -// DockerOCIImage is a ocispec.Image extended with Docker specific Config. -type DockerOCIImage struct { - ocispec.Image - - // Shadow ocispec.Image.Config - Config DockerOCIImageConfig `json:"config,omitempty"` -} - -// DockerOCIImageConfig is a ocispec.ImageConfig extended with Docker specific fields. -type DockerOCIImageConfig struct { - ocispec.ImageConfig - - DockerOCIImageConfigExt -} - -// DockerOCIImageConfigExt contains Docker-specific fields in DockerImageConfig. -type DockerOCIImageConfigExt struct { - Healthcheck *HealthcheckConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - - OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile - Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// HealthcheckConfig holds configuration settings for the HEALTHCHECK feature. -type HealthcheckConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - StartInterval time.Duration `json:",omitempty"` // The interval to attempt healthchecks at during the start period - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} diff --git a/vendor/github.com/moby/patternmatcher/LICENSE b/vendor/github.com/moby/patternmatcher/LICENSE deleted file mode 100644 index 6d8d58fb6..000000000 --- a/vendor/github.com/moby/patternmatcher/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/patternmatcher/NOTICE b/vendor/github.com/moby/patternmatcher/NOTICE deleted file mode 100644 index e5154640f..000000000 --- a/vendor/github.com/moby/patternmatcher/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. 
- -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/patternmatcher/ignorefile/ignorefile.go b/vendor/github.com/moby/patternmatcher/ignorefile/ignorefile.go deleted file mode 100644 index 94ea5a0ef..000000000 --- a/vendor/github.com/moby/patternmatcher/ignorefile/ignorefile.go +++ /dev/null @@ -1,73 +0,0 @@ -package ignorefile - -import ( - "bufio" - "bytes" - "io" - "path/filepath" - "strings" -) - -// ReadAll reads an ignore file from a reader and returns the list of file -// patterns to ignore, applying the following rules: -// -// - An UTF8 BOM header (if present) is stripped. -// - Lines starting with "#" are considered comments and are skipped. -// -// For remaining lines: -// -// - Leading and trailing whitespace is removed from each ignore pattern. -// - It uses [filepath.Clean] to get the shortest/cleanest path for -// ignore patterns. -// - Leading forward-slashes ("/") are removed from ignore patterns, -// so "/some/path" and "some/path" are considered equivalent. -func ReadAll(reader io.Reader) ([]string, error) { - if reader == nil { - return nil, nil - } - - var excludes []string - currentLine := 0 - utf8bom := []byte{0xEF, 0xBB, 0xBF} - - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - pattern := string(scannedBytes) - currentLine++ - // Lines starting with # (comments) are ignored before processing - if strings.HasPrefix(pattern, "#") { - continue - } - pattern = strings.TrimSpace(pattern) - if pattern == "" { - continue - } - // normalize absolute paths to paths relative to the context - // (taking care of '!' prefix) - invert := pattern[0] == '!' - if invert { - pattern = strings.TrimSpace(pattern[1:]) - } - if len(pattern) > 0 { - pattern = filepath.Clean(pattern) - pattern = filepath.ToSlash(pattern) - if len(pattern) > 1 && pattern[0] == '/' { - pattern = pattern[1:] - } - } - if invert { - pattern = "!" + pattern - } - - excludes = append(excludes, pattern) - } - if err := scanner.Err(); err != nil { - return nil, err - } - return excludes, nil -} diff --git a/vendor/github.com/moby/patternmatcher/patternmatcher.go b/vendor/github.com/moby/patternmatcher/patternmatcher.go deleted file mode 100644 index 37a1a59ac..000000000 --- a/vendor/github.com/moby/patternmatcher/patternmatcher.go +++ /dev/null @@ -1,474 +0,0 @@ -package patternmatcher - -import ( - "errors" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - "unicode/utf8" -) - -// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex. -var escapeBytes [8]byte - -// shouldEscape reports whether a rune should be escaped as part of the regex. -// -// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters. -// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator -// on Windows. -// -// Adapted from regexp::QuoteMeta in go stdlib. 
-// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2 -func shouldEscape(b rune) bool { - return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0 -} - -func init() { - for _, b := range []byte(`.+()|{}$`) { - escapeBytes[b%8] |= 1 << (b / 8) - } -} - -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// New creates a new matcher object for specific patterns that can -// be used later to match against patterns against paths -func New(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = p[1:] - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(p, "."); err != nil { - return nil, err - } - newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) - pm.patterns = append(pm.patterns, newp) - } - return pm, nil -} - -// Matches returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -// -// The "file" argument should be a slash-delimited path. -// -// Matches is not safe to call concurrently. -// -// Deprecated: This implementation is buggy (it only checks a single parent dir -// against the pattern) and will be removed soon. Use either -// MatchesOrParentMatches or MatchesUsingParentResults instead. -func (pm *PatternMatcher) Matches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - if len(pattern.dirs) <= len(parentPathDirs) { - match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) - } - } - - if match { - matched = !pattern.exclusion - } - } - - return matched, nil -} - -// MatchesOrParentMatches returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -// -// The "file" argument should be a slash-delimited path. -// -// Matches is not safe to call concurrently. 
-func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - for i := range parentPathDirs { - match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) - if match { - break - } - } - } - - if match { - matched = !pattern.exclusion - } - } - - return matched, nil -} - -// MatchesUsingParentResult returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. The functionality is -// the same as Matches, but as an optimization, the caller keeps track of -// whether the parent directory matched. -// -// The "file" argument should be a slash-delimited path. -// -// MatchesUsingParentResult is not safe to call concurrently. -// -// Deprecated: this function does behave correctly in some cases (see -// https://github.com/docker/buildx/issues/850). -// -// Use MatchesUsingParentResults instead. -func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) { - matched := parentMatched - file = filepath.FromSlash(file) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if match { - matched = !pattern.exclusion - } - } - return matched, nil -} - -// MatchInfo tracks information about parent dir matches while traversing a -// filesystem. -type MatchInfo struct { - parentMatched []bool -} - -// MatchesUsingParentResults returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. The functionality is -// the same as Matches, but as an optimization, the caller passes in -// intermediate results from matching the parent directory. -// -// The "file" argument should be a slash-delimited path. -// -// MatchesUsingParentResults is not safe to call concurrently. -func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) { - parentMatched := parentMatchInfo.parentMatched - if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) { - return false, MatchInfo{}, errors.New("wrong number of values in parentMatched") - } - - file = filepath.FromSlash(file) - matched := false - - matchInfo := MatchInfo{ - parentMatched: make([]bool, len(pm.patterns)), - } - for i, pattern := range pm.patterns { - match := false - // If the parent matched this pattern, we don't need to recheck. - if len(parentMatched) != 0 { - match = parentMatched[i] - } - - if !match { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. 
- if pattern.exclusion != matched { - continue - } - - var err error - match, err = pattern.match(file) - if err != nil { - return false, matchInfo, err - } - - // If the zero value of MatchInfo was passed in, we don't have - // any information about the parent dir's match results, and we - // apply the same logic as MatchesOrParentMatches. - if !match && len(parentMatched) == 0 { - if parentPath := filepath.Dir(file); parentPath != "." { - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - // Check to see if the pattern matches one of our parent dirs. - for i := range parentPathDirs { - match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) - if match { - break - } - } - } - } - } - matchInfo.parentMatched[i] = match - - if match { - matched = !pattern.exclusion - } - } - return matched, matchInfo, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used to filter file paths. -type Pattern struct { - matchType matchType - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -type matchType int - -const ( - unknownMatch matchType = iota - exactMatch - prefixMatch - suffixMatch - regexpMatch -) - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - if p.matchType == unknownMatch { - if err := p.compile(string(os.PathSeparator)); err != nil { - return false, filepath.ErrBadPattern - } - } - - switch p.matchType { - case exactMatch: - return path == p.cleanedPattern, nil - case prefixMatch: - // strip trailing ** - return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil - case suffixMatch: - // strip leading ** - suffix := p.cleanedPattern[2:] - if strings.HasSuffix(path, suffix) { - return true, nil - } - // **/foo matches "foo" - return suffix[0] == os.PathSeparator && path == suffix[1:], nil - case regexpMatch: - return p.regexp.MatchString(path), nil - } - - return false, nil -} - -func (p *Pattern) compile(sl string) error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - escSL := sl - if sl == `\` { - escSL += `\` - } - - p.matchType = exactMatch - for i := 0; scan.Peek() != scanner.EOF; i++ { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - if p.matchType == exactMatch { - p.matchType = prefixMatch - } else { - regStr += ".*" - p.matchType = regexpMatch - } - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" 
- p.matchType = regexpMatch - } - - if i == 0 { - p.matchType = suffixMatch - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - p.matchType = regexpMatch - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - p.matchType = regexpMatch - } else if shouldEscape(ch) { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - p.matchType = regexpMatch - } else { - regStr += `\` - } - } else if ch == '[' || ch == ']' { - regStr += string(ch) - p.matchType = regexpMatch - } else { - regStr += string(ch) - } - } - - if p.matchType != regexpMatch { - return nil - } - - regStr += "$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - p.matchType = regexpMatch - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -// -// This implementation is buggy (it only checks a single parent dir against the -// pattern) and will be removed soon. Use MatchesOrParentMatches instead. -func Matches(file string, patterns []string) (bool, error) { - pm, err := New(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.Matches(file) -} - -// MatchesOrParentMatches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func MatchesOrParentMatches(file string, patterns []string) (bool, error) { - pm, err := New(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.MatchesOrParentMatches(file) -} diff --git a/vendor/github.com/moby/sys/sequential/LICENSE b/vendor/github.com/moby/sys/sequential/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/moby/sys/sequential/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/sys/sequential/doc.go b/vendor/github.com/moby/sys/sequential/doc.go deleted file mode 100644 index af2817504..000000000 --- a/vendor/github.com/moby/sys/sequential/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package sequential provides a set of functions for managing sequential -// files on Windows. 
-// -// The origin of these functions are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. -// -// For non-Windows platforms, the package provides wrappers for the equivalents -// in the os packages. They are passthrough on Unix platforms, and only relevant -// on Windows. -package sequential diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go deleted file mode 100644 index a3c7340e3..000000000 --- a/vendor/github.com/moby/sys/sequential/sequential_unix.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build !windows -// +build !windows - -package sequential - -import "os" - -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func Create(name string) (*os.File, error) { - return os.Create(name) -} - -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func Open(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// CreateTemp creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func CreateTemp(dir, prefix string) (f *os.File, err error) { - return os.CreateTemp(dir, prefix) -} diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go deleted file mode 100644 index 3f7f0d83e..000000000 --- a/vendor/github.com/moby/sys/sequential/sequential_windows.go +++ /dev/null @@ -1,161 +0,0 @@ -package sequential - -import ( - "os" - "path/filepath" - "strconv" - "sync" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. 
-func Create(name string) (*os.File, error) { - return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func Open(name string) (*os.File, error) { - return OpenFile(name, os.O_RDONLY, 0) -} - -// OpenFile is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. -func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, err := openFileSequential(name, flag, 0) - if err == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: err} -} - -func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. 
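That comment captures why this package exists at all: on Windows the helpers open files with FILE_FLAG_SEQUENTIAL_SCAN so that large, read-once files (such as layer tarballs) do not churn the standby list, while the Unix variants shown earlier are plain passthroughs to the os package. A small caller sketch using only the exported helpers whose signatures appear in this diff; `github.com/moby/sys/sequential` is the import path from the file headers, and `layer.tar` is a hypothetical input file:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/moby/sys/sequential" // passthrough to os on non-Windows
)

func main() {
	// Open a potentially large file for sequential reading. On Windows this
	// requests FILE_FLAG_SEQUENTIAL_SCAN; elsewhere it is simply os.Open.
	src, err := sequential.Open("layer.tar") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	// CreateTemp mirrors the os temp-file helpers: an empty dir means os.TempDir().
	dst, err := sequential.CreateTemp("", "layer-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(dst.Name())
	defer dst.Close()

	n, err := io.Copy(dst, src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("copied %d bytes to %s\n", n, dst.Name())
}
```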
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// Helpers for CreateTemp -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// CreateTemp is a copy of os.CreateTemp, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func CreateTemp(dir, prefix string) (f *os.File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} diff --git a/vendor/github.com/moby/sys/user/LICENSE b/vendor/github.com/moby/sys/user/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/moby/sys/user/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/sys/user/lookup_unix.go b/vendor/github.com/moby/sys/user/lookup_unix.go deleted file mode 100644 index f95c1409f..000000000 --- a/vendor/github.com/moby/sys/user/lookup_unix.go +++ /dev/null @@ -1,157 +0,0 @@ -//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" - "strconv" - - "golang.org/x/sys/unix" -) - -// Unix-specific path to the passwd and group formatted files. -const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUserFunc(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. 
If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUserFunc(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupUserFunc(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, ErrNoPasswdEntries - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroupFunc(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroupFunc(func(g Group) bool { - return g.Gid == gid - }) -} - -func lookupGroupFunc(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, ErrNoGroupEntries - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(unix.Getuid()) -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. 
-func CurrentGroup() (Group, error) { - return LookupGid(unix.Getgid()) -} - -func currentUserSubIDs(fileName string) ([]SubID, error) { - u, err := CurrentUser() - if err != nil { - return nil, err - } - filter := func(entry SubID) bool { - return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) - } - return ParseSubIDFileFilter(fileName, filter) -} - -func CurrentUserSubUIDs() ([]SubID, error) { - return currentUserSubIDs("/etc/subuid") -} - -func CurrentUserSubGIDs() ([]SubID, error) { - return currentUserSubIDs("/etc/subgid") -} - -func CurrentProcessUIDMap() ([]IDMap, error) { - return ParseIDMapFile("/proc/self/uid_map") -} - -func CurrentProcessGIDMap() ([]IDMap, error) { - return ParseIDMapFile("/proc/self/gid_map") -} diff --git a/vendor/github.com/moby/sys/user/user.go b/vendor/github.com/moby/sys/user/user.go deleted file mode 100644 index 984466d1a..000000000 --- a/vendor/github.com/moby/sys/user/user.go +++ /dev/null @@ -1,605 +0,0 @@ -package user - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minID = 0 - maxID = 1<<31 - 1 // for 32-bit systems compatibility -) - -var ( - // ErrNoPasswdEntries is returned if no matching entries were found in /etc/group. - ErrNoPasswdEntries = errors.New("no matching entries in passwd file") - // ErrNoGroupEntries is returned if no matching entries were found in /etc/passwd. - ErrNoGroupEntries = errors.New("no matching entries in group file") - // ErrRange is returned if a UID or GID is outside of the valid range. - ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -// SubID represents an entry in /etc/sub{u,g}id -type SubID struct { - Name string - SubID int64 - Count int64 -} - -// IDMap represents an entry in /proc/PID/{u,g}id_map -type IDMap struct { - ID int64 - ParentID int64 - Count int64 -} - -func parseLine(line []byte, v ...interface{}) { - parseParts(bytes.Split(line, []byte(":")), v...) -} - -func parseParts(parts [][]byte, v ...interface{}) { - if len(parts) == 0 { - return - } - - for i, p := range parts { - // Ignore cases where we don't have enough fields to populate the arguments. - // Some configuration files like to misbehave. - if len(v) <= i { - break - } - - // Use the type of the argument to figure out how to parse it, scanf() style. - // This is legit. - switch e := v[i].(type) { - case *string: - *e = string(p) - case *int: - // "numbers", with conversion errors ignored because of some misbehaving configuration files. - *e, _ = strconv.Atoi(string(p)) - case *int64: - *e, _ = strconv.ParseInt(string(p), 10, 64) - case *[]string: - // Comma-separated lists. - if len(p) != 0 { - *e = strings.Split(string(p), ",") - } else { - *e = []string{} - } - default: - // Someone goof'd when writing code using this function. Scream so they can hear us. - panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! 
%#v is not a pointer!", e)) - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, errors.New("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, errors.New("nil source for group-formatted data") - } - rd := bufio.NewReader(r) - out := []Group{} - - // Read the file line-by-line. - for { - var ( - isPrefix bool - wholeLine []byte - err error - ) - - // Read the next line. We do so in chunks (as much as reader's - // buffer is able to keep), check if we read enough columns - // already on each step and store final result in wholeLine. - for { - var line []byte - line, isPrefix, err = rd.ReadLine() - - if err != nil { - // We should return no error if EOF is reached - // without a match. - if err == io.EOF { - err = nil - } - return out, err - } - - // Simple common case: line is short enough to fit in a - // single reader's buffer. - if !isPrefix && len(wholeLine) == 0 { - wholeLine = line - break - } - - wholeLine = append(wholeLine, line...) - - // Check if we read the whole line already. - if !isPrefix { - break - } - } - - // There's no spec for /etc/passwd or /etc/group, but we try to follow - // the same rules as the glibc parser, which allows comments and blank - // space at the beginning of a line. - wholeLine = bytes.TrimSpace(wholeLine) - if len(wholeLine) == 0 || wholeLine[0] == '#' { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List) - - if filter == nil || filter(p) { - out = append(out, p) - } - } -} - -type ExecUser struct { - Uid int - Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. 
It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - var passwd, group io.Reader - - if passwdFile, err := os.Open(passwdPath); err == nil { - passwd = passwdFile - defer passwdFile.Close() - } - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. -// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// - "" -// - "user" -// - "uid" -// - "user:group" -// - "uid:gid -// - "user:gid" -// - "uid:group" -// -// It should be noted that if you specify a numeric user or group id, they will -// not be evaluated as usernames (only the metadata will be filled). So attempting -// to parse a user with user.Name = "1337" will produce the user with a UID of -// 1337. -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax - var userArg, groupArg string - parseLine([]byte(userSpec), &userArg, &groupArg) - - // Convert userArg and groupArg to be numeric, so we don't have to execute - // Atoi *twice* for each iteration over lines. - uidArg, uidErr := strconv.Atoi(userArg) - gidArg, gidErr := strconv.Atoi(groupArg) - - // Find the matching user. - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - // Default to current state of the user. - return u.Uid == user.Uid - } - - if uidErr == nil { - // If the userArg is numeric, always treat it as a UID. - return uidArg == u.Uid - } - - return u.Name == userArg - }) - - // If we can't find the user, we have to bail. - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) - } - - var matchedUserName string - if len(users) > 0 { - // First match wins, even if there's more than one matching entry. - matchedUserName = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // If we can't find a user with the given username, the only other valid - // option is if it's a numeric username with no associated entry in passwd. - - if uidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) - } - user.Uid = uidArg - - // Must be inside valid uid range. - if user.Uid < minID || user.Uid > maxID { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - - // On to the groups. 
If we matched a username, we need to do this because of - // the supplementary group IDs. - if groupArg != "" || matchedUserName != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // If the group argument isn't explicit, we'll just search for it. - if groupArg == "" { - // Check if user is a member of this group. - for _, u := range g.List { - if u == matchedUserName { - return true - } - } - return false - } - - if gidErr == nil { - // If the groupArg is numeric, always treat it as a GID. - return gidArg == g.Gid - } - - return g.Name == groupArg - }) - if err != nil && group != nil { - return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) - } - - // Only start modifying user.Gid if it is in explicit form. - if groupArg != "" { - if len(groups) > 0 { - // First match wins, even if there's more than one matching entry. - user.Gid = groups[0].Gid - } else { - // If we can't find a group with the given name, the only other valid - // option is if it's a numeric group name with no associated entry in group. - - if gidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) - } - user.Gid = gidArg - - // Must be inside valid gid range. - if user.Gid < minID || user.Gid > maxID { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - } else if len(groups) > 0 { - // Supplementary group ids only make sense if in the implicit form. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id -// against the given /etc/group formatted data. If a group name cannot -// be found, an error will be returned. If a group id cannot be found, -// or the given group data is nil, the id will be returned as-is -// provided it is in the legal range. -func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - groups := []Group{} - if group != nil { - var err error - groups, err = ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) - } - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.ParseInt(ag, 10, 64) - if err != nil { - // Not a numeric ID either. - return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) - } - // Ensure gid is inside gid range. - if gid < minID || gid > maxID { - return nil, ErrRange - } - gidMap[int(gid)] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups -// that opens the groupPath given and gives it as an argument to -// GetAdditionalGroups. 
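The spec formats listed in the GetExecUser documentation above ("user", "uid", "user:group", "uid:gid", and so on) are easiest to see with in-memory passwd/group data rather than the real /etc files. A self-contained sketch using only functions and types whose signatures appear in this diff, with the `github.com/moby/sys/user` import path taken from the file headers:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/moby/sys/user"
)

func main() {
	// Minimal /etc/passwd- and /etc/group-formatted data.
	passwd := strings.NewReader("app:x:1000:1000:app user:/home/app:/bin/sh\n")
	group := strings.NewReader("app:x:1000:\nwheel:x:10:app\n")

	// Resolve a "user:group" style spec; a nil defaults value falls back to
	// the zero ExecUser.
	eu, err := user.GetExecUser("app:wheel", nil, passwd, group)
	if err != nil {
		log.Fatal(err)
	}
	// The UID and home come from the passwd entry, the GID from the explicit group.
	fmt.Printf("uid=%d gid=%d home=%s\n", eu.Uid, eu.Gid, eu.Home) // uid=1000 gid=10 home=/home/app
}
```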
-func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - var group io.Reader - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - return GetAdditionalGroups(additionalGroups, group) -} - -func ParseSubIDFile(path string) ([]SubID, error) { - subid, err := os.Open(path) - if err != nil { - return nil, err - } - defer subid.Close() - return ParseSubID(subid) -} - -func ParseSubID(subid io.Reader) ([]SubID, error) { - return ParseSubIDFilter(subid, nil) -} - -func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { - subid, err := os.Open(path) - if err != nil { - return nil, err - } - defer subid.Close() - return ParseSubIDFilter(subid, filter) -} - -func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { - if r == nil { - return nil, errors.New("nil source for subid-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []SubID{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 5 subuid - p := SubID{} - parseLine(line, &p.Name, &p.SubID, &p.Count) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -func ParseIDMapFile(path string) ([]IDMap, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - defer r.Close() - return ParseIDMap(r) -} - -func ParseIDMap(r io.Reader) ([]IDMap, error) { - return ParseIDMapFilter(r, nil) -} - -func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - defer r.Close() - return ParseIDMapFilter(r, filter) -} - -func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { - if r == nil { - return nil, errors.New("nil source for idmap-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []IDMap{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 7 user_namespaces - p := IDMap{} - parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} diff --git a/vendor/github.com/moby/sys/user/user_fuzzer.go b/vendor/github.com/moby/sys/user/user_fuzzer.go deleted file mode 100644 index e018eae61..000000000 --- a/vendor/github.com/moby/sys/user/user_fuzzer.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package user - -import ( - "io" - "strings" -) - -func IsDivisbleBy(n int, divisibleby int) bool { - return (n % divisibleby) == 0 -} - -func FuzzUser(data []byte) int { - if len(data) == 0 { - return -1 - } - if !IsDivisbleBy(len(data), 5) { - return -1 - } - - var divided [][]byte - - chunkSize := len(data) / 5 - - for i := 0; i < len(data); i += chunkSize { - end := i + chunkSize - - divided = append(divided, data[i:end]) - } - - _, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil) - - var passwd, group io.Reader - - group = strings.NewReader(string(divided[1])) - _, _ = GetAdditionalGroups([]string{string(divided[2])}, group) - - passwd = strings.NewReader(string(divided[3])) - _, _ = GetExecUser(string(divided[4]), nil, passwd, group) - return 1 -} diff --git a/vendor/github.com/moby/term/.gitignore b/vendor/github.com/moby/term/.gitignore deleted file mode 100644 index 
b0747ff01..000000000 --- a/vendor/github.com/moby/term/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -# if you want to ignore files created by your editor/tools, consider using a -# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files -.* -!.github -!.gitignore -profile.out -# support running go modules in vendor mode for local development -vendor/ diff --git a/vendor/github.com/moby/term/LICENSE b/vendor/github.com/moby/term/LICENSE deleted file mode 100644 index 6d8d58fb6..000000000 --- a/vendor/github.com/moby/term/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/term/README.md b/vendor/github.com/moby/term/README.md deleted file mode 100644 index 0ce92cc33..000000000 --- a/vendor/github.com/moby/term/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# term - utilities for dealing with terminals - -![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) - -term provides structures and helper functions to work with terminal (state, sizes). - -#### Using term - -```go -package main - -import ( - "log" - "os" - - "github.com/moby/term" -) - -func main() { - fd := os.Stdin.Fd() - if term.IsTerminal(fd) { - ws, err := term.GetWinsize(fd) - if err != nil { - log.Fatalf("term.GetWinsize: %s", err) - } - log.Printf("%d:%d\n", ws.Height, ws.Width) - } -} -``` - -## Contributing - -Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. - -## Copyright and license -Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. diff --git a/vendor/github.com/moby/term/ascii.go b/vendor/github.com/moby/term/ascii.go deleted file mode 100644 index 55873c055..000000000 --- a/vendor/github.com/moby/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, key[0]) - } - } - return codes, nil -} diff --git a/vendor/github.com/moby/term/doc.go b/vendor/github.com/moby/term/doc.go deleted file mode 100644 index c9bc03244..000000000 --- a/vendor/github.com/moby/term/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package term provides structures and helper functions to work with -// terminal (state, sizes). 
-package term diff --git a/vendor/github.com/moby/term/proxy.go b/vendor/github.com/moby/term/proxy.go deleted file mode 100644 index c47756b89..000000000 --- a/vendor/github.com/moby/term/proxy.go +++ /dev/null @@ -1,88 +0,0 @@ -package term - -import ( - "io" -) - -// EscapeError is special error which returned by a TTY proxy reader's Read() -// method in case its detach escape sequence is read. -type EscapeError struct{} - -func (EscapeError) Error() string { - return "read escape sequence" -} - -// escapeProxy is used only for attaches with a TTY. It is used to proxy -// stdin keypresses from the underlying reader and look for the passed in -// escape key sequence to signal a detach. -type escapeProxy struct { - escapeKeys []byte - escapeKeyPos int - r io.Reader - buf []byte -} - -// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader -// and detects when the specified escape keys are read, in which case the Read -// method will return an error of type EscapeError. -func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { - return &escapeProxy{ - escapeKeys: escapeKeys, - r: r, - } -} - -func (r *escapeProxy) Read(buf []byte) (n int, err error) { - if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) { - return 0, EscapeError{} - } - - if len(r.buf) > 0 { - n = copy(buf, r.buf) - r.buf = r.buf[n:] - } - - nr, err := r.r.Read(buf[n:]) - n += nr - if len(r.escapeKeys) == 0 { - return n, err - } - - for i := 0; i < n; i++ { - if buf[i] == r.escapeKeys[r.escapeKeyPos] { - r.escapeKeyPos++ - - // Check if the full escape sequence is matched. - if r.escapeKeyPos == len(r.escapeKeys) { - n = i + 1 - r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, EscapeError{} - } - continue - } - - // If we need to prepend a partial escape sequence from the previous - // read, make sure the new buffer size doesn't exceed len(buf). - // Otherwise, preserve any extra data in a buffer for the next read. - if i < r.escapeKeyPos { - preserve := make([]byte, 0, r.escapeKeyPos+n) - preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) - preserve = append(preserve, buf[:n]...) - n = copy(buf, preserve) - i += r.escapeKeyPos - r.buf = append(r.buf, preserve[n:]...) - } - r.escapeKeyPos = 0 - } - - // If we're in the middle of reading an escape sequence, make sure we don't - // let the caller read it. If later on we find that this is not the escape - // sequence, we'll prepend it back to buf. - n -= r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, err -} diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go deleted file mode 100644 index f9d8988ef..000000000 --- a/vendor/github.com/moby/term/term.go +++ /dev/null @@ -1,85 +0,0 @@ -package term - -import "io" - -// State holds the platform-specific state / console mode for the terminal. -type State terminalState - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - - // Only used on Unix - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -// -// On Windows, it attempts to turn on VT handling on all std handles if -// supported, or falls back to terminal emulation. On Unix, this returns -// the standard [os.Stdin], [os.Stdout] and [os.Stderr]. -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return stdStreams() -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. 
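The proxy.go implementation deleted above wraps a TTY reader and reports a detach key sequence as an `EscapeError`. Below is a minimal usage sketch, assuming the `github.com/moby/term` module is still resolvable by the build (only its vendored copy is removed here); the input bytes and the ctrl-p,ctrl-q sequence are illustrative, not taken from this repository.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/moby/term"
)

func main() {
	// ToBytes turns a comma-separated key description into raw bytes
	// (ctrl-p = 0x10, ctrl-q = 0x11).
	escapeKeys, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		log.Fatal(err)
	}

	// Stand-in for stdin: two ordinary bytes followed by the detach sequence.
	input := bytes.NewReader([]byte{'h', 'i', 0x10, 0x11, 'x'})
	proxy := term.NewEscapeProxy(input, escapeKeys)

	buf := make([]byte, 32)
	for {
		n, err := proxy.Read(buf)
		if n > 0 {
			fmt.Printf("passed through: %q\n", buf[:n])
		}
		if err != nil {
			if _, ok := err.(term.EscapeError); ok {
				fmt.Println("detach escape sequence detected")
			} else if err != io.EOF {
				log.Fatal(err)
			}
			return
		}
	}
}
```

Note that a single Read can return both passed-through bytes and the EscapeError, which is why the sketch checks `n > 0` before inspecting the error.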
-func GetFdInfo(in interface{}) (fd uintptr, isTerminal bool) { - return getFdInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - return getWinsize(fd) -} - -// SetWinsize tries to set the specified window size for the specified file -// descriptor. It is only implemented on Unix, and returns an error on Windows. -func SetWinsize(fd uintptr, ws *Winsize) error { - return setWinsize(fd, ws) -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return isTerminal(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return restoreTerminal(fd, state) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - return saveState(fd) -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - return disableEcho(fd, state) -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this is the equivalent of -// [MakeRaw], and puts both the input and output into raw mode. On Windows, it -// only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (previousState *State, err error) { - return setRawTerminal(fd) -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (previousState *State, err error) { - return setRawTerminalOutput(fd) -} - -// MakeRaw puts the terminal (Windows Console) connected to the -// given file descriptor into raw mode and returns the previous state of -// the terminal so that it can be restored. -func MakeRaw(fd uintptr) (previousState *State, err error) { - return makeRaw(fd) -} diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go deleted file mode 100644 index 2ec7706a1..000000000 --- a/vendor/github.com/moby/term/term_unix.go +++ /dev/null @@ -1,98 +0,0 @@ -//go:build !windows -// +build !windows - -package term - -import ( - "errors" - "io" - "os" - - "golang.org/x/sys/unix" -) - -// ErrInvalidState is returned if the state of the terminal is invalid. -// -// Deprecated: ErrInvalidState is no longer used. -var ErrInvalidState = errors.New("Invalid terminal state") - -// terminalState holds the platform-specific state / console mode for the terminal. 
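term.go above is the portable surface of the package. Besides the GetWinsize example in the deleted README, the other common pattern is putting stdin into raw mode and restoring it afterwards. A small interactive sketch using only functions whose signatures appear in the deleted file (it assumes it is run from a real terminal):

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"

	"github.com/moby/term"
)

func main() {
	fd := os.Stdin.Fd()
	if !term.IsTerminal(fd) {
		log.Fatal("stdin is not a terminal")
	}

	// MakeRaw disables echo and canonical mode; keep the previous state so
	// the terminal can be restored when the function returns.
	prev, err := term.MakeRaw(fd)
	if err != nil {
		log.Fatal(err)
	}
	defer term.RestoreTerminal(fd, prev)

	// In raw mode output post-processing is off, so emit explicit \r\n.
	fmt.Print("press any key to exit raw mode\r\n")
	b, err := bufio.NewReader(os.Stdin).ReadByte()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read byte %#x\r\n", b)
}
```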
-type terminalState struct { - termios unix.Termios -} - -func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -func getFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = isTerminal(inFd) - } - return inFd, isTerminalIn -} - -func getWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} - return ws, err -} - -func setWinsize(fd uintptr, ws *Winsize) error { - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &unix.Winsize{ - Row: ws.Height, - Col: ws.Width, - Xpixel: ws.x, - Ypixel: ws.y, - }) -} - -func isTerminal(fd uintptr) bool { - _, err := tcget(fd) - return err == nil -} - -func restoreTerminal(fd uintptr, state *State) error { - if state == nil { - return errors.New("invalid terminal state") - } - return tcset(fd, &state.termios) -} - -func saveState(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - return &State{termios: *termios}, nil -} - -func disableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - return tcset(fd, &newState) -} - -func setRawTerminal(fd uintptr) (*State, error) { - return makeRaw(fd) -} - -func setRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func tcget(fd uintptr) (*unix.Termios, error) { - p, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - return p, nil -} - -func tcset(fd uintptr, p *unix.Termios) error { - return unix.IoctlSetTermios(int(fd), setTermios, p) -} diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go deleted file mode 100644 index 81ccff042..000000000 --- a/vendor/github.com/moby/term/term_windows.go +++ /dev/null @@ -1,176 +0,0 @@ -package term - -import ( - "fmt" - "io" - "os" - "os/signal" - - windowsconsole "github.com/moby/term/windows" - "golang.org/x/sys/windows" -) - -// terminalState holds the platform-specific state / console mode for the terminal. -type terminalState struct { - mode uint32 -} - -// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console -var vtInputSupported bool - -func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var ( - emulateStdin, emulateStdout, emulateStderr bool - - mode uint32 - ) - - fd := windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - _ = windows.SetConsoleMode(fd, mode) - } - - fd = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. 
- if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStdout = true - } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - fd = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStderr = true - } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - if emulateStdin { - h := uint32(windows.STD_INPUT_HANDLE) - stdIn = windowsconsole.NewAnsiReader(int(h)) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - h := uint32(windows.STD_OUTPUT_HANDLE) - stdOut = windowsconsole.NewAnsiWriter(int(h)) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - h := uint32(windows.STD_ERROR_HANDLE) - stdErr = windowsconsole.NewAnsiWriter(int(h)) - } else { - stdErr = os.Stderr - } - - return stdIn, stdOut, stdErr -} - -func getFdInfo(in interface{}) (uintptr, bool) { - return windowsconsole.GetHandleInfo(in) -} - -func getWinsize(fd uintptr) (*Winsize, error) { - var info windows.ConsoleScreenBufferInfo - if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -func setWinsize(fd uintptr, ws *Winsize) error { - return fmt.Errorf("not implemented on Windows") -} - -func isTerminal(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil -} - -func restoreTerminal(fd uintptr, state *State) error { - return windows.SetConsoleMode(windows.Handle(fd), state.mode) -} - -func saveState(fd uintptr) (*State, error) { - var mode uint32 - - if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { - return nil, err - } - - return &State{mode: mode}, nil -} - -func disableEcho(fd uintptr, state *State) error { - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - mode := state.mode - mode &^= windows.ENABLE_ECHO_INPUT - mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT - err := windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -func setRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, oldState) - return oldState, err -} - -func setRawTerminalOutput(fd uintptr) (*State, error) { - oldState, err := saveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this - // version of Windows. 
- _ = windows.SetConsoleMode(windows.Handle(fd), oldState.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) - return oldState, err -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - _ = RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go deleted file mode 100644 index 45f77e03c..000000000 --- a/vendor/github.com/moby/term/termios_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build darwin || freebsd || openbsd || netbsd -// +build darwin freebsd openbsd netbsd - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TIOCGETA - setTermios = unix.TIOCSETA -) diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go deleted file mode 100644 index 88b7b2156..000000000 --- a/vendor/github.com/moby/term/termios_nonbsd.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !darwin && !freebsd && !netbsd && !openbsd && !windows -// +build !darwin,!freebsd,!netbsd,!openbsd,!windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TCGETS - setTermios = unix.TCSETS -) diff --git a/vendor/github.com/moby/term/termios_unix.go b/vendor/github.com/moby/term/termios_unix.go deleted file mode 100644 index 60c823783..000000000 --- a/vendor/github.com/moby/term/termios_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build !windows -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -// Termios is the Unix API for terminal I/O. -// -// Deprecated: use [unix.Termios]. -type Termios = unix.Termios - -func makeRaw(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - - oldState := State{termios: *termios} - - termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON - termios.Oflag &^= unix.OPOST - termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN - termios.Cflag &^= unix.CSIZE | unix.PARENB - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := tcset(fd, termios); err != nil { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/moby/term/termios_windows.go b/vendor/github.com/moby/term/termios_windows.go deleted file mode 100644 index 5be4e7601..000000000 --- a/vendor/github.com/moby/term/termios_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -package term - -import "golang.org/x/sys/windows" - -func makeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - err = windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return nil, err - } - return state, nil -} diff --git 
a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go deleted file mode 100644 index fb34c547a..000000000 --- a/vendor/github.com/moby/term/windows/ansi_reader.go +++ /dev/null @@ -1,252 +0,0 @@ -//go:build windows -// +build windows - -package windowsconsole - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. -func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("unexpected copy length encountered") - } - - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } else if countRecords == 0 { - countRecords = 1 - } - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(rune(keyEvent.UnicodeChar))) - } - - return string(rune(keyEvent.UnicodeChar)) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go deleted file mode 100644 index 4243307fd..000000000 --- a/vendor/github.com/moby/term/windows/ansi_writer.go +++ /dev/null @@ -1,57 +0,0 @@ -//go:build windows -// +build windows - -package windowsconsole - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - - return &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. -func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go deleted file mode 100644 index 21e57bd52..000000000 --- a/vendor/github.com/moby/term/windows/console.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build windows -// +build windows - -package windowsconsole - -import ( - "os" - - "golang.org/x/sys/windows" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. 
-func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = isConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -// -// Deprecated: use [windows.GetConsoleMode] or [golang.org/x/term.IsTerminal]. -func IsConsole(fd uintptr) bool { - return isConsole(fd) -} - -func isConsole(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil -} diff --git a/vendor/github.com/moby/term/windows/doc.go b/vendor/github.com/moby/term/windows/doc.go deleted file mode 100644 index 54265fffa..000000000 --- a/vendor/github.com/moby/term/windows/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windowsconsole diff --git a/vendor/github.com/mongodb/mongo-tools/LICENSE.md b/vendor/github.com/mongodb/mongo-tools/LICENSE.md deleted file mode 100644 index d46ac5b05..000000000 --- a/vendor/github.com/mongodb/mongo-tools/LICENSE.md +++ /dev/null @@ -1,11 +0,0 @@ -Copyright 2014 MongoDB, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in -compliance with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is -distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -implied. See the License for the specific language governing permissions and limitations under the -License. diff --git a/vendor/github.com/mongodb/mongo-tools/common/archive/archive.go b/vendor/github.com/mongodb/mongo-tools/common/archive/archive.go deleted file mode 100644 index de58ff5da..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/archive/archive.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package archive - -import "io" - -// NamespaceHeader is a data structure that, as BSON, is found in archives where it indicates -// that either the subsequent stream of BSON belongs to this new namespace, or that the -// indicated namespace will have no more documents (EOF) -type NamespaceHeader struct { - Database string `bson:"db"` - Collection string `bson:"collection"` - EOF bool `bson:"EOF"` - CRC int64 `bson:"CRC"` -} - -// CollectionMetadata is a data structure that, as BSON, is found in the prelude of the archive. -// There is one CollectionMetadata per collection that will be in the archive. -// For a CollectionMetadata for collection X with Type == "timeseries", -// there will be no data for collection X. Instead there will be data for collection system.buckets.X. 
-// Mongorestore will restore both the timeseries view and the underlying system.buckets collection. -type CollectionMetadata struct { - Database string `bson:"db"` - Collection string `bson:"collection"` - Metadata string `bson:"metadata"` - Size int `bson:"size"` - Type string `bson:"type"` -} - -// Header is a data structure that, as BSON, is found immediately after the magic -// number in the archive, before any CollectionMetadatas. It is the home of any archive level information -type Header struct { - ConcurrentCollections int32 `bson:"concurrent_collections"` - FormatVersion string `bson:"version"` - ServerVersion string `bson:"server_version"` - ToolVersion string `bson:"tool_version"` -} - -const minBSONSize = 4 + 1 // an empty BSON document should be exactly five bytes long - -var terminator int32 = -1 -var terminatorBytes = []byte{0xFF, 0xFF, 0xFF, 0xFF} // TODO, rectify this with terminator - -// MagicNumber is four bytes that are found at the beginning of the archive that indicate that -// the byte stream is an archive, as opposed to anything else, including a stream of BSON documents -const MagicNumber uint32 = 0x8199e26d -const archiveFormatVersion = "0.1" - -// Writer is the top level object to contain information about archives in mongodump -type Writer struct { - Out io.WriteCloser - Prelude *Prelude - Mux *Multiplexer -} - -// Reader is the top level object to contain information about archives in mongorestore -type Reader struct { - In io.ReadCloser - Demux *Demultiplexer - Prelude *Prelude -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/archive/demultiplexer.go b/vendor/github.com/mongodb/mongo-tools/common/archive/demultiplexer.go deleted file mode 100644 index 87b85e175..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/archive/demultiplexer.go +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package archive - -import ( - "bytes" - "fmt" - "hash" - "hash/crc64" - "io" - "strings" - "sync" - "sync/atomic" - - "github.com/mongodb/mongo-tools/common/db" - "github.com/mongodb/mongo-tools/common/intents" - "github.com/mongodb/mongo-tools/common/log" - "github.com/mongodb/mongo-tools/common/util" - "go.mongodb.org/mongo-driver/bson" -) - -// DemuxOut is a Demultiplexer output consumer -// The Write() and End() occur in the same thread as the Demultiplexer runs in. -type DemuxOut interface { - Write([]byte) (int, error) - End() - Sum64() (uint64, bool) -} - -const ( - NamespaceUnopened = iota - NamespaceOpened - NamespaceClosed -) - -// Demultiplexer implements Parser. -type Demultiplexer struct { - In io.Reader - //TODO wrap up these three into a structure - outs map[string]DemuxOut - lengths map[string]int64 - currentNamespace string - buf [db.MaxBSONSize]byte - - // NamespaceChan is used to send a namespace to a consumer of namespaces. - NamespaceChan chan string - - // NamespaceErrorChan is used to receive an error or nil from a namespace - // consumer immediately after each namespace is sent and validated. 
- NamespaceErrorChan chan error - - NamespaceStatus map[string]int - IsAtlasProxy bool -} - -func CreateDemux(namespaceMetadatas []*CollectionMetadata, in io.Reader, isAtlasProxy bool) *Demultiplexer { - demux := &Demultiplexer{ - NamespaceStatus: make(map[string]int), - In: in, - IsAtlasProxy: isAtlasProxy, - } - for _, cm := range namespaceMetadatas { - // For atlas proxy archive restores, ignore collections from the admin DB. - if isAtlasProxy && cm.Database == "admin" { - continue - } - - var ns string - if cm.Type == "timeseries" { - ns = cm.Database + ".system.buckets." + cm.Collection - } else { - ns = cm.Database + "." + cm.Collection - } - demux.NamespaceStatus[ns] = NamespaceUnopened - } - return demux -} - -// Run creates and runs a parser with the Demultiplexer as a consumer -func (demux *Demultiplexer) Run() error { - parser := Parser{In: demux.In} - err := parser.ReadAllBlocks(demux) - if len(demux.outs) > 0 { - log.Logvf(log.Always, "demux finishing when there are still outs (%v)", len(demux.outs)) - } - - log.Logvf(log.DebugLow, "demux finishing (err:%v)", err) - return err -} - -type demuxError struct { - Err error - Msg string -} - -// Error is part of the Error interface. It formats a demuxError for human readability. -func (pe *demuxError) Error() string { - err := fmt.Sprintf("error demultiplexing archive; %v", pe.Msg) - if pe.Err != nil { - err = fmt.Sprintf("%v ( %v )", err, pe.Err) - } - return err -} - -// newError creates a demuxError with just a message -func newError(msg string) error { - return &demuxError{ - Msg: msg, - } -} - -// newWrappedError creates a demuxError with a message as well as an underlying cause error -func newWrappedError(msg string, err error) error { - return &demuxError{ - Err: err, - Msg: msg, - } -} - -// HeaderBSON is part of the ParserConsumer interface and receives headers from parser. -// Its main role is to implement opens and EOFs of the embedded stream. -func (demux *Demultiplexer) HeaderBSON(buf []byte) error { - colHeader := NamespaceHeader{} - err := bson.Unmarshal(buf, &colHeader) - if err != nil { - return newWrappedError("header bson doesn't unmarshal as a collection header", err) - } - log.Logvf(log.DebugHigh, "demux namespaceHeader: %v", colHeader) - if colHeader.Collection == "" { - return newError("collection header is missing a Collection") - } - demux.currentNamespace = colHeader.Database + "." + colHeader.Collection - - // For atlas proxy archive restores, ignore collections from the admin DB. - if demux.IsAtlasProxy && colHeader.Database == "admin" { - return nil - } - - if _, ok := demux.outs[demux.currentNamespace]; !ok { - if demux.NamespaceStatus[demux.currentNamespace] != NamespaceUnopened { - return newError("namespace header for already opened namespace") - } - demux.NamespaceStatus[demux.currentNamespace] = NamespaceOpened - if demux.NamespaceChan != nil { - demux.NamespaceChan <- demux.currentNamespace - err := <-demux.NamespaceErrorChan - if err == io.EOF { - // if the Prioritizer sends us back an io.EOF then it's telling us that - // it's finishing and doesn't need any more namespace announcements. 
- close(demux.NamespaceChan) - demux.NamespaceChan = nil - return nil - } - if err != nil { - return newWrappedError("failed arranging a consumer for new namespace", err) - } - } - } - if colHeader.EOF { - if rcr, ok := demux.outs[demux.currentNamespace].(*RegularCollectionReceiver); ok { - rcr.err = io.EOF - } - demux.outs[demux.currentNamespace].End() - demux.NamespaceStatus[demux.currentNamespace] = NamespaceClosed - length := int64(demux.lengths[demux.currentNamespace]) - crcUInt64, ok := demux.outs[demux.currentNamespace].Sum64() - if ok { - crc := int64(crcUInt64) - if crc != colHeader.CRC { - return fmt.Errorf("CRC mismatch for namespace %v, %v!=%v", - demux.currentNamespace, - crc, - colHeader.CRC, - ) - } - log.Logvf(log.DebugHigh, - "demux checksum for namespace %v is correct (%v), %v bytes", - demux.currentNamespace, crc, length) - } else { - log.Logvf(log.DebugHigh, - "demux checksum for namespace %v was not calculated.", - demux.currentNamespace) - } - delete(demux.outs, demux.currentNamespace) - delete(demux.lengths, demux.currentNamespace) - // in case we get a BSONBody with this block, - // we want to ensure that that causes an error - demux.currentNamespace = "" - } - return nil -} - -// End is part of the ParserConsumer interface and receives the end of archive notification. -func (demux *Demultiplexer) End() error { - log.Logvf(log.DebugHigh, "demux End") - var err error - if len(demux.outs) != 0 { - openNss := []string{} - for ns := range demux.outs { - openNss = append(openNss, ns) - if rcr, ok := demux.outs[ns].(*RegularCollectionReceiver); ok { - rcr.err = newError("archive io error") - } - demux.outs[ns].End() - } - err = newError(fmt.Sprintf("archive finished but contained files were unfinished (%v)", openNss)) - } else { - for ns, status := range demux.NamespaceStatus { - if status != NamespaceClosed { - err = newError(fmt.Sprintf("archive finished before all collections were seen (%v)", ns)) - } - } - } - - if demux.NamespaceChan != nil { - close(demux.NamespaceChan) - } - - return err -} - -// BodyBSON is part of the ParserConsumer interface and receives BSON bodies from the parser. -// Its main role is to dispatch the body to the Read() function of the current DemuxOut. -func (demux *Demultiplexer) BodyBSON(buf []byte) error { - if demux.currentNamespace == "" { - return newError("collection data without a collection header") - } - - // For atlas proxy archive restores, ignore collections from the admin DB. 
- if demux.IsAtlasProxy && strings.HasPrefix(demux.currentNamespace, "admin.") { - return nil - } - - demux.lengths[demux.currentNamespace] += int64(len(buf)) - - out, ok := demux.outs[demux.currentNamespace] - if !ok { - return newError("no demux consumer currently consuming namespace " + demux.currentNamespace) - } - _, err := out.Write(buf) - return err -} - -// Open installs the DemuxOut as the handler for data for the namespace ns -func (demux *Demultiplexer) Open(ns string, out DemuxOut) { - // In the current implementation where this is either called before the demultiplexing is running - // or while the demutiplexer is inside of the NamespaceChan NamespaceErrorChan conversation - // I think that we don't need to lock outs, but I suspect that if the implementation changes - // we may need to lock when outs is accessed - log.Logvf(log.DebugHigh, "demux Open for %s", ns) - if demux.outs == nil { - demux.outs = make(map[string]DemuxOut) - demux.lengths = make(map[string]int64) - } - demux.outs[ns] = out - demux.lengths[ns] = 0 -} - -// RegularCollectionReceiver implements the intents.file interface. -type RegularCollectionReceiver struct { - pos int64 // updated atomically, aligned at the beginning of the struct - readLenChan chan int - readBufChan chan []byte - Intent *intents.Intent - Origin string - Demux *Demultiplexer - partialReadArray []byte - partialReadBuf []byte - hash hash.Hash64 - closeOnce sync.Once - endOnce sync.Once - openOnce sync.Once - err error -} - -func (receiver *RegularCollectionReceiver) Sum64() (uint64, bool) { - return receiver.hash.Sum64(), true -} - -// Read() runs in the restoring goroutine -func (receiver *RegularCollectionReceiver) Read(r []byte) (int, error) { - if receiver.partialReadBuf != nil && len(receiver.partialReadBuf) > 0 { - wLen := len(receiver.partialReadBuf) - copyLen := copy(r, receiver.partialReadBuf) - if wLen == copyLen { - receiver.partialReadBuf = nil - } else { - receiver.partialReadBuf = receiver.partialReadBuf[copyLen:] - } - atomic.AddInt64(&receiver.pos, int64(copyLen)) - return copyLen, nil - } - // Since we're the "reader" here, not the "writer" we need to start with a read, in case the chan is closed - wLen, ok := <-receiver.readLenChan - if !ok { - return 0, receiver.err - } - if wLen > db.MaxBSONSize { - return 0, fmt.Errorf("incoming buffer size is too big %v", wLen) - } - rLen := len(r) - if wLen > rLen { - // if the incoming write size is larger then the incoming read buffer then we need to accept - // the write in a larger buffer, fill the read buffer, then cache the remainder - receiver.partialReadBuf = receiver.partialReadArray[:wLen] - receiver.readBufChan <- receiver.partialReadBuf - writtenLength := <-receiver.readLenChan - if wLen != writtenLength { - return 0, fmt.Errorf("regularCollectionReceiver didn't send what it said it would") - } - receiver.hash.Write(receiver.partialReadBuf) - copy(r, receiver.partialReadBuf) - receiver.partialReadBuf = receiver.partialReadBuf[rLen:] - atomic.AddInt64(&receiver.pos, int64(rLen)) - return rLen, nil - } - // Send the read buff to the BodyBSON ParserConsumer to fill - receiver.readBufChan <- r - // Receiver the wLen of data written - wLen = <-receiver.readLenChan - receiver.hash.Write(r[:wLen]) - atomic.AddInt64(&receiver.pos, int64(wLen)) - return wLen, nil -} - -func (receiver *RegularCollectionReceiver) Pos() int64 { - return atomic.LoadInt64(&receiver.pos) -} - -// Open is part of the intents.file interface. 
It creates the chan's in the -// RegularCollectionReceiver and adds the RegularCollectionReceiver to the set of -// RegularCollectionReceivers in the demultiplexer -func (receiver *RegularCollectionReceiver) Open() error { - // TODO move this implementation to some non intents.file method, to be called from prioritizer.Get - // So that we don't have to enable this double open stuff. - // Currently the open needs to finish before the prioritizer.Get finishes, so we open the intents.file - // in prioritizer.Get even though it's going to get opened again in DumpIntent. - receiver.openOnce.Do(func() { - receiver.readLenChan = make(chan int) - receiver.readBufChan = make(chan []byte) - receiver.hash = crc64.New(crc64.MakeTable(crc64.ECMA)) - receiver.Demux.Open(receiver.Origin, receiver) - }) - return nil -} - -func (receiver *RegularCollectionReceiver) TakeIOBuffer(ioBuf []byte) { - receiver.partialReadArray = ioBuf - -} -func (receiver *RegularCollectionReceiver) ReleaseIOBuffer() { - receiver.partialReadArray = nil -} - -// Write is part of the DemuxOut interface. -func (receiver *RegularCollectionReceiver) Write(buf []byte) (int, error) { - // As a writer, we need to write first, so that the reader can properly detect EOF - // Additionally, the reader needs to know the write size, so that it can give us a - // properly sized buffer. Sending the incoming buffersize fills both of these needs. - // However, it's possible the intent has been closed already so the reader - // won't be reading the channel. To avoid deadlock, we check for closed - // readBufChan. - select { - case receiver.readLenChan <- len(buf): - // do nothing other than send - case <-receiver.readBufChan: - // Will only receive this if closed - return 0, errInterrupted - } - - // Receive from the reader a buffer to put the bytes into - readBuf := <-receiver.readBufChan - if len(readBuf) < len(buf) { - return 0, fmt.Errorf("readbuf is not large enough for incoming BodyBSON (%v<%v)", - len(readBuf), len(buf)) - } - copy(readBuf, buf) - // Send back the length of the data copied in to the buffer - receiver.readLenChan <- len(buf) - return len(buf), nil -} - -// Close is part of the DemuxOut as well as the intents.file interface. It only closes the readLenChan, as that is what will -// cause the RegularCollectionReceiver.Read() to receive EOF -// Close will get called twice, once in the demultiplexer, and again when the restore goroutine is done with its intent.file -func (receiver *RegularCollectionReceiver) Close() error { - // Close must be idempotent and repeat channel closes panic; only do once. - receiver.closeOnce.Do(func() { - close(receiver.readBufChan) - }) - return nil -} - -// End signals to any waiting readers that there is nothing more to read, then -// it waits on the readBufChan, which is closed by the reader-side Close() -// method. -func (receiver *RegularCollectionReceiver) End() { - // To keep this idempotent, close the channel only once. 
- receiver.endOnce.Do(func() { - close(receiver.readLenChan) - }) - <-receiver.readBufChan -} - -// SpecialCollectionCache implements both DemuxOut as well as intents.file -type SpecialCollectionCache struct { - pos int64 // updated atomically, aligned at the beginning of the struct - Intent *intents.Intent - Demux *Demultiplexer - buf bytes.Buffer - hash hash.Hash64 -} - -func NewSpecialCollectionCache(intent *intents.Intent, demux *Demultiplexer) *SpecialCollectionCache { - return &SpecialCollectionCache{ - Intent: intent, - Demux: demux, - hash: crc64.New(crc64.MakeTable(crc64.ECMA)), - } -} - -// Open is part of the both interfaces, and it does nothing -func (cache *SpecialCollectionCache) Open() error { - return nil -} - -// Close is part of the intents.file interface, and does nothing -func (cache *SpecialCollectionCache) Close() error { - return nil -} - -// End indicates we've read all there is to read, so -// we update the intent size based on the final length. -func (cache *SpecialCollectionCache) End() { - cache.Intent.Size = int64(cache.buf.Len()) -} - -func (cache *SpecialCollectionCache) Read(p []byte) (int, error) { - n, err := cache.buf.Read(p) - atomic.AddInt64(&cache.pos, int64(n)) - return n, err -} - -func (cache *SpecialCollectionCache) Pos() int64 { - return atomic.LoadInt64(&cache.pos) -} - -func (cache *SpecialCollectionCache) Write(b []byte) (int, error) { - cache.hash.Write(b) - return cache.buf.Write(b) -} - -func (cache *SpecialCollectionCache) Sum64() (uint64, bool) { - return cache.hash.Sum64(), true -} - -// MutedCollection implements both DemuxOut as well as intents.file. It serves as a way to -// let the demutiplexer ignore certain embedded streams -type MutedCollection struct { - Intent *intents.Intent - Demux *Demultiplexer -} - -// Read is part of the intents.file interface, and does nothing -func (*MutedCollection) Read([]byte) (int, error) { - // Read is part of the intents.file interface, and does nothing - return 0, io.EOF -} - -// Write is part of the intents.file interface, and does nothing -func (*MutedCollection) Write(b []byte) (int, error) { - return len(b), nil -} - -// Close is part of the intents.file interface, and does nothing -func (*MutedCollection) Close() error { - return nil -} - -// End is part of the DemuxOut interface and does nothing. -func (*MutedCollection) End() {} - -// Open is part of the intents.file interface, and does nothing -func (*MutedCollection) Open() error { - return nil -} - -// Sum64 is part of the DemuxOut interface -func (*MutedCollection) Sum64() (uint64, bool) { - return 0, false -} - -//===== Archive Manager Prioritizer ===== - -// NewPrioritizer creates a new Prioritizer and hooks up its Namespace channels to the ones in demux -func (demux *Demultiplexer) NewPrioritizer(mgr *intents.Manager) *Prioritizer { - return &Prioritizer{ - NamespaceChan: demux.NamespaceChan, - NamespaceErrorChan: demux.NamespaceErrorChan, - mgr: mgr, - } -} - -// Prioritizer is a completely reactive prioritizer -// Intents are handed out as they arrive in the archive -type Prioritizer struct { - NamespaceChan <-chan string - NamespaceErrorChan chan<- error - mgr *intents.Manager -} - -// Get waits for a new namespace from the NamespaceChan, and returns a Intent found for it -func (prioritizer *Prioritizer) Get() *intents.Intent { - namespace, ok := <-prioritizer.NamespaceChan - if !ok { - return nil - } - destDB, destC := util.SplitNamespace(namespace) - namespace = destDB + "." 
+ strings.TrimPrefix(destC, "system.buckets.") - intent := prioritizer.mgr.IntentForNamespace(namespace) - if intent == nil { - prioritizer.NamespaceErrorChan <- fmt.Errorf("no intent for namespace %v", namespace) - } else { - if intent.BSONFile != nil { - intent.BSONFile.Open() - } - if intent.IsOplog() { - // once we see the oplog we - // cause the RestoreIntents to finish because we don't - // want RestoreIntents to restore the oplog - prioritizer.NamespaceErrorChan <- io.EOF - return nil - } - prioritizer.NamespaceErrorChan <- nil - } - return intent -} - -// Finish is part of the IntentPrioritizer interface, and does nothing -func (prioritizer *Prioritizer) Finish(*intents.Intent) { - // no-op - return -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/archive/multiplexer.go b/vendor/github.com/mongodb/mongo-tools/common/archive/multiplexer.go deleted file mode 100644 index f6196ccd8..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/archive/multiplexer.go +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package archive - -import ( - "fmt" - "hash" - "hash/crc64" - "io" - "reflect" - - "github.com/mongodb/mongo-tools/common/db" - "github.com/mongodb/mongo-tools/common/intents" - "github.com/mongodb/mongo-tools/common/log" - "go.mongodb.org/mongo-driver/bson" -) - -// bufferSize enables or disables the MuxIn buffering -// TODO: remove this constant and the non-buffered MuxIn implementations -const bufferWrites = true -const bufferSize = db.MaxBSONSize - -// Multiplexer is what one uses to create interleaved intents in an archive -type Multiplexer struct { - Out io.WriteCloser - Control chan *MuxIn - Completed chan error - // shutdownInputs allows the mux to tell the intent dumping worker - // go routines to shutdown, so that we can shutdown - shutdownInputs notifier - // ins and selectCases are correlating slices - ins []*MuxIn - selectCases []reflect.SelectCase - currentNamespace string -} - -type notifier interface { - Notify() -} - -// NewMultiplexer creates a Multiplexer and populates its Control/Completed chans -// it takes a WriteCloser, which is where in inputs will get multiplexed on to, -// and it takes a notifier, which should allow the multiplexer to ask for the shutdown -// of the inputs. -func NewMultiplexer(out io.WriteCloser, shutdownInputs notifier) *Multiplexer { - mux := &Multiplexer{ - Out: out, - Control: make(chan *MuxIn), - Completed: make(chan error), - shutdownInputs: shutdownInputs, - ins: []*MuxIn{ - nil, // There is no MuxIn for the Control case - }, - } - mux.selectCases = []reflect.SelectCase{ - { - Dir: reflect.SelectRecv, - Chan: reflect.ValueOf(mux.Control), - Send: reflect.Value{}, - }, - } - return mux -} - -// Run multiplexes until it receives an EOF on its Control chan. 
-func (mux *Multiplexer) Run() { - var err, completionErr error - for { - index, value, notEOF := reflect.Select(mux.selectCases) - EOF := !notEOF - if index == 0 { //Control index - if EOF { - log.Logvf(log.DebugLow, "Mux finish") - mux.Out.Close() - if completionErr != nil { - mux.Completed <- completionErr - } else if len(mux.selectCases) != 1 { - mux.Completed <- fmt.Errorf("Mux ending but selectCases still open %v", - len(mux.selectCases)) - } else { - mux.Completed <- nil - } - return - } - muxIn, ok := value.Interface().(*MuxIn) - if !ok { - mux.Completed <- fmt.Errorf("non MuxIn received on Control chan") // one for the MuxIn.Open - return - } - log.Logvf(log.DebugLow, "Mux open namespace %v", muxIn.Intent.DataNamespace()) - mux.selectCases = append(mux.selectCases, reflect.SelectCase{ - Dir: reflect.SelectRecv, - Chan: reflect.ValueOf(muxIn.writeChan), - Send: reflect.Value{}, - }) - mux.ins = append(mux.ins, muxIn) - } else { - if EOF { - // We need to let the MuxIn know that we've - // noticed this close. This fixes a race where - // the intent processing threads finish, then the main - // thread closes the mux's control chan and the mux - // processes the close on the control chan before it processes - // the close on the MuxIn chan - mux.ins[index].writeCloseFinishedChan <- struct{}{} - - err = mux.formatEOF(index, mux.ins[index]) - if err != nil { - mux.shutdownInputs.Notify() - mux.Out = &nopCloseNopWriter{} - completionErr = err - } - log.Logvf(log.DebugLow, "Mux close namespace %v", mux.ins[index].Intent.DataNamespace()) - mux.currentNamespace = "" - mux.selectCases = append(mux.selectCases[:index], mux.selectCases[index+1:]...) - mux.ins = append(mux.ins[:index], mux.ins[index+1:]...) - } else { - bsonBytes, ok := value.Interface().([]byte) - if !ok { - mux.Completed <- fmt.Errorf("multiplexer received a value that wasn't a []byte") - return - } - err = mux.formatBody(mux.ins[index], bsonBytes) - if err != nil { - mux.shutdownInputs.Notify() - mux.Out = &nopCloseNopWriter{} - completionErr = err - } - } - } - } -} - -type nopCloseNopWriter struct{} - -func (*nopCloseNopWriter) Close() error { return nil } -func (*nopCloseNopWriter) Write(p []byte) (int, error) { return len(p), nil } - -// formatBody writes the BSON in to the archive, potentially writing a new header -// if the document belongs to a different namespace from the last header. 
-func (mux *Multiplexer) formatBody(in *MuxIn, bsonBytes []byte) error { - var err error - var length int - defer func() { - in.writeLenChan <- length - }() - if in.Intent.DataNamespace() != mux.currentNamespace { - // Handle the change of which DB/Collection we're writing docs for - // If mux.currentNamespace then we need to terminate the current block - if mux.currentNamespace != "" { - l, err := mux.Out.Write(terminatorBytes) - if err != nil { - return err - } - if l != len(terminatorBytes) { - return io.ErrShortWrite - } - } - header, err := bson.Marshal(NamespaceHeader{ - Database: in.Intent.DB, - Collection: in.Intent.DataCollection(), - }) - if err != nil { - return err - } - l, err := mux.Out.Write(header) - if err != nil { - return err - } - if l != len(header) { - return io.ErrShortWrite - } - } - mux.currentNamespace = in.Intent.DataNamespace() - length, err = mux.Out.Write(bsonBytes) - if err != nil { - return err - } - return nil -} - -// formatEOF writes the EOF header in to the archive -func (mux *Multiplexer) formatEOF(index int, in *MuxIn) error { - var err error - if mux.currentNamespace != "" { - l, err := mux.Out.Write(terminatorBytes) - if err != nil { - return err - } - if l != len(terminatorBytes) { - return io.ErrShortWrite - } - } - eofHeader, err := bson.Marshal(NamespaceHeader{ - Database: in.Intent.DB, - Collection: in.Intent.DataCollection(), - EOF: true, - CRC: int64(in.hash.Sum64()), - }) - if err != nil { - return err - } - l, err := mux.Out.Write(eofHeader) - if err != nil { - return err - } - if l != len(eofHeader) { - return io.ErrShortWrite - } - l, err = mux.Out.Write(terminatorBytes) - if err != nil { - return err - } - if l != len(terminatorBytes) { - return io.ErrShortWrite - } - return nil -} - -// MuxIn is an implementation of the intents.file interface. -// They live in the intents, and are potentially owned by different threads than -// the thread owning the Multiplexer. -// They are out the intents write data to the multiplexer -type MuxIn struct { - writeChan chan []byte - writeLenChan chan int - writeCloseFinishedChan chan struct{} - buf []byte - hash hash.Hash64 - Intent *intents.Intent - Mux *Multiplexer -} - -// Read does nothing for MuxIns -func (muxIn *MuxIn) Read([]byte) (int, error) { - return 0, nil -} - -func (muxIn *MuxIn) Pos() int64 { - return 0 -} - -// Close closes the chans in the MuxIn. -// Ultimately the multiplexer will detect that they are closed and cause a -// formatEOF to occur. -func (muxIn *MuxIn) Close() error { - // the mux side of this gets closed in the mux when it gets an eof on the read - log.Logvf(log.DebugHigh, "MuxIn close %v", muxIn.Intent.DataNamespace()) - if bufferWrites { - muxIn.writeChan <- muxIn.buf - length := <-muxIn.writeLenChan - if length != len(muxIn.buf) { - return io.ErrShortWrite - } - muxIn.buf = nil - } - close(muxIn.writeChan) - close(muxIn.writeLenChan) - // We need to wait for the close on the writeChan to be processed before proceeding - // Otherwise we might assume that all work is finished and exit the program before - // the mux finishes writing the end of the archive - <-muxIn.writeCloseFinishedChan - return nil -} - -// Open is implemented in Mux.open, but in short, it creates chans and a select case -// and adds the SelectCase and the MuxIn in to the Multiplexer. 
-func (muxIn *MuxIn) Open() error { - log.Logvf(log.DebugHigh, "MuxIn open %v", muxIn.Intent.DataNamespace()) - muxIn.writeChan = make(chan []byte) - muxIn.writeLenChan = make(chan int) - muxIn.writeCloseFinishedChan = make(chan struct{}) - muxIn.buf = make([]byte, 0, bufferSize) - muxIn.hash = crc64.New(crc64.MakeTable(crc64.ECMA)) - if bufferWrites { - muxIn.buf = make([]byte, 0, db.MaxBSONSize) - } - muxIn.Mux.Control <- muxIn - return nil -} - -// Write hands a buffer to the Multiplexer and receives a written length from the multiplexer -// after the length is received, the buffer is free to be reused. -func (muxIn *MuxIn) Write(buf []byte) (int, error) { - size := int( - (uint32(buf[0]) << 0) | - (uint32(buf[1]) << 8) | - (uint32(buf[2]) << 16) | - (uint32(buf[3]) << 24), - ) - // TODO remove these checks, they're for debugging - if len(buf) < size { - panic(fmt.Errorf("corrupt bson in MuxIn.Write (size %v/%v)", size, len(buf))) - } - if buf[size-1] != 0 { - panic(fmt.Errorf("corrupt bson in MuxIn.Write bson has no-zero terminator %v, (size %v/%v)", buf[size-1], size, len(buf))) - } - if bufferWrites { - if len(muxIn.buf)+len(buf) > cap(muxIn.buf) { - muxIn.writeChan <- muxIn.buf - length := <-muxIn.writeLenChan - if length != len(muxIn.buf) { - return 0, io.ErrShortWrite - } - muxIn.buf = muxIn.buf[:0] - } - muxIn.buf = append(muxIn.buf, buf...) - } else { - muxIn.writeChan <- buf - length := <-muxIn.writeLenChan - if length != len(buf) { - return 0, io.ErrShortWrite - } - } - muxIn.hash.Write(buf) - return len(buf), nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/archive/parser.go b/vendor/github.com/mongodb/mongo-tools/common/archive/parser.go deleted file mode 100644 index b8ffd1d1e..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/archive/parser.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package archive - -import ( - "errors" - "fmt" - "io" - - "github.com/mongodb/mongo-tools/common/db" -) - -var errInterrupted = errors.New("archive reading interrupted") - -// parser.go implements the parsing of the low-level archive format -// The low level archive format is defined as zero or more blocks -// where each block is defined as: -// a header BSON document -// zero or more body BSON documents -// a four byte terminator (0xFFFFFFFF) - -// ParserConsumer is the interface that one needs to implement to consume data from the Parser -type ParserConsumer interface { - HeaderBSON([]byte) error - BodyBSON([]byte) error - End() error -} - -// Parser encapsulates the small amount of state that the parser needs to keep -type Parser struct { - In io.Reader - buf [db.MaxBSONSize]byte - length int -} - -type parserError struct { - Err error - Msg string -} - -// Error is part of the Error interface. It formats a parserError for human readability. 
-func (pe *parserError) Error() string { - err := fmt.Sprintf("corruption found in archive; %v", pe.Msg) - if pe.Err != nil { - err = fmt.Sprintf("%v ( %v )", err, pe.Err) - } - return err -} - -// newParserError creates a parserError with just a message -func newParserError(msg string) error { - return &parserError{ - Msg: msg, - } -} - -// newParserWrappedError creates a parserError with a message as well as an underlying cause error -func newParserWrappedError(msg string, err error) error { - // If parsing was terminated intentionally, pass through that error - // instead of a parser error. - if err == errInterrupted { - return err - } - return &parserError{ - Err: err, - Msg: msg, - } -} - -// readBSONOrTerminator reads at least four bytes, determines -// if the first four bytes are a terminator, a bson length, or something else. -// If they are a terminator, true,nil are returned. If they are a BSON length, -// then the remainder of the BSON document are read in to the parser, otherwise -// an error is returned. -func (parse *Parser) readBSONOrTerminator() (isTerminator bool, err error) { - parse.length = 0 - _, err = io.ReadFull(parse.In, parse.buf[0:4]) - if err == io.EOF { - return false, err - } - if err != nil { - return false, newParserWrappedError("I/O error reading length or terminator", err) - } - size := int32( - (uint32(parse.buf[0]) << 0) | - (uint32(parse.buf[1]) << 8) | - (uint32(parse.buf[2]) << 16) | - (uint32(parse.buf[3]) << 24), - ) - if size == terminator { - return true, nil - } - if size < minBSONSize || size > db.MaxBSONSize { - return false, newParserError(fmt.Sprintf("%v is neither a valid bson length nor a archive terminator", size)) - } - // TODO Because we're reusing this same buffer for all of our IO, we are basically guaranteeing that we'll - // copy the bytes twice. At some point we should fix this. It's slightly complex, because we'll need consumer - // methods closing one buffer and acquiring another - _, err = io.ReadFull(parse.In, parse.buf[4:size]) - if err != nil { - // any error, including EOF is an error so we wrap it up - return false, newParserWrappedError("read bson", err) - } - if parse.buf[size-1] != 0x00 { - return false, newParserError(fmt.Sprintf("bson (size: %v, byte: %d) doesn't end with a null byte", size, parse.buf[size-1])) - } - parse.length = int(size) - return false, nil -} - -// ReadAllBlocks calls ReadBlock() until it returns an error. -// If the error is EOF, then nil is returned, otherwise it returns the error -func (parse *Parser) ReadAllBlocks(consumer ParserConsumer) (err error) { - for err == nil { - err = parse.ReadBlock(consumer) - } - endError := consumer.End() - if err == io.EOF { - return endError - } - return err -} - -// ReadBlock reads one archive block ( header + body* + terminator ) -// calling consumer.HeaderBSON() on the header, consumer.BodyBSON() on each piece of body, -// and consumer.EOF() when EOF is encountered before any data was read. -// It returns nil if a whole block was read, io.EOF if nothing was read, -// and a parserError if there was any io error in the middle of the block, -// if either of the consumer methods return error, or if there was any sort of -// parsing failure. 
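The ParserConsumer interface above is the parser's only integration point: HeaderBSON is invoked once per block header, BodyBSON once per body document, and End once when the stream is exhausted. A minimal sketch of driving it, assuming only the exported API visible in this file plus Prelude.Read from prelude.go; the archive path and the counter type are illustrative, not part of the package:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/mongodb/mongo-tools/common/archive"
)

// blockCounter is a hypothetical ParserConsumer that only tallies what it sees.
type blockCounter struct {
	headers, bodies int
}

func (c *blockCounter) HeaderBSON([]byte) error { c.headers++; return nil }
func (c *blockCounter) BodyBSON([]byte) error   { c.bodies++; return nil }
func (c *blockCounter) End() error              { return nil }

func main() {
	f, err := os.Open("dump.archive") // illustrative path to an uncompressed mongodump --archive file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Prelude.Read consumes the magic number and the prelude block, leaving the
	// reader positioned at the first namespace segment.
	prelude := &archive.Prelude{}
	if err := prelude.Read(f); err != nil {
		log.Fatal(err)
	}

	counter := &blockCounter{}
	if err := (&archive.Parser{In: f}).ReadAllBlocks(counter); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("blocks: %d, body documents: %d\n", counter.headers, counter.bodies)
}
```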
-func (parse *Parser) ReadBlock(consumer ParserConsumer) (err error) { - isTerminator, err := parse.readBSONOrTerminator() - if err != nil { - return err - } - if isTerminator { - return newParserError("consecutive terminators / headerless blocks are not allowed") - } - err = consumer.HeaderBSON(parse.buf[:parse.length]) - if err != nil { - return newParserWrappedError("ParserConsumer.HeaderBSON()", err) - } - for { - isTerminator, err = parse.readBSONOrTerminator() - if err != nil { // all errors, including EOF are errors here - return newParserWrappedError("ParserConsumer.BodyBSON()", err) - } - if isTerminator { - return nil - } - err = consumer.BodyBSON(parse.buf[:parse.length]) - if err != nil { - return newParserWrappedError("ParserConsumer.BodyBSON()", err) - } - } -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/archive/prelude.go b/vendor/github.com/mongodb/mongo-tools/common/archive/prelude.go deleted file mode 100644 index c9420767f..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/archive/prelude.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package archive - -import ( - "bytes" - "fmt" - "io" - "path/filepath" - "sync/atomic" - - "github.com/mongodb/mongo-tools/common/intents" - "github.com/mongodb/mongo-tools/common/log" - "github.com/mongodb/mongo-tools/common/util" - "go.mongodb.org/mongo-driver/bson" -) - -// MetadataFile implements intents.file -type MetadataFile struct { - pos int64 // updated atomically, aligned at the beginning of the struct - *bytes.Buffer - Intent *intents.Intent -} - -func (md *MetadataFile) Open() error { - return nil -} -func (md *MetadataFile) Close() error { - return nil -} - -func (md *MetadataFile) Read(p []byte) (int, error) { - n, err := md.Buffer.Read(p) - atomic.AddInt64(&md.pos, int64(n)) - return n, err -} - -func (md *MetadataFile) Pos() int64 { - return atomic.LoadInt64(&md.pos) -} - -// DirLike represents the group of methods done on directories and files in dump directories, -// or in archives, when mongorestore is figuring out what intents to create. -type DirLike interface { - Name() string - Path() string - Size() int64 - IsDir() bool - Stat() (DirLike, error) - ReadDir() ([]DirLike, error) - Parent() DirLike -} - -// Prelude represents the knowledge gleaned from reading the prelude out of the archive. -type Prelude struct { - Header *Header - DBS []string - NamespaceMetadatas []*CollectionMetadata - NamespaceMetadatasByDB map[string][]*CollectionMetadata -} - -// Read consumes and checks the magic number at the beginning of the archive, -// then it runs the parser with a Prelude as its consumer. 
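The magic-number check described above, and the matching writer in Prelude.Write further down, both hand-roll the little-endian encoding of the 32-bit magic value with shifts. The same bytes can be produced with encoding/binary; this is only an equivalence sketch and assumes MagicNumber is the exported constant this package already references:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/mongodb/mongo-tools/common/archive"
)

func main() {
	// Little-endian encoding of the 32-bit magic number, byte-for-byte the same
	// output as the shift loop in Prelude.Write and the shift-based decode in
	// Prelude.Read.
	var buf [4]byte
	binary.LittleEndian.PutUint32(buf[:], uint32(archive.MagicNumber))
	fmt.Printf("% x\n", buf[:]) // 6d e2 99 81, per the "magic-number" rule in spec.md
}
```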
-func (prelude *Prelude) Read(in io.Reader) error { - readMagicNumberBuf := make([]byte, 4) - _, err := io.ReadAtLeast(in, readMagicNumberBuf, 4) - if err != nil { - return fmt.Errorf("I/O failure reading beginning of archive: %v", err) - } - readMagicNumber := uint32( - (uint32(readMagicNumberBuf[0]) << 0) | - (uint32(readMagicNumberBuf[1]) << 8) | - (uint32(readMagicNumberBuf[2]) << 16) | - (uint32(readMagicNumberBuf[3]) << 24), - ) - - if readMagicNumber != MagicNumber { - return fmt.Errorf("stream or file does not appear to be a mongodump archive") - } - - if prelude.NamespaceMetadatasByDB != nil { - prelude.NamespaceMetadatasByDB = make(map[string][]*CollectionMetadata, 0) - } - - parser := Parser{In: in} - parserConsumer := &preludeParserConsumer{prelude: prelude} - return parser.ReadBlock(parserConsumer) -} - -// NewPrelude generates a Prelude using the contents of an intent.Manager. -func NewPrelude(manager *intents.Manager, concurrentColls int, serverVersion, toolVersion string) (*Prelude, error) { - prelude := Prelude{ - Header: &Header{ - FormatVersion: archiveFormatVersion, - ServerVersion: serverVersion, - ToolVersion: toolVersion, - ConcurrentCollections: int32(concurrentColls), - }, - NamespaceMetadatasByDB: make(map[string][]*CollectionMetadata, 0), - } - allIntents := manager.Intents() - for _, intent := range allIntents { - if intent.MetadataFile != nil { - archiveMetadata, ok := intent.MetadataFile.(*MetadataFile) - if !ok { - return nil, fmt.Errorf("MetadataFile is not an archive.Metadata") - } - prelude.AddMetadata(&CollectionMetadata{ - Database: intent.DB, - Collection: intent.C, - Metadata: archiveMetadata.Buffer.String(), - Type: intent.Type, - }) - } else { - prelude.AddMetadata(&CollectionMetadata{ - Database: intent.DB, - Collection: intent.C, - Type: intent.Type, - }) - } - } - return &prelude, nil -} - -// AddMetadata adds a metadata data structure to a prelude and does the required bookkeeping. -func (prelude *Prelude) AddMetadata(cm *CollectionMetadata) { - prelude.NamespaceMetadatas = append(prelude.NamespaceMetadatas, cm) - if prelude.NamespaceMetadatasByDB == nil { - prelude.NamespaceMetadatasByDB = make(map[string][]*CollectionMetadata) - } - _, ok := prelude.NamespaceMetadatasByDB[cm.Database] - if !ok { - prelude.DBS = append(prelude.DBS, cm.Database) - } - prelude.NamespaceMetadatasByDB[cm.Database] = append(prelude.NamespaceMetadatasByDB[cm.Database], cm) - log.Logvf(log.Info, "archive prelude %v.%v", cm.Database, cm.Collection) -} - -// Write writes the archive header. -func (prelude *Prelude) Write(out io.Writer) error { - magicNumberBytes := make([]byte, 4) - for i := range magicNumberBytes { - magicNumberBytes[i] = byte(uint32(MagicNumber) >> uint(i*8)) - } - _, err := out.Write(magicNumberBytes) - if err != nil { - return err - } - buf, err := bson.Marshal(prelude.Header) - if err != nil { - return err - } - _, err = out.Write(buf) - if err != nil { - return err - } - for _, cm := range prelude.NamespaceMetadatas { - buf, err = bson.Marshal(cm) - if err != nil { - return err - } - _, err = out.Write(buf) - if err != nil { - return err - } - } - _, err = out.Write(terminatorBytes) - if err != nil { - return err - } - return nil -} - -// preludeParserConsumer wraps a Prelude, and implements ParserConsumer. -type preludeParserConsumer struct { - prelude *Prelude -} - -// HeaderBSON is part of the ParserConsumer interface, it unmarshals archive Headers. 
-func (hpc *preludeParserConsumer) HeaderBSON(data []byte) error { - hpc.prelude.Header = &Header{} - err := bson.Unmarshal(data, hpc.prelude.Header) - if err != nil { - return err - } - return nil -} - -// BodyBSON is part of the ParserConsumer interface, it unmarshals CollectionMetadata's. -func (hpc *preludeParserConsumer) BodyBSON(data []byte) error { - cm := &CollectionMetadata{} - err := bson.Unmarshal(data, cm) - if err != nil { - return err - } - hpc.prelude.AddMetadata(cm) - return nil -} - -// BodyBSON is part of the ParserConsumer interface. -func (hpc *preludeParserConsumer) End() error { - return nil -} - -// PreludeExplorer implements DirLike. PreludeExplorer represent the databases, collections, -// and their metadata json files, of an archive, in such a way that they can be explored like a filesystem. -type PreludeExplorer struct { - prelude *Prelude - database string - collection string - isMetadata bool -} - -// NewPreludeExplorer creates a PreludeExplorer from a Prelude. -func (prelude *Prelude) NewPreludeExplorer() (*PreludeExplorer, error) { - pe := &PreludeExplorer{ - prelude: prelude, - } - return pe, nil -} - -// Name is part of the DirLike interface. It synthesizes a filename for the given "location" the prelude. -func (pe *PreludeExplorer) Name() string { - if pe.collection == "" { - return pe.database - } - if pe.isMetadata { - return util.EscapeCollectionName(pe.collection) + ".metadata.json" - } - return util.EscapeCollectionName(pe.collection) + ".bson" -} - -// Path is part of the DirLike interface. It creates the full path for the "location" in the prelude. -func (pe *PreludeExplorer) Path() string { - if pe.collection == "" { - return pe.database - } - if pe.database == "" { - return pe.Name() - } - return filepath.Join(pe.database, pe.Name()) -} - -// Size is part of the DirLike interface. It returns the size from the metadata -// of the prelude, if the "location" is a collection. -func (pe *PreludeExplorer) Size() int64 { - if pe.IsDir() { - return 0 - } - for _, ns := range pe.prelude.NamespaceMetadatas { - if ns.Database == pe.database && ns.Collection == pe.collection { - return int64(ns.Size) - } - } - return 0 -} - -// IsDir is part of the DirLike interface. All pes that are not collections are Dirs. -func (pe *PreludeExplorer) IsDir() bool { - return pe.collection == "" -} - -// Stat is part of the DirLike interface. os.Stat returns a FileInfo, and since -// DirLike is similar to FileInfo, we just return the pe, here. -func (pe *PreludeExplorer) Stat() (DirLike, error) { - return pe, nil -} - -// ReadDir is part of the DirLIke interface. ReadDir generates a list of PreludeExplorers -// whose "locations" are encapsulated by the current pes "location". 
-// -// "dump/oplog.bson" => &PreludeExplorer{ database: "", collection: "oplog.bson" } -// "dump/test/" => &PreludeExplorer{ database: "test", collection: "foo.bson" } -// "dump/test/foo.bson" => &PreludeExplorer{ database: "test", collection: "" } -// "dump/test/foo.json" => &PreludeExplorer{ database: "test", collection: "foo", isMetadata: true } -func (pe *PreludeExplorer) ReadDir() ([]DirLike, error) { - if !pe.IsDir() { - return nil, fmt.Errorf("not a directory") - } - pes := []DirLike{} - if pe.database == "" { - // when reading the top level of the archive, we need return all of the - // collections that are not bound to a database, aka, the oplog, and then all of - // the databases the prelude stores all top-level collections as collections in - // the "" database - topLevelNamespaceMetadatas, ok := pe.prelude.NamespaceMetadatasByDB[""] - if ok { - for _, topLevelNamespaceMetadata := range topLevelNamespaceMetadatas { - pes = append(pes, &PreludeExplorer{ - prelude: pe.prelude, - collection: topLevelNamespaceMetadata.Collection, - }) - if topLevelNamespaceMetadata.Metadata != "" { - pes = append(pes, &PreludeExplorer{ - prelude: pe.prelude, - collection: topLevelNamespaceMetadata.Collection, - isMetadata: true, - }) - } - } - } - for _, db := range pe.prelude.DBS { - pes = append(pes, &PreludeExplorer{ - prelude: pe.prelude, - database: db, - }) - } - } else { - // when reading the contents of a database directory, we just return all of the bson and - // json files for all of the collections bound to that database - namespaceMetadatas, ok := pe.prelude.NamespaceMetadatasByDB[pe.database] - if !ok { - return nil, fmt.Errorf("no such directory") //TODO: replace with real ERRNOs? - } - for _, namespaceMetadata := range namespaceMetadatas { - dataCollection := namespaceMetadata.Collection - if namespaceMetadata.Type == "timeseries" { - dataCollection = "system.buckets." + namespaceMetadata.Collection - } - pes = append(pes, &PreludeExplorer{ - prelude: pe.prelude, - database: pe.database, - collection: dataCollection, - }) - if namespaceMetadata.Metadata != "" { - pes = append(pes, &PreludeExplorer{ - prelude: pe.prelude, - database: pe.database, - collection: namespaceMetadata.Collection, - isMetadata: true, - }) - } - } - } - return pes, nil -} - -// Parent is part of the DirLike interface. It returns a pe without a collection, if there is one, -// otherwise, without a database. -func (pe *PreludeExplorer) Parent() DirLike { - if pe.collection != "" { - return &PreludeExplorer{ - prelude: pe.prelude, - database: pe.database, - } - } - return &PreludeExplorer{ - prelude: pe.prelude, - } -} - -// MetadataPreludeFile is part of the intents.file. It allows the metadata contained in the prelude to be opened and read -type MetadataPreludeFile struct { - pos int64 // updated atomically, aligned at the beginning of the struct - Intent *intents.Intent - Origin string - Prelude *Prelude - *bytes.Buffer -} - -// Open is part of the intents.file interface, it finds the metadata in the prelude and creates a bytes.Buffer from it. 
-func (mpf *MetadataPreludeFile) Open() error { - db, c := util.SplitNamespace(mpf.Origin) - dbMetadatas, ok := mpf.Prelude.NamespaceMetadatasByDB[db] - if !ok { - return fmt.Errorf("no metadata found for '%s'", db) - } - for _, metadata := range dbMetadatas { - if metadata.Collection == c { - mpf.Buffer = bytes.NewBufferString(metadata.Metadata) - return nil - } - } - return fmt.Errorf("no matching metadata found for '%s'", mpf.Origin) -} - -// Close is part of the intents.file interface. -func (mpf *MetadataPreludeFile) Close() error { - mpf.Buffer = nil - return nil -} - -func (mpf *MetadataPreludeFile) Read(p []byte) (int, error) { - n, err := mpf.Buffer.Read(p) - atomic.AddInt64(&mpf.pos, int64(n)) - return n, err -} - -func (mpf *MetadataPreludeFile) Pos() int64 { - return atomic.LoadInt64(&mpf.pos) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/archive/spec.md b/vendor/github.com/mongodb/mongo-tools/common/archive/spec.md deleted file mode 100644 index a195e0917..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/archive/spec.md +++ /dev/null @@ -1,105 +0,0 @@ -# The Mongodump archive format specification - -The mongodump archive format contains metadata about collections and data dumped from those -collections. Data from multiple collections can be interleaved so multiple threads can dump data -from different collections into the archive concurrently. - -Here is the definition in BNF-like syntax: - -```ebnf -archive = magic-number , - header , - *collection-metadata , - terminator-bytes , - *(namespace-segment | namespace-eof) ; - -magic-number = 0x6de29981 ; (* little-endian representation of 0x8199e26d *) - -header = document ; - -collection-metadata = document ; - -terminator-bytes = 0xffffffff ; - -namespace-segment = namespace-header , namespace-data , terminator-bytes ; - -namespace-data = +document ; - -namespace-eof = eof-header , terminator-bytes ; - -namespace-header = document ; - -eof-header = document ; -``` - -## Explanatory notes - -`document` is a BSON document as defined by the [BSON Spec](https://bsonspec.org). The fields of -each document are defined here: - -- `header`: - - ``` - { - int32 concurrent_collections, - string version, - string server_version, - string tool_version - } - ``` - - - `concurrent_collections` - the number of collections dumped concurrently by mongodump as set by - the `--numParallelCollections` options. Mongorestore will choose the larger of - `concurrent_collections` and `--numParallelCollections` to set the number of collections to - restore in parallel. - - `version` - the archive format version. Currently there is only one version, `"0.1"`. - - `server_version` - the MongoDB version of the source database. - - `tool_version` - the version of mongodump that created the archive. - -- `collection-metadata`: - ``` - { - string db, - string collection, - string metadata, - int32 size, - string type - } - ``` - - `db` - databse name. - - `collection` - collection name. - - `metadata` - the collection metadata (including options, index definitions, and collection type) - encoded in canonical - [Extended JSON v2](https://docs.mongodb.com/manual/reference/mongodb-extended-json/). This is - the same data written to `metadata.json` files. - - `size` - the total uncompressed size of the collection in bytes. - - `type` - set to `"timeseries"` for timeseries collections, `"view"` for views, and `""` - otherwise. -- `namespace-data`: One or more BSON documents from the collection. 
The collection's documents can - be split across multiple segments. -- `namespace-header`: - ``` - { - string db, - string collection, - bool EOF, - int64 CRC - } - ``` - - `db` - databse name. - - `collection` - collection name. - - `EOF` - always `false`. - - `CRC` - always `0`. -- `eof-header`: - ``` - { - string db, - string collection, - bool EOF, - int64 CRC - } - ``` - - `db` - databse name. - - `collection` - collection name. - - `EOF` - always `true`. - - `CRC` - the CRC-64-ECMA of all documents in the namespace (across all `namespace-segment`s). diff --git a/vendor/github.com/mongodb/mongo-tools/common/auth/auth_info.go b/vendor/github.com/mongodb/mongo-tools/common/auth/auth_info.go deleted file mode 100644 index 773f47e54..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/auth/auth_info.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package auth provides utilities for performing tasks related to authentication. -package auth - -import ( - "fmt" - "strings" - - "github.com/mongodb/mongo-tools/common/db" - "github.com/mongodb/mongo-tools/common/util" - "go.mongodb.org/mongo-driver/bson" -) - -// GetAuthVersion gets the authentication schema version of the connected server -// and returns that value as an integer along with any error that occurred. -func GetAuthVersion(sessionProvider *db.SessionProvider) (int, error) { - results := bson.M{} - err := sessionProvider.Run( - bson.D{ - {"getParameter", 1}, - {"authSchemaVersion", 1}, - }, - &results, - "admin", - ) - - if err != nil { - errMessage := err.Error() - // as a necessary hack, if the error message takes a certain form, - // we can infer version 1. This is because early versions of mongodb - // had no concept of an "auth schema version", so asking for the - // authSchemaVersion value will return a "no option found" or "no such cmd" - if errMessage == "no option found to get" || - strings.Contains(errMessage, "no such cmd") { - return 1, nil - } - // otherwise it's a connection error, so bubble it up - return 0, err - } - - version, err := util.ToInt(results["authSchemaVersion"]) - if err != nil { - return 0, fmt.Errorf("getParameter command returned non-numeric result: %v, error: %v", results["authSchemaVersion"], err) - } - return version, nil -} - -// VerifySystemAuthVersion returns an error if authentication is not set up for -// the given server. -func VerifySystemAuthVersion(sessionProvider *db.SessionProvider) error { - session, err := sessionProvider.GetSession() - if err != nil { - return fmt.Errorf("error getting session from server: %v", err) - } - - authSchemaQuery := bson.M{"_id": "authSchema"} - count, err := session.Database("admin").Collection("system.version").CountDocuments(nil, authSchemaQuery) - if err != nil { - return fmt.Errorf("error checking pressence of auth version: %v", err) - } else if count == 0 { - return fmt.Errorf("found no auth version") - } - return nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/bsonutil.go b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/bsonutil.go deleted file mode 100644 index ace571c8a..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/bsonutil.go +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package bsonutil provides utilities for processing BSON data. -package bsonutil - -import ( - "bytes" - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "reflect" - "strconv" - "time" - - "github.com/mongodb/mongo-tools/common/json" - "github.com/mongodb/mongo-tools/common/util" - errors2 "github.com/pkg/errors" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -var ErrNoSuchField = errors.New("no such field") - -// IsEqual marshals two documents to raw BSON and compares them. -func IsEqual(left, right bson.D) (bool, error) { - leftBytes, err := bson.Marshal(left) - if err != nil { - return false, err - } - - rightBytes, err := bson.Marshal(right) - if err != nil { - return false, err - } - - return bytes.Compare(leftBytes, rightBytes) == 0, nil -} - -// ConvertLegacyExtJSONDocumentToBSON iterates through the document map and converts JSON -// values to their corresponding BSON values. It also replaces any extended JSON -// type value (e.g. $date) with the corresponding BSON type -func ConvertLegacyExtJSONDocumentToBSON(doc map[string]interface{}) error { - for key, jsonValue := range doc { - var bsonValue interface{} - var err error - - switch v := jsonValue.(type) { - case map[string]interface{}, bson.D: // subdocument - bsonValue, err = ParseSpecialKeys(v) - default: - bsonValue, err = ConvertLegacyExtJSONValueToBSON(v) - } - if err != nil { - return err - } - - doc[key] = bsonValue - } - return nil -} - -// GetExtendedBsonD iterates through the document and returns a bson.D that adds type -// information for each key in document. -func GetExtendedBsonD(doc bson.D) (bson.D, error) { - var err error - var bsonDoc bson.D - for _, docElem := range doc { - var bsonValue interface{} - switch v := docElem.Value.(type) { - case map[string]interface{}, bson.D: // subdocument - bsonValue, err = ParseSpecialKeys(v) - default: - bsonValue, err = ConvertLegacyExtJSONValueToBSON(v) - } - if err != nil { - return nil, err - } - bsonDoc = append(bsonDoc, bson.E{ - Key: docElem.Key, - Value: bsonValue, - }) - } - return bsonDoc, nil -} - -// FindValueByKey returns the value of keyName in document. If keyName is not found -// in the top-level of the document, ErrNoSuchField is returned as the error. -func FindValueByKey(keyName string, document *bson.D) (interface{}, error) { - for _, key := range *document { - if key.Key == keyName { - return key.Value, nil - } - } - return nil, ErrNoSuchField -} - -// FindStringValueByKey returns the value of keyName in document as a String. -// Returns an error if keyName is not found in the top-level of the document, -// or if it is found but its value is not a string. -func FindStringValueByKey(keyName string, document *bson.D) (string, error) { - value, err := FindValueByKey(keyName, document) - if err != nil { - return "", err - } - str, ok := value.(string) - if !ok { - return "", fmt.Errorf("field present, but is not a string: %v", value) - } - return str, nil -} - -// FindIntByKey returns the value of keyName in the document as an int for -// either int32 or int64 underlying type. 
-func FindIntByKey(keyName string, document *bson.D) (int, error) { - raw, err := FindValueByKey(keyName, document) - if err != nil { - return 0, err - } - switch x := raw.(type) { - case int32: - return int(x), nil - case int64: - return int(x), nil - case int: - return x, nil - default: - return 0, fmt.Errorf("field '%s' is not an integer type", keyName) - } -} - -// FindSubdocumentByKey returns the value of keyName in document as a document. -// Returns an error if keyName is not found in the top-level of the document, -// or if it is found but its value is not a document. -func FindSubdocumentByKey(keyName string, document *bson.D) (bson.D, error) { - value, err := FindValueByKey(keyName, document) - if err != nil { - return bson.D{}, err - } - doc, ok := value.(bson.D) - if !ok { - return bson.D{}, fmt.Errorf("field '%s' is not a document", keyName) - } - return doc, nil -} - -// RemoveKey removes the given key. Returns the removed value and true if the -// key was found. -func RemoveKey(key string, document *bson.D) (interface{}, bool) { - if document == nil { - return nil, false - } - doc := *document - for i, elem := range doc { - if elem.Key == key { - // Remove this key. - *document = append(doc[:i], doc[i+1:]...) - return elem.Value, true - } - } - return nil, false -} - -// ParseSpecialKeys takes a JSON document and inspects it for any extended JSON -// type (e.g $numberLong) and replaces any such values with the corresponding -// BSON type. (uses legacy extJSON parser) -func ParseSpecialKeys(special interface{}) (interface{}, error) { - // first ensure we are using a correct document type - var doc map[string]interface{} - switch v := special.(type) { - case bson.D: - doc = v.Map() - case map[string]interface{}: - doc = v - default: - return nil, fmt.Errorf("%v (type %T) is not valid input to ParseSpecialKeys", special, special) - } - // check document to see if it is special - switch len(doc) { - case 1: // document has a single field - if jsonValue, ok := doc["$date"]; ok { - switch v := jsonValue.(type) { - case string: - return util.FormatDate(v) - case bson.D: - asMap := v.Map() - if jsonValue, ok := asMap["$numberLong"]; ok { - n, err := parseNumberLongField(jsonValue) - if err != nil { - return nil, err - } - return time.Unix(n/1e3, n%1e3*1e6), err - } - return nil, errors.New("expected $numberLong field in $date") - case map[string]interface{}: - if jsonValue, ok := v["$numberLong"]; ok { - n, err := parseNumberLongField(jsonValue) - if err != nil { - return nil, err - } - return time.Unix(n/1e3, n%1e3*1e6), err - } - return nil, errors.New("expected $numberLong field in $date") - - case json.Number: - n, err := v.Int64() - return time.Unix(n/1e3, n%1e3*1e6), err - case float64: - n := int64(v) - return time.Unix(n/1e3, n%1e3*1e6), nil - case int32: - n := int64(v) - return time.Unix(n/1e3, n%1e3*1e6), nil - case int64: - return time.Unix(v/1e3, v%1e3*1e6), nil - - case json.ISODate: - return v, nil - - default: - return nil, errors.New("invalid type for $date field") - } - } - - if jsonValue, ok := doc["$code"]; ok { - switch v := jsonValue.(type) { - case string: - return primitive.JavaScript(v), nil - default: - return nil, errors.New("expected $code field to have string value") - } - } - - if jsonValue, ok := doc["$oid"]; ok { - switch v := jsonValue.(type) { - case string: - return primitive.ObjectIDFromHex(v) - default: - return nil, errors.New("expected $oid field to have string value") - } - } - - if jsonValue, ok := doc["$numberLong"]; ok { - return 
parseNumberLongField(jsonValue) - } - - if jsonValue, ok := doc["$numberInt"]; ok { - switch v := jsonValue.(type) { - case string: - // all of decimal, hex, and octal are supported here - n, err := strconv.ParseInt(v, 0, 32) - return int32(n), err - - default: - return nil, errors.New("expected $numberInt field to have string value") - } - } - - if jsonValue, ok := doc["$timestamp"]; ok { - ts := json.Timestamp{} - - var tsDoc map[string]interface{} - switch internalDoc := jsonValue.(type) { - case map[string]interface{}: - tsDoc = internalDoc - case bson.D: - tsDoc = internalDoc.Map() - default: - return nil, errors.New("expected $timestamp key to have internal document") - } - - if seconds, ok := tsDoc["t"]; ok { - if asUint32, err := util.ToUInt32(seconds); err == nil { - ts.Seconds = asUint32 - } else { - return nil, errors.New("expected $timestamp 't' field to be a numeric type") - } - } else { - return nil, errors.New("expected $timestamp to have 't' field") - } - if inc, ok := tsDoc["i"]; ok { - if asUint32, err := util.ToUInt32(inc); err == nil { - ts.Increment = asUint32 - } else { - return nil, errors.New("expected $timestamp 'i' field to be a numeric type") - } - } else { - return nil, errors.New("expected $timestamp to have 'i' field") - } - // see BSON spec for details on the bit fiddling here - return primitive.Timestamp{T: uint32(ts.Seconds), I: uint32(ts.Increment)}, nil - } - - if jsonValue, ok := doc["$numberDecimal"]; ok { - switch v := jsonValue.(type) { - case string: - return primitive.ParseDecimal128(v) - default: - return nil, errors.New("expected $numberDecimal field to have string value") - } - } - - if _, ok := doc["$undefined"]; ok { - return primitive.Undefined{}, nil - } - - if _, ok := doc["$maxKey"]; ok { - return primitive.MaxKey{}, nil - } - - if _, ok := doc["$minKey"]; ok { - return primitive.MinKey{}, nil - } - - case 2: // document has two fields - if jsonValue, ok := doc["$code"]; ok { - code := primitive.CodeWithScope{} - switch v := jsonValue.(type) { - case string: - code.Code = primitive.JavaScript(v) - default: - return nil, errors.New("expected $code field to have string value") - } - - if jsonValue, ok = doc["$scope"]; ok { - switch v2 := jsonValue.(type) { - case map[string]interface{}, bson.D: - x, err := ParseSpecialKeys(v2) - if err != nil { - return nil, err - } - code.Scope = x - return code, nil - default: - return nil, errors.New("expected $scope field to contain map") - } - } else { - return nil, errors.New("expected $scope field with $code field") - } - } - - if jsonValue, ok := doc["$regex"]; ok { - regex := primitive.Regex{} - - switch pattern := jsonValue.(type) { - case string: - regex.Pattern = pattern - - default: - return nil, errors.New("expected $regex field to have string value") - } - if jsonValue, ok = doc["$options"]; !ok { - return nil, errors.New("expected $options field with $regex field") - } - - switch options := jsonValue.(type) { - case string: - regex.Options = options - - default: - return nil, errors.New("expected $options field to have string value") - } - - // Validate regular expression options - for i := range regex.Options { - switch o := regex.Options[i]; o { - default: - return nil, fmt.Errorf("invalid regular expression option '%v'", o) - - case 'g', 'i', 'm', 's': // allowed - } - } - return regex, nil - } - - if jsonValue, ok := doc["$binary"]; ok { - binary := primitive.Binary{} - - switch data := jsonValue.(type) { - case string: - bytes, err := base64.StdEncoding.DecodeString(data) - if err != nil 
{ - return nil, err - } - binary.Data = bytes - - default: - return nil, errors.New("expected $binary field to have string value") - } - if jsonValue, ok = doc["$type"]; !ok { - return nil, errors.New("expected $type field with $binary field") - } - - switch typ := jsonValue.(type) { - case string: - kind, err := hex.DecodeString(typ) - if err != nil { - return nil, err - } else if len(kind) != 1 { - return nil, errors.New("expected single byte (as hexadecimal string) for $type field") - } - binary.Subtype = kind[0] - - default: - return nil, errors.New("expected $type field to have string value") - } - return binary, nil - } - } - - // nothing matched, so we recurse deeper - switch v := special.(type) { - case bson.D: - return GetExtendedBsonD(v) - case map[string]interface{}: - return ConvertLegacyExtJSONValueToBSON(v) - default: - return nil, fmt.Errorf("%v (type %T) is not valid input to ParseSpecialKeys", special, special) - } -} - -// ParseLegacyExtJSONValue takes any value generated by the json package and returns a -// BSON version of that value. -func ParseLegacyExtJSONValue(jsonValue interface{}) (interface{}, error) { - switch v := jsonValue.(type) { - case map[string]interface{}, bson.D: // subdocument - return ParseSpecialKeys(v) - - default: - return ConvertLegacyExtJSONValueToBSON(v) - } -} - -func parseNumberLongField(jsonValue interface{}) (int64, error) { - switch v := jsonValue.(type) { - case string: - // all of decimal, hex, and octal are supported here - return strconv.ParseInt(v, 0, 64) - - default: - return 0, errors.New("expected $numberLong field to have string value") - } -} - -func Bson2Float64(data interface{}) (float64, bool) { - switch v := data.(type) { - case int32: - return float64(v), true - case int64: - return float64(v), true - case float64: - return v, true - case primitive.Decimal128: - if bi, _, err := v.BigInt(); err == nil { - intVal := bi.Int64() - return float64(intVal), true - } - return 0, false - } - return 0, false -} - -// MtoD converts a bson.M to a bson.D -func MtoD(m bson.M) bson.D { - doc := make(bson.D, 0, len(m)) - for key, value := range m { - doc = append(doc, bson.E{key, value}) - } - return doc -} - -// MarshalExtJSONReversible is a wrapper around bson.MarshalExtJSON function, -// but would return an error if it cannot be reversed by bson.UnmarshalExtJSON. -// -// It is preferred to be used in mongodump to avoid generating un-reversible ext JSON. -func MarshalExtJSONReversible(val interface{}, canonical bool, escapeHTML bool) ([]byte, error) { - jsonBytes, err := bson.MarshalExtJSON(val, canonical, escapeHTML) - if err != nil { - return nil, err - } - reversedVal := reflect.New(reflect.TypeOf(val)).Elem().Interface() - if unmarshalErr := bson.UnmarshalExtJSON(jsonBytes, canonical, &reversedVal); unmarshalErr != nil { - return nil, errors2.Wrap(unmarshalErr, "marshal is not reversible") - } - return jsonBytes, nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/converter.go b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/converter.go deleted file mode 100644 index 6c20bc1fa..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/converter.go +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonutil - -import ( - "encoding/base64" - "fmt" - "time" - - "github.com/mongodb/mongo-tools/common/json" - "github.com/mongodb/mongo-tools/common/util" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// ConvertLegacyExtJSONValueToBSON walks through a document or an array and -// replaces any extended JSON value with its corresponding BSON type. -func ConvertLegacyExtJSONValueToBSON(x interface{}) (interface{}, error) { - switch v := x.(type) { - case nil: - return nil, nil - case bool: - return v, nil - case map[string]interface{}: // document - for key, jsonValue := range v { - bsonValue, err := ParseLegacyExtJSONValue(jsonValue) - if err != nil { - return nil, err - } - v[key] = bsonValue - } - return v, nil - case bson.D: - for i := range v { - var err error - v[i].Value, err = ParseLegacyExtJSONValue(v[i].Value) - if err != nil { - return nil, err - } - } - return v, nil - - case []interface{}: // array - for i, jsonValue := range v { - bsonValue, err := ParseLegacyExtJSONValue(jsonValue) - if err != nil { - return nil, err - } - v[i] = bsonValue - } - return v, nil - - case string, float64, int32, int64: - return v, nil // require no conversion - - case json.ObjectId: // ObjectId - s := string(v) - return primitive.ObjectIDFromHex(s) - - case json.Decimal128: - return v.Decimal128, nil - - case json.Date: // Date - n := int64(v) - return time.Unix(n/1e3, n%1e3*1e6), nil - - case json.ISODate: // ISODate - n := string(v) - return util.FormatDate(n) - - case json.NumberLong: // NumberLong - return int64(v), nil - - case json.NumberInt: // NumberInt - return int32(v), nil - - case json.NumberFloat: // NumberFloat - return float64(v), nil - case json.BinData: // BinData - data, err := base64.StdEncoding.DecodeString(v.Base64) - if err != nil { - return nil, err - } - return primitive.Binary{v.Type, data}, nil - - case json.DBPointer: // DBPointer, for backwards compatibility - return primitive.DBPointer{v.Namespace, v.Id}, nil - - case json.RegExp: // RegExp - return primitive.Regex{v.Pattern, v.Options}, nil - - case json.Timestamp: // Timestamp - return primitive.Timestamp{T: v.Seconds, I: v.Increment}, nil - - case json.JavaScript: // Javascript - if v.Scope != nil { - return primitive.CodeWithScope{Code: primitive.JavaScript(v.Code), Scope: v.Scope}, nil - } - return primitive.JavaScript(v.Code), nil - - case json.MinKey: // MinKey - return primitive.MinKey{}, nil - - case json.MaxKey: // MaxKey - return primitive.MaxKey{}, nil - - case json.Undefined: // undefined - return primitive.Undefined{}, nil - - default: - return nil, fmt.Errorf("conversion of JSON value '%v' of type '%T' not supported", v, v) - } -} - -func convertKeys(v bson.M) (bson.M, error) { - for key, value := range v { - jsonValue, err := ConvertBSONValueToLegacyExtJSON(value) - if err != nil { - return nil, err - } - v[key] = jsonValue - } - return v, nil -} - -func getConvertedKeys(v bson.M) (bson.M, error) { - out := bson.M{} - for key, value := range v { - jsonValue, err := GetBSONValueAsLegacyExtJSON(value) - if err != nil { - return nil, err - } - out[key] = jsonValue - } - return out, nil -} - -func convertArray(v bson.A) ([]interface{}, error) { - for i, value := range v { - jsonValue, err := ConvertBSONValueToLegacyExtJSON(value) - if err != nil { - return nil, err - } - v[i] = jsonValue - } - return []interface{}(v), nil -} - -// ConvertBSONValueToLegacyExtJSON walks 
through a document or an array and -// converts any BSON value to its corresponding extended JSON type. -// It returns the converted JSON document and any error encountered. -func ConvertBSONValueToLegacyExtJSON(x interface{}) (interface{}, error) { - switch v := x.(type) { - case nil: - return nil, nil - case bool: - return v, nil - - case *bson.M: // document - doc, err := convertKeys(*v) - if err != nil { - return nil, err - } - return doc, err - case bson.M: // document - return convertKeys(v) - case map[string]interface{}: - return convertKeys(v) - case bson.D: - for i, value := range v { - jsonValue, err := ConvertBSONValueToLegacyExtJSON(value.Value) - if err != nil { - return nil, err - } - v[i].Value = jsonValue - } - return MarshalD(v), nil - case MarshalD: - return v, nil - case bson.A: // array - return convertArray(v) - case []interface{}: // array - return convertArray(v) - case string: - return v, nil // require no conversion - - case int: - return json.NumberInt(v), nil - - case primitive.ObjectID: // ObjectId - return json.ObjectId(v.Hex()), nil - - case primitive.Decimal128: - return json.Decimal128{v}, nil - - case primitive.DateTime: // Date - return json.Date(v), nil - - case time.Time: // Date - return json.Date(v.Unix()*1000 + int64(v.Nanosecond()/1e6)), nil - - case int64: // NumberLong - return json.NumberLong(v), nil - - case int32: // NumberInt - return json.NumberInt(v), nil - - case float64: - return json.NumberFloat(v), nil - - case float32: - return json.NumberFloat(float64(v)), nil - - case []byte: // BinData (with generic type) - data := base64.StdEncoding.EncodeToString(v) - return json.BinData{0x00, data}, nil - - case primitive.Binary: // BinData - data := base64.StdEncoding.EncodeToString(v.Data) - return json.BinData{v.Subtype, data}, nil - - case primitive.DBPointer: // DBPointer - return json.DBPointer{v.DB, v.Pointer}, nil - - case primitive.Regex: // RegExp - return json.RegExp{v.Pattern, v.Options}, nil - - case primitive.Timestamp: // Timestamp - return json.Timestamp{ - Seconds: v.T, - Increment: v.I, - }, nil - - case primitive.JavaScript: // JavaScript Code - return json.JavaScript{Code: string(v), Scope: nil}, nil - - case primitive.CodeWithScope: // JavaScript Code w/ Scope - var scope interface{} - var err error - if v.Scope != nil { - scope, err = ConvertBSONValueToLegacyExtJSON(v.Scope) - if err != nil { - return nil, err - } - } - return json.JavaScript{string(v.Code), scope}, nil - - case primitive.MaxKey: // MaxKey - return json.MaxKey{}, nil - - case primitive.MinKey: // MinKey - return json.MinKey{}, nil - - case primitive.Undefined: // undefined - return json.Undefined{}, nil - - case primitive.Null: // Null - return nil, nil - } - - return nil, fmt.Errorf("conversion of BSON value '%v' of type '%T' not supported", x, x) -} - -// GetBSONValueAsLegacyExtJSON is equivalent to ConvertBSONValueToLegacyExtJSON, but does not mutate its argument. 
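The mutating ConvertBSONValueToLegacyExtJSON above and the non-mutating GetBSONValueAsLegacyExtJSON below have the same shape: interface{} in, legacy extended-JSON representation out. A small usage sketch, assuming just the two exported helpers in this file; the sample field names and values are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mongodb/mongo-tools/common/bsonutil"
	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	doc := bson.D{
		{Key: "count", Value: int64(42)},
		{Key: "createdAt", Value: time.Unix(1700000000, 0)},
	}

	// Non-mutating variant: doc still holds int64/time.Time values afterwards,
	// while ext holds json.NumberLong and json.Date wrappers.
	ext, err := bsonutil.GetBSONValueAsLegacyExtJSON(doc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", ext)
}
```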
-func GetBSONValueAsLegacyExtJSON(x interface{}) (interface{}, error) { - switch v := x.(type) { - case nil: - return nil, nil - case bool: - return v, nil - - case *bson.M: // document - doc, err := getConvertedKeys(*v) - if err != nil { - return nil, err - } - return doc, err - case bson.M: // document - return getConvertedKeys(v) - case map[string]interface{}: - return getConvertedKeys(v) - case bson.D: - out := bson.D{} - for _, value := range v { - jsonValue, err := GetBSONValueAsLegacyExtJSON(value.Value) - if err != nil { - return nil, err - } - out = append(out, bson.E{ - Key: value.Key, - Value: jsonValue, - }) - } - return MarshalD(out), nil - case MarshalD: - out, err := GetBSONValueAsLegacyExtJSON(bson.D(v)) - if err != nil { - return nil, err - } - return MarshalD(out.(bson.D)), nil - case []interface{}: // array - out := []interface{}{} - for _, value := range v { - jsonValue, err := GetBSONValueAsLegacyExtJSON(value) - if err != nil { - return nil, err - } - out = append(out, jsonValue) - } - return out, nil - - case string: - return v, nil // require no conversion - - case int: - return json.NumberInt(v), nil - - case primitive.ObjectID: // ObjectId - return json.ObjectId(v.Hex()), nil - - case primitive.Decimal128: - return json.Decimal128{v}, nil - - case primitive.DateTime: // Date - return json.Date(v), nil - - case time.Time: // Date - return json.Date(v.Unix()*1000 + int64(v.Nanosecond()/1e6)), nil - - case int64: // NumberLong - return json.NumberLong(v), nil - - case int32: // NumberInt - return json.NumberInt(v), nil - - case float64: - return json.NumberFloat(v), nil - - case float32: - return json.NumberFloat(float64(v)), nil - - case []byte: // BinData (with generic type) - data := base64.StdEncoding.EncodeToString(v) - return json.BinData{0x00, data}, nil - - case primitive.Binary: // BinData - data := base64.StdEncoding.EncodeToString(v.Data) - return json.BinData{Type: v.Subtype, Base64: data}, nil - - case primitive.DBPointer: // DBPointer - return json.DBPointer{v.DB, v.Pointer}, nil - - case primitive.Regex: // RegExp - return json.RegExp{v.Pattern, v.Options}, nil - - case primitive.Timestamp: // Timestamp - return json.Timestamp{ - Seconds: v.T, - Increment: v.I, - }, nil - - case primitive.JavaScript: // JavaScript Code - return json.JavaScript{Code: string(v), Scope: nil}, nil - - case primitive.CodeWithScope: // JavaScript Code w/ Scope - var scope interface{} - var err error - if v.Scope != nil { - scope, err = GetBSONValueAsLegacyExtJSON(v.Scope) - if err != nil { - return nil, err - } - } - return json.JavaScript{string(v.Code), scope}, nil - - case primitive.MaxKey: // MaxKey - return json.MaxKey{}, nil - - case primitive.MinKey: // MinKey - return json.MinKey{}, nil - - case primitive.Undefined: // undefined - return json.Undefined{}, nil - - case primitive.Null: // Null - return nil, nil - } - - return nil, fmt.Errorf("conversion of BSON value '%v' of type '%T' not supported", x, x) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/indexes.go b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/indexes.go deleted file mode 100644 index 8f00257e1..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/indexes.go +++ /dev/null @@ -1,193 +0,0 @@ -package bsonutil - -import ( - "math" - "math/big" - - "github.com/mongodb/mongo-tools/common/log" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// validIndexOptions are taken from 
https://github.com/mongodb/mongo/blob/master/src/mongo/db/index/index_descriptor.h -var validIndexOptions = map[string]bool{ - "2dsphereIndexVersion": true, - "background": true, - "bits": true, - "bucketSize": true, - "coarsestIndexedLevel": true, - "collation": true, - "default_language": true, - "expireAfterSeconds": true, - "finestIndexedLevel": true, - "key": true, - "language_override": true, - "max": true, - "min": true, - "name": true, - "ns": true, - "partialFilterExpression": true, - "sparse": true, - "storageEngine": true, - "textIndexVersion": true, - "unique": true, - "v": true, - "weights": true, - "wildcardProjection": true, -} - -const epsilon = 1e-9 - -func IsIndexKeysEqual(indexKey1 bson.D, indexKey2 bson.D) bool { - if len(indexKey1) != len(indexKey2) { - // two indexes have different number of keys - return false - } - - for j, elem := range indexKey1 { - if elem.Key != indexKey2[j].Key { - return false - } - - // After ConvertLegacyIndexKeys, index key value should only be numerical or string value - switch key1Value := elem.Value.(type) { - case string: - if key2Value, ok := indexKey2[j].Value.(string); ok { - if key1Value == key2Value { - continue - } - } - return false - default: - if key1Value, ok := Bson2Float64(key1Value); ok { - if key2Value, ok := Bson2Float64(indexKey2[j].Value); ok { - if math.Abs(key1Value-key2Value) < epsilon { - continue - } - } - } - return false - } - } - return true -} - -// ConvertLegacyIndexKeys transforms the values of index definitions pre 3.4 into -// the stricter index definitions of 3.4+. Prior to 3.4, any value in an index key -// that isn't a negative number or that isn't a string is treated as int32(1). -// The one exception is an empty string is treated as int32(1). -// All other strings that aren't one of ["2d", "geoHaystack", "2dsphere", "hashed", "text", ""] -// will cause the index build to fail. See TOOLS-2412 for more information. -// -// This function logs the keys that are converted. -func ConvertLegacyIndexKeys(indexKey bson.D, ns string) { - var converted bool - originalJSONString := CreateExtJSONString(indexKey) - for j, elem := range indexKey { - switch v := elem.Value.(type) { - case int: - if v == 0 { - indexKey[j].Value = int32(1) - converted = true - } - case int32: - if v == int32(0) { - indexKey[j].Value = int32(1) - converted = true - } - case int64: - if v == int64(0) { - indexKey[j].Value = int32(1) - converted = true - } - case float64: - if math.Abs(v-float64(0)) < epsilon { - indexKey[j].Value = int32(1) - converted = true - } - case primitive.Decimal128: - if bi, _, err := v.BigInt(); err == nil { - if bi.Cmp(big.NewInt(0)) == 0 { - indexKey[j].Value = int32(1) - converted = true - } - } - case string: - // Only convert an empty string - if v == "" { - indexKey[j].Value = int32(1) - converted = true - } - default: - // Convert all types that aren't strings or numbers - indexKey[j].Value = int32(1) - converted = true - } - } - if converted { - newJSONString := CreateExtJSONString(indexKey) - log.Logvf(log.Always, "convertLegacyIndexes: converted index values '%s' to '%s' on collection '%s'", - originalJSONString, newJSONString, ns) - } -} - -// ConvertLegacyIndexOptions removes options that don't match a known list of index options. -// It is preferable to use the ignoreUnknownIndexOptions on the createIndex command to -// force the server to do this task. But that option was only added in 4.1.9. So for -// pre 3.4 indexes being added to servers 3.4 - 4.2, we must strip the options in the client. 
-// This function processes the indexes Options inside collection dump. -func ConvertLegacyIndexOptions(indexOptions bson.M) { - var converted bool - originalJSONString := CreateExtJSONString(indexOptions) - for key := range indexOptions { - if _, ok := validIndexOptions[key]; !ok { - delete(indexOptions, key) - converted = true - } - } - if converted { - newJSONString := CreateExtJSONString(indexOptions) - log.Logvf(log.Always, "convertLegacyIndexes: converted index options '%s' to '%s'", - originalJSONString, newJSONString) - } -} - -// ConvertLegacyIndexOptionsFromOp removes options that don't match a known list of index options. -// It is preferable to use the ignoreUnknownIndexOptions on the createIndex command to -// force the server to do this task. But that option was only added in 4.1.9. So for -// pre 3.4 indexes being added to servers 3.4 - 4.2, we must strip the options in the client. -// This function processes the index options inside createIndexes command. -func ConvertLegacyIndexOptionsFromOp(indexOptions *bson.D) { - var converted bool - originalJSONString := CreateExtJSONString(indexOptions) - var newIndexOptions bson.D - - for i, elem := range *indexOptions { - if _, ok := validIndexOptions[elem.Key]; !ok && elem.Key != "createIndexes" { - // Remove this key. - converted = true - } else { - newIndexOptions = append(newIndexOptions, (*indexOptions)[i]) - } - } - if converted { - *indexOptions = newIndexOptions - newJSONString := CreateExtJSONString(newIndexOptions) - log.Logvf(log.Always, "ConvertLegacyIndexOptionsFromOp: converted index options '%s' to '%s'", - originalJSONString, newJSONString) - } -} - -// CreateExtJSONString stringifies doc as Extended JSON. It does not error -// if it's unable to marshal the doc to JSON. -func CreateExtJSONString(doc interface{}) string { - // by default return """ since we don't - // want to throw an error when formatting informational messages. - // An error would be inconsequential. - JSONString := "" - JSONBytes, err := MarshalExtJSONReversible(doc, false, false) - if err == nil { - JSONString = string(JSONBytes) - } - return JSONString -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/marshal_d.go b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/marshal_d.go deleted file mode 100644 index 7424cc19a..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/marshal_d.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonutil - -import ( - "bytes" - "fmt" - - "github.com/mongodb/mongo-tools/common/json" - "github.com/mongodb/mongo-tools/common/util" - "go.mongodb.org/mongo-driver/bson" -) - -// MarshalD is a wrapper for bson.D that allows unmarshalling -// of bson.D with preserved order. Necessary for printing -// certain database commands. -type MarshalD bson.D - -// MarshalJSON makes the MarshalD type usable by -// the encoding/json package. 
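Although indexes.go leaves the vendor tree here, ConvertLegacyIndexKeys and ConvertLegacyIndexOptions stay available to PBM through the go.mod requirement on mongo-tools. A minimal illustrative sketch (not part of this change; the program below is hypothetical) of normalizing a pre-3.4 index definition with these helpers:

package main

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common/bsonutil"
	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// Illustration only: a pre-3.4 style index definition with a zero key
	// value and an option ("dropDups") that is not in validIndexOptions.
	key := bson.D{{Key: "a", Value: 0}, {Key: "b", Value: "2dsphere"}}
	opts := bson.M{"name": "a_1_b_2dsphere", "dropDups": true}

	// Both helpers mutate their arguments in place and log what they changed.
	bsonutil.ConvertLegacyIndexKeys(key, "test.coll")
	bsonutil.ConvertLegacyIndexOptions(opts)

	fmt.Println(key)  // "a" becomes int32(1); "b" keeps its valid string value
	fmt.Println(opts) // "dropDups" is stripped, "name" is kept
}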
-func (md MarshalD) MarshalJSON() ([]byte, error) { - var buff bytes.Buffer - buff.WriteString("{") - for i, item := range md { - key, err := json.Marshal(item.Key) - if err != nil { - return nil, fmt.Errorf("cannot marshal key %v: %v", item.Key, err) - } - val, err := json.Marshal(item.Value) - if err != nil { - return nil, fmt.Errorf("cannot marshal value %v: %v", item.Value, err) - } - buff.Write(key) - buff.WriteString(":") - buff.Write(val) - if i != len(md)-1 { - buff.WriteString(",") - } - } - buff.WriteString("}") - return buff.Bytes(), nil -} - -// MakeSortString takes a bson.D object and converts it to a slice of strings -// that can be used as the input args to mgo's .Sort(...) function. -// For example: -// {a:1, b:-1} -> ["+a", "-b"] -func MakeSortString(sortObj bson.D) ([]string, error) { - sortStrs := make([]string, 0, len(sortObj)) - for _, docElem := range sortObj { - valueAsNumber, err := util.ToFloat64(docElem.Value) - if err != nil { - return nil, err - } - prefix := "+" - if valueAsNumber < 0 { - prefix = "-" - } - sortStrs = append(sortStrs, fmt.Sprintf("%v%v", prefix, docElem.Key)) - } - return sortStrs, nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/number.go b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/number.go deleted file mode 100644 index 3a00bb615..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/number.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package bsonutil - -import ( - "fmt" - "reflect" -) - -var floatType = reflect.TypeOf(float64(0)) - -func getFloat(unk interface{}) (float64, error) { - v := reflect.ValueOf(unk) - v = reflect.Indirect(v) - if !v.Type().ConvertibleTo(floatType) { - return 0, fmt.Errorf("cannot convert %v to float64", v.Type()) - } - fv := v.Convert(floatType) - return fv.Float(), nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/bson_stream.go b/vendor/github.com/mongodb/mongo-tools/common/db/bson_stream.go deleted file mode 100644 index 6c69c1cd1..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/bson_stream.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package db - -import ( - "fmt" - "io" - - "go.mongodb.org/mongo-driver/bson" -) - -// BSONSource reads documents from the underlying io.ReadCloser, Stream which -// wraps a stream of BSON documents. -type BSONSource struct { - reusableBuf []byte - Stream io.ReadCloser - err error - MaxBSONSize int32 -} - -// DecodedBSONSource reads documents from the underlying io.ReadCloser, Stream which -// wraps a stream of BSON documents. -type DecodedBSONSource struct { - RawDocSource - err error -} - -// RawDocSource wraps basic functions for reading a BSON source file. 
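Likewise, MarshalD and MakeSortString from marshal_d.go remain importable from the module cache after un-vendoring. A small sketch of the ordered-JSON and sort-string behavior documented above (the program itself is hypothetical, only for illustration):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/mongodb/mongo-tools/common/bsonutil"
	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// Key order matters for many database commands, so a bson.D is wrapped
	// in MarshalD before handing it to encoding/json.
	cmd := bson.D{{Key: "createIndexes", Value: "users"}, {Key: "v", Value: 2}}
	out, err := json.Marshal(bsonutil.MarshalD(cmd))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"createIndexes":"users","v":2}

	// MakeSortString converts a sort document into mgo-style sort strings.
	sort, err := bsonutil.MakeSortString(bson.D{{Key: "a", Value: 1}, {Key: "b", Value: -1}})
	if err != nil {
		panic(err)
	}
	fmt.Println(sort) // [+a -b]
}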
-type RawDocSource interface { - LoadNext() []byte - Close() error - Err() error -} - -// NewBSONSource creates a BSONSource with a reusable I/O buffer -func NewBSONSource(in io.ReadCloser) *BSONSource { - return &BSONSource{make([]byte, MaxBSONSize), in, nil, MaxBSONSize} -} - -// NewBufferlessBSONSource creates a BSONSource without a reusable I/O buffer -func NewBufferlessBSONSource(in io.ReadCloser) *BSONSource { - return &BSONSource{nil, in, nil, MaxBSONSize} -} - -// Close closes the BSONSource, rendering it unusable for I/O. -// It returns an error, if any. -func (bs *BSONSource) Close() error { - return bs.Stream.Close() -} - -func NewDecodedBSONSource(ds RawDocSource) *DecodedBSONSource { - return &DecodedBSONSource{ds, nil} -} - -// Err returns any error in the DecodedBSONSource or its RawDocSource. -func (dbs *DecodedBSONSource) Err() error { - if dbs.err != nil { - return dbs.err - } - return dbs.RawDocSource.Err() -} - -// NextGBSON unmarshals the next BSON document into result using the official go driver. Returns true if no errors are -// encountered and false otherwise. This function does NOT zero out the result before writing to it. -func (dbs *DecodedBSONSource) Next(result interface{}) bool { - doc := dbs.LoadNext() - if doc == nil { - return false - } - if err := bson.Unmarshal(doc, result); err != nil { - dbs.err = err - return false - } - dbs.err = nil - return true -} - -// LoadNext reads and returns the next BSON document in the stream. If the -// BSONSource was created with NewBSONSource then each returned []byte will be -// a slice of a single reused I/O buffer. If the BSONSource was created with -// NewBufferlessBSONSource then each returend []byte will be individually -// allocated -func (bs *BSONSource) LoadNext() []byte { - var into []byte - if bs.reusableBuf == nil { - into = make([]byte, 4) - } else { - into = bs.reusableBuf - } - // read the bson object size (a 4 byte integer) - _, err := io.ReadAtLeast(bs.Stream, into[0:4], 4) - if err != nil { - if err != io.EOF { - bs.err = err - return nil - } - // we hit EOF right away, so we're at the end of the stream. - bs.err = nil - return nil - } - - bsonSize := int32( - (uint32(into[0]) << 0) | - (uint32(into[1]) << 8) | - (uint32(into[2]) << 16) | - (uint32(into[3]) << 24), - ) - - // Verify that the size of the BSON object we are about to read can - // actually fit into the buffer that was provided. If not, either the BSON is - // invalid, or the buffer passed in is too small. - // Verify that we do not have an invalid BSON document with size < 5. - if bsonSize < 5 { - bs.err = fmt.Errorf("invalid BSONSize: %v bytes is less than 5 bytes", bsonSize) - return nil - } - - if bsonSize > bs.MaxBSONSize { - bs.err = fmt.Errorf("invalid BSONSize: %v bytes is larger than maximum of %d bytes", bsonSize, bs.MaxBSONSize) - return nil - } - - if int(bsonSize) > cap(into) { - bigInto := make([]byte, bsonSize) - copy(bigInto, into) - into = bigInto - if bs.reusableBuf != nil { - bs.reusableBuf = bigInto - } - } - into = into[:int(bsonSize)] - _, err = io.ReadAtLeast(bs.Stream, into[4:], int(bsonSize-4)) - if err != nil { - if err != io.EOF { - bs.err = err - return nil - } - // this case means we hit EOF but read a partial document, - // so there's a broken doc in the stream. Treat this as error. 
- bs.err = fmt.Errorf("invalid bson: %v", err) - return nil - } - - bs.err = nil - return into -} - -func (bs *BSONSource) Err() error { - return bs.err -} - -func (bs *BSONSource) SetMaxBSONSize(size int32) { - bs.MaxBSONSize = size -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/buffered_bulk.go b/vendor/github.com/mongodb/mongo-tools/common/db/buffered_bulk.go deleted file mode 100644 index 3d87d2fb6..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/buffered_bulk.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package db - -import ( - "context" - "fmt" - - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo" - "go.mongodb.org/mongo-driver/mongo/options" -) - -// The default value of maxMessageSizeBytes -// See: https://docs.mongodb.com/manual/reference/command/hello/#mongodb-data-hello.maxMessageSizeBytes -const MAX_MESSAGE_SIZE_BYTES = 48000000 - -// BufferedBulkInserter implements a bufio.Writer-like design for queuing up -// documents and inserting them in bulk when the given doc limit (or max -// message size) is reached. Must be flushed at the end to ensure that all -// documents are written. -type BufferedBulkInserter struct { - collection *mongo.Collection - writeModels []mongo.WriteModel - docLimit int - docCount int - byteCount int - byteLimit int - bulkWriteOpts *options.BulkWriteOptions - upsert bool -} - -func newBufferedBulkInserter(collection *mongo.Collection, docLimit int, ordered bool) *BufferedBulkInserter { - bb := &BufferedBulkInserter{ - collection: collection, - bulkWriteOpts: options.BulkWrite().SetOrdered(ordered), - docLimit: docLimit, - // We set the byte limit to be slightly lower than maxMessageSizeBytes so it can fit in one OP_MSG. - // This may not always be perfect, e.g. we don't count update selectors in byte totals, but it should - // be good enough to keep memory consumption in check. - byteLimit: MAX_MESSAGE_SIZE_BYTES - 100, - writeModels: make([]mongo.WriteModel, 0, docLimit), - } - return bb -} - -// NewOrderedBufferedBulkInserter returns an initialized BufferedBulkInserter for performing ordered bulk writes. -func NewOrderedBufferedBulkInserter(collection *mongo.Collection, docLimit int) *BufferedBulkInserter { - return newBufferedBulkInserter(collection, docLimit, true) -} - -// NewOrderedBufferedBulkInserter returns an initialized BufferedBulkInserter for performing unordered bulk writes. -func NewUnorderedBufferedBulkInserter(collection *mongo.Collection, docLimit int) *BufferedBulkInserter { - return newBufferedBulkInserter(collection, docLimit, false) -} - -func (bb *BufferedBulkInserter) SetOrdered(ordered bool) *BufferedBulkInserter { - bb.bulkWriteOpts.SetOrdered(ordered) - return bb -} - -func (bb *BufferedBulkInserter) SetBypassDocumentValidation(bypass bool) *BufferedBulkInserter { - bb.bulkWriteOpts.SetBypassDocumentValidation(bypass) - return bb -} - -func (bb *BufferedBulkInserter) SetUpsert(upsert bool) *BufferedBulkInserter { - bb.upsert = upsert - return bb -} - -// throw away the old bulk and init a new one -func (bb *BufferedBulkInserter) ResetBulk() { - bb.writeModels = bb.writeModels[:0] - bb.docCount = 0 - bb.byteCount = 0 -} - -// Insert adds a document to the buffer for bulk insertion. 
If the buffer becomes full, the bulk write is performed, returning -// any error that occurs. -func (bb *BufferedBulkInserter) Insert(doc interface{}) (*mongo.BulkWriteResult, error) { - rawBytes, err := bson.Marshal(doc) - if err != nil { - return nil, fmt.Errorf("bson encoding error: %v", err) - } - - return bb.InsertRaw(rawBytes) -} - -// Update adds a document to the buffer for bulk update. If the buffer becomes full, the bulk write is performed, returning -// any error that occurs. -func (bb *BufferedBulkInserter) Update(selector, update bson.D) (*mongo.BulkWriteResult, error) { - rawBytes, err := bson.Marshal(update) - if err != nil { - return nil, err - } - bb.byteCount += len(rawBytes) - - return bb.addModel(mongo.NewUpdateOneModel().SetFilter(selector).SetUpdate(rawBytes).SetUpsert(bb.upsert)) -} - -// Replace adds a document to the buffer for bulk replacement. If the buffer becomes full, the bulk write is performed, returning -// any error that occurs. -func (bb *BufferedBulkInserter) Replace(selector, replacement bson.D) (*mongo.BulkWriteResult, error) { - rawBytes, err := bson.Marshal(replacement) - if err != nil { - return nil, err - } - bb.byteCount += len(rawBytes) - - return bb.addModel(mongo.NewReplaceOneModel().SetFilter(selector).SetReplacement(rawBytes).SetUpsert(bb.upsert)) -} - -// InsertRaw adds a document, represented as raw bson bytes, to the buffer for bulk insertion. If the buffer becomes full, -// the bulk write is performed, returning any error that occurs. -func (bb *BufferedBulkInserter) InsertRaw(rawBytes []byte) (*mongo.BulkWriteResult, error) { - bb.byteCount += len(rawBytes) - - return bb.addModel(mongo.NewInsertOneModel().SetDocument(rawBytes)) -} - -// Delete adds a document to the buffer for bulk removal. If the buffer becomes full, the bulk delete is performed, returning -// any error that occurs. -func (bb *BufferedBulkInserter) Delete(selector, replacement bson.D) (*mongo.BulkWriteResult, error) { - return bb.addModel(mongo.NewDeleteOneModel().SetFilter(selector)) -} - -// addModel adds a WriteModel to the buffer. If the buffer becomes full, the bulk write is performed, returning any error -// that occurs. -func (bb *BufferedBulkInserter) addModel(model mongo.WriteModel) (*mongo.BulkWriteResult, error) { - bb.docCount++ - bb.writeModels = append(bb.writeModels, model) - - if bb.docCount >= bb.docLimit || bb.byteCount >= bb.byteLimit { - return bb.Flush() - } - - return nil, nil -} - -// Flush writes all buffered documents in one bulk write and then resets the buffer. -func (bb *BufferedBulkInserter) Flush() (*mongo.BulkWriteResult, error) { - defer bb.ResetBulk() - return bb.flush() -} - -// TryFlush writes all buffered documents in one bulk write without resetting the buffer. -func (bb *BufferedBulkInserter) TryFlush() (*mongo.BulkWriteResult, error) { - return bb.flush() -} - -func (bb *BufferedBulkInserter) flush() (*mongo.BulkWriteResult, error) { - if bb.docCount == 0 { - return nil, nil - } - - return bb.collection.BulkWrite(context.Background(), bb.writeModels, bb.bulkWriteOpts) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/command.go b/vendor/github.com/mongodb/mongo-tools/common/db/command.go deleted file mode 100644 index ad255d0bb..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/command.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package db - -import ( - "context" - "fmt" - - "github.com/mongodb/mongo-tools/common/bsonutil" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" - mopt "go.mongodb.org/mongo-driver/mongo/options" -) - -// Query flags -const ( - Snapshot = 1 << iota - LogReplay - Prefetch -) - -type NodeType string - -const ( - Mongos NodeType = "mongos" - Standalone = "standalone" - ReplSet = "replset" - Unknown = "unknown" -) - -// CommandRunner exposes functions that can be run against a server -// XXX Does anything rely on this? -type CommandRunner interface { - Run(command interface{}, out interface{}, database string) error - RunString(commandName string, out interface{}, database string) error - FindOne(db, collection string, skip int, query interface{}, sort []string, into interface{}, opts int) error - Remove(db, collection string, query interface{}) error - DatabaseNames() ([]string, error) - CollectionNames(db string) ([]string, error) -} - -// // Remove removes all documents matched by query q in the db database and c collection. -// func (sp *SessionProvider) Remove(db, c string, q interface{}) error { -// session, err := sp.GetSession() -// if err != nil { -// return err -// } -// _, err = session.Database(db).Collection(c).RemoveAll(q) -// return err -// } -// -// Run issues the provided command on the db database and unmarshals its result -// into out. - -func (sp *SessionProvider) Run(command interface{}, out interface{}, name string) error { - db := sp.DB(name) - result := db.RunCommand(context.Background(), command) - if result.Err() != nil { - return result.Err() - } - err := result.Decode(out) - if err != nil { - return err - } - return nil -} - -func (sp *SessionProvider) RunString(commandName string, out interface{}, name string) error { - command := &bson.M{commandName: 1} - return sp.Run(command, out, name) -} - -func (sp *SessionProvider) DropDatabase(dbName string) error { - return sp.DB(dbName).Drop(context.Background()) -} - -func (sp *SessionProvider) CreateCollection(dbName, collName string) error { - command := &bson.M{"create": collName} - out := &bson.Raw{} - err := sp.Run(command, out, dbName) - return err -} - -func (sp *SessionProvider) ServerVersion() (string, error) { - out := struct{ Version string }{} - err := sp.RunString("buildInfo", &out, "admin") - if err != nil { - return "", err - } - return out.Version, nil -} - -func (sp *SessionProvider) ServerVersionArray() (Version, error) { - var version Version - out := struct { - VersionArray []int32 `bson:"versionArray"` - }{} - err := sp.RunString("buildInfo", &out, "admin") - if err != nil { - return version, fmt.Errorf("error getting buildInfo: %v", err) - } - if len(out.VersionArray) < 3 { - return version, fmt.Errorf("buildInfo.versionArray had fewer than 3 elements") - } - for i := 0; i <= 2; i++ { - version[i] = int(out.VersionArray[i]) - } - return version, nil -} - -// DatabaseNames returns a slice containing the names of all the databases on the -// connected server. -func (sp *SessionProvider) DatabaseNames() ([]string, error) { - return sp.client.ListDatabaseNames(nil, bson.D{}) -} - -// CollectionNames returns the names of all the collections in the dbName database. 
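The SessionProvider command helpers deleted above (Run, RunString, ServerVersionArray) are thin wrappers over the driver's RunCommand. A minimal sketch of typical usage, assuming sp is an already-connected provider; printBuildInfo is a hypothetical helper, not taken from the removed code:

package example

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common/db"
	"go.mongodb.org/mongo-driver/bson"
)

// printBuildInfo is an illustrative sketch; it assumes sp was created with
// NewSessionProvider (defined later in this file) and is still open.
func printBuildInfo(sp *db.SessionProvider) error {
	// RunString issues {buildInfo: 1} against the named database.
	var info bson.M
	if err := sp.RunString("buildInfo", &info, "admin"); err != nil {
		return err
	}
	fmt.Println("server version:", info["version"])

	// The same information is available pre-parsed as a three-element Version.
	v, err := sp.ServerVersionArray()
	if err != nil {
		return err
	}
	fmt.Println("version array:", v)
	return nil
}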
-// func (sp *SessionProvider) CollectionNames(dbName string) ([]string, error) { -// session, err := sp.GetSession() -// if err != nil { -// return nil, err -// } -// return session.DB(dbName).CollectionNames() -// } - -// IsAtlasProxy checks if the connected SessionProvider is an atlas proxy. -func (sp *SessionProvider) IsAtlasProxy() bool { - session, err := sp.GetSession() - if err != nil { - return false - } - - // Only the atlas proxy will respond to this command without an error. - result := session.Database("admin").RunCommand( - context.Background(), - &bson.M{"atlasVersion": 1}, - ) - return result.Err() == nil -} - -// GetNodeType checks if the connected SessionProvider is a mongos, standalone, or replset, -// by looking at the result of calling isMaster. -func (sp *SessionProvider) GetNodeType() (NodeType, error) { - session, err := sp.GetSession() - if err != nil { - return Unknown, err - } - masterDoc := struct { - SetName interface{} `bson:"setName"` - Hosts interface{} `bson:"hosts"` - Msg string `bson:"msg"` - }{} - result := session.Database("admin").RunCommand( - context.Background(), - &bson.M{"ismaster": 1}, - ) - if result.Err() != nil { - return Unknown, result.Err() - } - err = result.Decode(&masterDoc) - if err != nil { - return Unknown, err - } - if masterDoc.SetName != nil || masterDoc.Hosts != nil { - return ReplSet, nil - } else if masterDoc.Msg == "isdbgrid" { - // isdbgrid is always the msg value when calling isMaster on a mongos - // see http://docs.mongodb.org/manual/core/sharded-cluster-query-router/ - return Mongos, nil - } - return Standalone, nil -} - -// IsReplicaSet returns a boolean which is true if the connected server is part -// of a replica set. -func (sp *SessionProvider) IsReplicaSet() (bool, error) { - nodeType, err := sp.GetNodeType() - if err != nil { - return false, err - } - return nodeType == ReplSet, nil -} - -// IsMongos returns true if the connected server is a mongos. -func (sp *SessionProvider) IsMongos() (bool, error) { - nodeType, err := sp.GetNodeType() - if err != nil { - return false, err - } - return nodeType == Mongos, nil -} - -// -// // SupportsWriteCommands returns true if the connected server supports write -// // commands, returns false otherwise. -// func (sp *SessionProvider) SupportsWriteCommands() (bool, error) { -// session, err := sp.GetSession() -// if err != nil { -// return false, err -// } -// masterDoc := struct { -// Ok int `bson:"ok"` -// MaxWire int `bson:"maxWireVersion"` -// }{} -// err = session.Run("isMaster", &masterDoc) -// if err != nil { -// return false, err -// } -// // the connected server supports write commands if -// // the maxWriteVersion field is present -// return (masterDoc.Ok == 1 && masterDoc.MaxWire >= 2), nil -// } - -// FindOne retuns the first document in the collection and database that matches -// the query after skip, sort and query flags are applied. -func (sp *SessionProvider) FindOne(db, collection string, skip int, query interface{}, sort interface{}, into interface{}, flags int) error { - session, err := sp.GetSession() - if err != nil { - return err - } - - if query == nil { - query = bson.D{} - } - - opts := mopt.FindOne().SetSort(sort).SetSkip(int64(skip)) - ApplyFlags(opts, flags) - - res := session.Database(db).Collection(collection).FindOne(nil, query, opts) - err = res.Decode(into) - return err -} - -// ApplyFlags applies flags to the given query session. 
-func ApplyFlags(opts *mopt.FindOneOptions, flags int) { - if flags&Snapshot > 0 { - opts.SetHint(bson.D{{"_id", 1}}) - } - if flags&LogReplay > 0 { - opts.SetOplogReplay(true) - } -} - -// RunApplyOpsCreateIndex will create index using applyOps. -// For versions that support collection UUIDs (<3.6) it uses an insert to system indexes. -// Later versions use the createIndexes command. -func (sp *SessionProvider) RunApplyOpsCreateIndex(C, DB string, index bson.D, UUID *primitive.Binary, result *interface{}) error { - var op Oplog - - // Add an index version if it is missing. An index version could be missing because - // a tool stripped it out (e.g. mongorestore strips out versions before attempting createIndex) - // or because the index came from an oplog entry from versions <3.4. - _, err := bsonutil.FindValueByKey("v", &index) - if err != nil { - index = append(index, bson.E{Key: "v", Value: 1}) - } - - if UUID != nil { - o := append(bson.D{{Key: "createIndexes", Value: C}}, index...) - - op = Oplog{ - Operation: "c", - Namespace: fmt.Sprintf("%s.$cmd", DB), - Object: o, - UI: UUID, - } - } else { - op = Oplog{ - Operation: "i", - Namespace: fmt.Sprintf("%s.system.indexes", DB), - Object: index, - } - } - - err = sp.Run(bson.D{{Key: "applyOps", Value: []Oplog{op}}}, result, DB) - if err != nil { - return fmt.Errorf("error building index: %v", err) - } - return nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/db.go b/vendor/github.com/mongodb/mongo-tools/common/db/db.go deleted file mode 100644 index fba9b94e0..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/db.go +++ /dev/null @@ -1,605 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package db implements generic connection to MongoDB, and contains -// subpackages for specific methods of connection. -package db - -import ( - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "strings" - "sync" - "time" - - "github.com/mongodb/mongo-tools/common/log" - "github.com/mongodb/mongo-tools/common/options" - "github.com/youmark/pkcs8" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo" - mopt "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/mongo/readconcern" - "go.mongodb.org/mongo-driver/mongo/readpref" - "go.mongodb.org/mongo-driver/mongo/writeconcern" - "go.mongodb.org/mongo-driver/tag" -) - -type ( - sessionFlag uint32 -) - -// Session flags. -const ( - None sessionFlag = 0 - Monotonic sessionFlag = 1 << iota - DisableSocketTimeout -) - -// MongoDB enforced limits. 
-const ( - MaxBSONSize = 16 * 1024 * 1024 // 16MB - maximum BSON document size -) - -// Default port for integration tests -const ( - DefaultTestPort = "33333" -) - -const ( - ErrLostConnection = "lost connection to server" - ErrNoReachableServers = "no reachable servers" - ErrNsNotFound = "ns not found" - // replication errors list the replset name if we are talking to a mongos, - // so we can only check for this universal prefix - ErrReplTimeoutPrefix = "waiting for replication timed out" - ErrCouldNotContactPrimaryPrefix = "could not contact primary for replica set" - ErrWriteResultsUnavailable = "write results unavailable from" - ErrCouldNotFindPrimaryPrefix = `could not find host matching read preference { mode: "primary"` - ErrUnableToTargetPrefix = "unable to target" - ErrNotMaster = "not master" - ErrConnectionRefusedSuffix = "Connection refused" - - // ignorable errors - ErrDuplicateKeyCode = 11000 - ErrFailedDocumentValidation = 121 - ErrUnacknowledgedWrite = "unacknowledged write" - - // ErrCannotInsertTimeseriesBucketsWithMixedSchema can be handled by turning TimeseriesBucketsWithMixedSchema off. - ErrCannotInsertTimeseriesBucketsWithMixedSchema = 408 -) - -var ignorableWriteErrorCodes = map[int]bool{ErrDuplicateKeyCode: true, ErrFailedDocumentValidation: true} - -const ( - continueThroughErrorFormat = "continuing through error: %v" -) - -// Used to manage database sessions -type SessionProvider struct { - sync.Mutex - - // the master client used for operations - client *mongo.Client -} - -// Returns a mongo.Client connected to the database server for which the -// session provider is configured. -func (sp *SessionProvider) GetSession() (*mongo.Client, error) { - sp.Lock() - defer sp.Unlock() - - if sp.client == nil { - return nil, errors.New("SessionProvider already closed") - } - - return sp.client, nil -} - -// Close closes the master session in the connection pool -func (sp *SessionProvider) Close() { - sp.Lock() - defer sp.Unlock() - if sp.client != nil { - _ = sp.client.Disconnect(context.Background()) - sp.client = nil - } -} - -// DB provides a database with the default read preference -func (sp *SessionProvider) DB(name string) *mongo.Database { - return sp.client.Database(name) -} - -// NewSessionProvider constructs a session provider, including a connected client. -func NewSessionProvider(opts options.ToolOptions) (*SessionProvider, error) { - client, err := configureClient(opts) - if err != nil { - return nil, fmt.Errorf("error configuring the connector: %v", err) - } - err = client.Connect(context.Background()) - if err != nil { - return nil, err - } - err = client.Ping(context.Background(), nil) - if err != nil { - return nil, fmt.Errorf("failed to connect to %s: %v", opts.URI.ParsedConnString(), err) - } - - // create the provider - return &SessionProvider{client: client}, nil -} - -// addClientCertFromFile adds a client certificate to the configuration given a path to the -// containing file and returns the certificate's subject name. 
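NewSessionProvider above is the usual entry point for these wrappers: it builds a client from ToolOptions, connects, and pings. A minimal connection sketch, assuming opts has already been populated by the caller's URI/flag parsing; the connect helper is hypothetical:

package example

import (
	"context"

	"github.com/mongodb/mongo-tools/common/db"
	"github.com/mongodb/mongo-tools/common/options"
)

// connect is an illustrative sketch. It assumes opts carries a parsed URI plus
// any auth/TLS settings, which configureClient (later in this file) turns into
// driver options.
func connect(opts options.ToolOptions) error {
	sp, err := db.NewSessionProvider(opts) // connects and pings the server
	if err != nil {
		return err
	}
	defer sp.Close()

	// The provider hands out the underlying *mongo.Client on demand.
	client, err := sp.GetSession()
	if err != nil {
		return err
	}
	return client.Ping(context.Background(), nil)
}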
-func addClientCertFromFile(cfg *tls.Config, clientFile, keyPassword string) (string, error) { - data, err := ioutil.ReadFile(clientFile) - if err != nil { - return "", err - } - - return addClientCertFromBytes(cfg, data, keyPassword) -} - -func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassword string) (string, error) { - keyData, err := ioutil.ReadFile(keyFile) - if err != nil { - return "", err - } - certData, err := ioutil.ReadFile(certFile) - if err != nil { - return "", err - } - - data := append(keyData, '\n') - data = append(data, certData...) - return addClientCertFromBytes(cfg, data, keyPassword) -} - -// addClientCertFromBytes adds a client certificate to the configuration given a path to the -// containing file and returns the certificate's subject name. -func addClientCertFromBytes(cfg *tls.Config, data []byte, keyPasswd string) (string, error) { - var currentBlock *pem.Block - var certDecodedBlock []byte - var certBlocks, keyBlocks [][]byte - - remaining := data - start := 0 - for { - currentBlock, remaining = pem.Decode(remaining) - if currentBlock == nil { - break - } - - if currentBlock.Type == "CERTIFICATE" { - certBlock := data[start : len(data)-len(remaining)] - certBlocks = append(certBlocks, certBlock) - start += len(certBlock) - - // Use the first cert block for the returned Subject string at the end. - if len(certDecodedBlock) == 0 { - certDecodedBlock = currentBlock.Bytes - } - } else if strings.HasSuffix(currentBlock.Type, "PRIVATE KEY") { - isEncrypted := x509.IsEncryptedPEMBlock(currentBlock) || strings.Contains(currentBlock.Type, "ENCRYPTED PRIVATE KEY") - if isEncrypted { - if keyPasswd == "" { - return "", fmt.Errorf("no password provided to decrypt private key") - } - - var keyBytes []byte - var err error - // Process the X.509-encrypted or PKCS-encrypted PEM block. - if x509.IsEncryptedPEMBlock(currentBlock) { - // Only covers encrypted PEM data with a DEK-Info header. - keyBytes, err = x509.DecryptPEMBlock(currentBlock, []byte(keyPasswd)) - if err != nil { - return "", err - } - } else if strings.Contains(currentBlock.Type, "ENCRYPTED") { - // The pkcs8 package only handles the PKCS #5 v2.0 scheme. - decrypted, err := pkcs8.ParsePKCS8PrivateKey(currentBlock.Bytes, []byte(keyPasswd)) - if err != nil { - return "", err - } - keyBytes, err = x509.MarshalPKCS8PrivateKey(decrypted) - if err != nil { - return "", err - } - } - - var encoded bytes.Buffer - pem.Encode(&encoded, &pem.Block{Type: currentBlock.Type, Bytes: keyBytes}) - keyBlock := encoded.Bytes() - keyBlocks = append(keyBlocks, keyBlock) - start = len(data) - len(remaining) - } else { - keyBlock := data[start : len(data)-len(remaining)] - keyBlocks = append(keyBlocks, keyBlock) - start += len(keyBlock) - } - } - } - - if len(certBlocks) == 0 { - return "", fmt.Errorf("failed to find CERTIFICATE") - } - if len(keyBlocks) == 0 { - return "", fmt.Errorf("failed to find PRIVATE KEY") - } - - cert, err := tls.X509KeyPair(bytes.Join(certBlocks, []byte("\n")), bytes.Join(keyBlocks, []byte("\n"))) - if err != nil { - return "", err - } - - cfg.Certificates = append(cfg.Certificates, cert) - - // The documentation for the tls.X509KeyPair indicates that the Leaf certificate is not - // retained. - crt, err := x509.ParseCertificate(certDecodedBlock) - if err != nil { - return "", err - } - - return crt.Subject.String(), nil -} - -// create a username for x509 authentication from an x509 certificate subject. 
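The certificate handling in this file covers combined PEM files and encrypted PKCS#1/PKCS#8 keys. For the common unencrypted case, the same effect can be sketched with the standard library alone; loadTLS below is a simplified, hypothetical stand-in, not the removed implementation:

package example

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

// loadTLS is a stdlib-only sketch of what the client-cert and CA helpers in
// this file do for an unencrypted key in a separate file.
func loadTLS(certFile, keyFile, caFile string) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}

	caPEM, err := os.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("no valid CA certificates in %s", caFile)
	}

	return &tls.Config{Certificates: []tls.Certificate{cert}, RootCAs: pool}, nil
}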
-func extractX509UsernameFromSubject(subject string) string { - // the Go x509 package gives the subject with the pairs in the reverse order from what we want. - pairs := strings.Split(subject, ",") - for left, right := 0, len(pairs)-1; left < right; left, right = left+1, right-1 { - pairs[left], pairs[right] = pairs[right], pairs[left] - } - - return strings.Join(pairs, ",") -} - -// addCACertsFromFile adds root CA certificate and all the intermediate certificates in the same file to the configuration given a path -// to the containing file. -func addCACertsFromFile(cfg *tls.Config, file string) error { - data, err := ioutil.ReadFile(file) - if err != nil { - return err - } - - if cfg.RootCAs == nil { - cfg.RootCAs = x509.NewCertPool() - } - - if cfg.RootCAs.AppendCertsFromPEM(data) == false { - return fmt.Errorf("SSL trusted server certificates file does not contain any valid certificates. File: `%v`", file) - } - return nil -} - -// configure the client according to the options set in the uri and in the provided ToolOptions, with ToolOptions having precedence. -func configureClient(opts options.ToolOptions) (*mongo.Client, error) { - if opts.URI == nil || opts.URI.ConnectionString == "" { - // XXX Normal operations shouldn't ever reach here because a URI should - // be created in options parsing, but tests still manually construct - // options and generally don't construct a URI, so we invoke the URI - // normalization routine here to correct for that. - opts.NormalizeOptionsAndURI() - } - - clientopt := mopt.Client() - cs := opts.URI.ParsedConnString() - - clientopt.Hosts = cs.Hosts - - if opts.RetryWrites != nil { - clientopt.SetRetryWrites(*opts.RetryWrites) - } - - clientopt.SetConnectTimeout(time.Duration(opts.Timeout) * time.Second) - clientopt.SetSocketTimeout(time.Duration(opts.SocketTimeout) * time.Second) - if opts.Connection.ServerSelectionTimeout > 0 { - clientopt.SetServerSelectionTimeout(time.Duration(opts.Connection.ServerSelectionTimeout) * time.Second) - } - if opts.ReplicaSetName != "" { - clientopt.SetReplicaSet(opts.ReplicaSetName) - } - - clientopt.SetAppName(opts.AppName) - if opts.Direct && len(clientopt.Hosts) == 1 { - clientopt.SetDirect(true) - t := true - clientopt.AuthenticateToAnything = &t - } - - if opts.ReadPreference != nil { - clientopt.SetReadPreference(opts.ReadPreference) - } - if opts.WriteConcern != nil { - clientopt.SetWriteConcern(opts.WriteConcern) - } else { - // If no write concern was specified, default to majority - clientopt.SetWriteConcern(writeconcern.New(writeconcern.WMajority())) - } - - if opts.Compressors != "" && opts.Compressors != "none" { - clientopt.SetCompressors(strings.Split(opts.Compressors, ",")) - } - - if cs.ZlibLevelSet { - clientopt.SetZlibLevel(cs.ZlibLevel) - } - if cs.ZstdLevelSet { - clientopt.SetZstdLevel(cs.ZstdLevel) - } - - if cs.HeartbeatIntervalSet { - clientopt.SetHeartbeatInterval(cs.HeartbeatInterval) - } - - if cs.LocalThresholdSet { - clientopt.SetLocalThreshold(cs.LocalThreshold) - } - - if cs.MaxConnIdleTimeSet { - clientopt.SetMaxConnIdleTime(cs.MaxConnIdleTime) - } - - if cs.MaxPoolSizeSet { - clientopt.SetMaxPoolSize(cs.MaxPoolSize) - } - - if cs.MinPoolSizeSet { - clientopt.SetMinPoolSize(cs.MinPoolSize) - } - - if cs.LoadBalancedSet { - clientopt.SetLoadBalanced(cs.LoadBalanced) - } - - if cs.ReadConcernLevel != "" { - rc := readconcern.New(readconcern.Level(cs.ReadConcernLevel)) - clientopt.SetReadConcern(rc) - } - - if cs.ReadPreference != "" || len(cs.ReadPreferenceTagSets) > 0 || 
cs.MaxStalenessSet { - readPrefOpts := make([]readpref.Option, 0, 1) - - tagSets := tag.NewTagSetsFromMaps(cs.ReadPreferenceTagSets) - if len(tagSets) > 0 { - readPrefOpts = append(readPrefOpts, readpref.WithTagSets(tagSets...)) - } - - if cs.MaxStaleness != 0 { - readPrefOpts = append(readPrefOpts, readpref.WithMaxStaleness(cs.MaxStaleness)) - } - - mode, err := readpref.ModeFromString(cs.ReadPreference) - if err != nil { - return nil, err - } - - readPref, err := readpref.New(mode, readPrefOpts...) - if err != nil { - return nil, err - } - - clientopt.SetReadPreference(readPref) - } - - if cs.RetryReadsSet { - clientopt.SetRetryReads(cs.RetryReads) - } - - if cs.JSet || cs.WString != "" || cs.WNumberSet || cs.WTimeoutSet { - opts := make([]writeconcern.Option, 0, 1) - - if len(cs.WString) > 0 { - opts = append(opts, writeconcern.WTagSet(cs.WString)) - } else if cs.WNumberSet { - opts = append(opts, writeconcern.W(cs.WNumber)) - } - - if cs.JSet { - opts = append(opts, writeconcern.J(cs.J)) - } - - if cs.WTimeoutSet { - opts = append(opts, writeconcern.WTimeout(cs.WTimeout)) - } - - clientopt.SetWriteConcern(writeconcern.New(opts...)) - } - - if opts.Auth != nil && opts.Auth.IsSet() { - cred := mopt.Credential{ - Username: opts.Auth.Username, - Password: opts.Auth.Password, - AuthSource: opts.GetAuthenticationDatabase(), - AuthMechanism: opts.Auth.Mechanism, - } - if cs.AuthMechanism == "MONGODB-AWS" { - cred.Username = cs.Username - cred.Password = cs.Password - cred.AuthSource = cs.AuthSource - cred.AuthMechanism = cs.AuthMechanism - cred.AuthMechanismProperties = cs.AuthMechanismProperties - } - // Technically, an empty password is possible, but the tools don't have the - // means to easily distinguish and so require a non-empty password. - if cred.Password != "" { - cred.PasswordSet = true - } - if opts.Kerberos != nil && cred.AuthMechanism == "GSSAPI" { - props := make(map[string]string) - if opts.Kerberos.Service != "" { - props["SERVICE_NAME"] = opts.Kerberos.Service - } - // XXX How do we use opts.Kerberos.ServiceHost if at all? 
- cred.AuthMechanismProperties = props - } - clientopt.SetAuth(cred) - } - - if opts.SSL != nil && opts.UseSSL { - // Error on unsupported features - if opts.SSLFipsMode { - return nil, fmt.Errorf("FIPS mode not supported") - } - if opts.SSLCRLFile != "" { - return nil, fmt.Errorf("CRL files are not supported on this platform") - } - - // #nosec G402 -- we intentionally allow old TLS versions for backwards compatibility - tlsConfig := &tls.Config{} - if opts.SSLAllowInvalidCert || opts.SSLAllowInvalidHost || opts.TLSInsecure { - tlsConfig.InsecureSkipVerify = true - } - - var x509Subject string - keyPasswd := opts.SSL.SSLPEMKeyPassword - var err error - if cs.SSLClientCertificateKeyPasswordSet && cs.SSLClientCertificateKeyPassword != nil { - keyPasswd = cs.SSLClientCertificateKeyPassword() - } - if cs.SSLClientCertificateKeyFileSet { - x509Subject, err = addClientCertFromFile(tlsConfig, cs.SSLClientCertificateKeyFile, keyPasswd) - } else if cs.SSLCertificateFileSet || cs.SSLPrivateKeyFileSet { - x509Subject, err = addClientCertFromSeparateFiles(tlsConfig, cs.SSLCertificateFile, cs.SSLPrivateKeyFile, keyPasswd) - } - if err != nil { - return nil, fmt.Errorf("error configuring client, can't load client certificate: %v", err) - } - if opts.SSLCAFile != "" { - if err := addCACertsFromFile(tlsConfig, opts.SSLCAFile); err != nil { - return nil, fmt.Errorf("error configuring client, can't load CA file: %v", err) - } - } - - // If a username wasn't specified for x509, add one from the certificate. - if clientopt.Auth != nil && strings.ToLower(clientopt.Auth.AuthMechanism) == "mongodb-x509" && clientopt.Auth.Username == "" { - // The Go x509 package gives the subject with the pairs in reverse order that we want. - clientopt.Auth.Username = extractX509UsernameFromSubject(x509Subject) - } - - clientopt.SetTLSConfig(tlsConfig) - } - - if cs.SSLDisableOCSPEndpointCheckSet { - clientopt.SetDisableOCSPEndpointCheck(cs.SSLDisableOCSPEndpointCheck) - } - - return mongo.NewClient(clientopt) -} - -// FilterError determines whether an error needs to be propagated back to the user or can be continued through. If an -// error cannot be ignored, a non-nil error is returned. If an error can be continued through, it is logged and nil is -// returned. -func FilterError(stopOnError bool, err error) error { - if err == nil || err.Error() == ErrUnacknowledgedWrite { - return nil - } - - if !stopOnError && CanIgnoreError(err) { - // Just log the error but don't propagate it. - if bwe, ok := err.(mongo.BulkWriteException); ok { - for _, be := range bwe.WriteErrors { - log.Logvf(log.Always, continueThroughErrorFormat, be.Message) - } - } else { - log.Logvf(log.Always, continueThroughErrorFormat, err) - } - return nil - } - // Propagate this error, since it's either a fatal error or the user has turned on --stopOnError - return err -} - -// Returns whether the tools can continue when encountering the given error. -// Currently, only DuplicateKeyErrors are ignorable. 
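FilterError and CanIgnoreError above implement the tools' continue-through-errors policy: with stopOnError=false, duplicate-key and document-validation failures are logged and swallowed. A minimal sketch of that pattern with the official driver; insertLenient is a hypothetical helper:

package example

import (
	"context"

	"github.com/mongodb/mongo-tools/common/db"
	"go.mongodb.org/mongo-driver/mongo"
	mopt "go.mongodb.org/mongo-driver/mongo/options"
)

// insertLenient is an illustrative sketch of wrapping a bulk insert with
// FilterError so a restore-style loop can keep going past ignorable errors.
func insertLenient(coll *mongo.Collection, docs []interface{}, stopOnError bool) error {
	// Unordered, so one bad document does not abort the rest of the batch.
	_, err := coll.InsertMany(context.Background(), docs, mopt.InsertMany().SetOrdered(false))
	return db.FilterError(stopOnError, err)
}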
-func CanIgnoreError(err error) bool { - if err == nil { - return true - } - - switch mongoErr := err.(type) { - case mongo.WriteError: - _, ok := ignorableWriteErrorCodes[mongoErr.Code] - return ok - case mongo.BulkWriteException: - for _, writeErr := range mongoErr.WriteErrors { - if _, ok := ignorableWriteErrorCodes[writeErr.Code]; !ok { - return false - } - } - - if mongoErr.WriteConcernError != nil { - log.Logvf(log.Always, "write concern error when inserting documents: %v", mongoErr.WriteConcernError) - return false - } - return true - case mongo.CommandError: - _, ok := ignorableWriteErrorCodes[int(mongoErr.Code)] - return ok - } - - return false -} - -// Returns a boolean based on whether the given error indicates that this timeseries collection needs to be updated to set `timeseriesBucketsMayHaveMixedSchemaData` to `true`. -func TimeseriesBucketNeedsMixedSchema(err error) bool { - if err == nil { - return false - } - - switch mongoErr := err.(type) { - case mongo.WriteError: - return mongoErr.Code == ErrCannotInsertTimeseriesBucketsWithMixedSchema - - case mongo.BulkWriteException: - for _, writeErr := range mongoErr.WriteErrors { - if writeErr.Code == ErrCannotInsertTimeseriesBucketsWithMixedSchema { - return true - } - } - return false - } - return false -} - -// IsMMAPV1 returns whether the storage engine is MMAPV1. Also returns false -// if the storage engine type cannot be determined for some reason. -func IsMMAPV1(database *mongo.Database, collectionName string) (bool, error) { - // mmapv1 does not announce itself like other storage engines. Instead, - // we check for the key 'numExtents', which only occurs on MMAPV1. - const numExtents = "numExtents" - - var collStats map[string]interface{} - - singleRes := database.RunCommand(context.Background(), bson.M{"collStats": collectionName}) - - if err := singleRes.Err(); err != nil { - return false, err - } - - if err := singleRes.Decode(&collStats); err != nil { - return false, err - } - - _, ok := collStats[numExtents] - return ok, nil -} - -// GetTimeseriesCollNameFromBucket returns a timeseries collection name from its bucket collection name. -func GetTimeseriesCollNameFromBucket(bucketCollName string) (string, error) { - collName := strings.TrimPrefix(bucketCollName, "system.buckets.") - if collName == bucketCollName || collName == "" { - return "", errors.New("invalid timeseries bucket name: " + bucketCollName) - } - return collName, nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/namespaces.go b/vendor/github.com/mongodb/mongo-tools/common/db/namespaces.go deleted file mode 100644 index 95187864b..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/namespaces.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package db - -import ( - "context" - "encoding/hex" - "fmt" - "strings" - - "github.com/mongodb/mongo-tools/common/log" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/mongo" -) - -type CollectionInfo struct { - Name string `bson:"name"` - Type string `bson:"type"` - Options bson.D `bson:"options"` - Info bson.M `bson:"info"` -} - -func (ci *CollectionInfo) IsView() bool { - return ci.Type == "view" -} - -func (ci *CollectionInfo) IsTimeseries() bool { - return ci.Type == "timeseries" -} - -func (ci *CollectionInfo) IsSystemCollection() bool { - return strings.HasPrefix(ci.Name, "system.") -} - -func (ci *CollectionInfo) GetUUID() string { - if ci.Info == nil { - return "" - } - if v, ok := ci.Info["uuid"]; ok { - switch x := v.(type) { - case primitive.Binary: - if x.Subtype == 4 { - return hex.EncodeToString(x.Data) - } - default: - log.Logvf(log.DebugHigh, "unknown UUID BSON type '%T'", v) - } - } - return "" -} - -// GetIndexes returns an iterator to thethe raw index info for a collection by -// using the listIndexes command if available, or by falling back to querying -// against system.indexes (pre-3.0 systems). nil is returned if the collection -// does not exist. -func GetIndexes(coll *mongo.Collection) (*mongo.Cursor, error) { - return coll.Indexes().List(context.Background()) -} - -// Assumes that mongo.Database will normalize legacy names to omit database -// name as required by the Enumerate Collections spec -func GetCollections(database *mongo.Database, name string) (*mongo.Cursor, error) { - filter := bson.D{} - if len(name) > 0 { - filter = append(filter, primitive.E{"name", name}) - } - - cursor, err := database.ListCollections(nil, filter) - if err != nil { - return nil, err - } - - return cursor, nil -} - -func GetCollectionInfo(coll *mongo.Collection) (*CollectionInfo, error) { - iter, err := GetCollections(coll.Database(), coll.Name()) - if err != nil { - return nil, err - } - defer iter.Close(context.Background()) - comparisonName := coll.Name() - - var foundCollInfo *CollectionInfo - for iter.Next(nil) { - collInfo := &CollectionInfo{} - err = iter.Decode(collInfo) - if err != nil { - return nil, err - } - if collInfo.Name == comparisonName { - foundCollInfo = collInfo - break - } - } - if err := iter.Err(); err != nil { - return nil, err - } - return foundCollInfo, nil -} - -func StripDBFromNamespace(namespace string, dbName string) (string, error) { - namespacePrefix := dbName + "." 
- // if the collection info came from querying system.indexes (2.6 or earlier) then the - // "name" we get includes the db name as well, so we must remove it - if strings.HasPrefix(namespace, namespacePrefix) { - return namespace[len(namespacePrefix):], nil - } - return "", fmt.Errorf("namespace '%v' format is invalid - expected to start with '%v'", namespace, namespacePrefix) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/oplog.go b/vendor/github.com/mongodb/mongo-tools/common/db/oplog.go deleted file mode 100644 index 8f26fc0a7..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/oplog.go +++ /dev/null @@ -1,187 +0,0 @@ -package db - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/mongo" - mopts "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/mongo/readconcern" -) - -// ApplyOpsResponse represents the response from an 'applyOps' command. -type ApplyOpsResponse struct { - Ok bool `bson:"ok"` - ErrMsg string `bson:"errmsg"` -} - -// Oplog represents a MongoDB oplog document. -type Oplog struct { - Timestamp primitive.Timestamp `bson:"ts"` - Term *int64 `bson:"t"` - Hash *int64 `bson:"h,omitempty"` - Version int `bson:"v"` - Operation string `bson:"op"` - Namespace string `bson:"ns"` - Object bson.D `bson:"o"` - Query bson.D `bson:"o2,omitempty"` - UI *primitive.Binary `bson:"ui,omitempty"` - LSID bson.Raw `bson:"lsid,omitempty"` - TxnNumber *int64 `bson:"txnNumber,omitempty"` - PrevOpTime bson.Raw `bson:"prevOpTime,omitempty"` - MultiOpType *int `bson:"multiOpType,omitempty"` -} - -// OplogTailTime represents two ways of describing the "end" of the oplog at a -// point in time. The Latest field represents the last visible (storage -// committed) timestamp. The Restart field represents a (possibly older) -// timestamp that can be used to start tailing or copying the oplog without -// losing parts of transactions in progress. -type OplogTailTime struct { - Latest OpTime - Restart OpTime -} - -var zeroTimestamp = primitive.Timestamp{} - -// GetOpTimeFromRawOplogEntry looks up the ts (timestamp), t (term), and -// h (hash) fields in a raw oplog entry, and assigns them to an OpTime. -// If the Timestamp can't be found or is an invalid format, it throws an error. -// If the Term or Hash fields can't be found, it returns the OpTime without them. -func GetOpTimeFromRawOplogEntry(rawOplogEntry bson.Raw) (OpTime, error) { - // Look up the timestamp and store it in an opTime. - rawTS, err := rawOplogEntry.LookupErr("ts") - if err != nil { - return OpTime{}, fmt.Errorf("raw oplog entry had no `ts` field") - } - - t, i, ok := rawTS.TimestampOK() - if !ok { - return OpTime{}, fmt.Errorf("raw oplog entry `ts` field was not a BSON timestamp") - } - - opTime := OpTime{ - Timestamp: primitive.Timestamp{T: t, I: i}, - Term: nil, - Hash: nil, - } - - // Look up the term and (if it exists) assign it to the opTime. - rawTerm, err := rawOplogEntry.LookupErr("t") - if err == nil { - term, ok := rawTerm.Int64OK() - if !ok { - return OpTime{}, fmt.Errorf("raw oplog entry `t` field was not a BSON int64") - } - opTime.Term = &term - } - - // Look up the hash and (if it exists) assign it to the opTime. 
- rawHash, err := rawOplogEntry.LookupErr("h") - if err == nil { - hash, ok := rawHash.Int64OK() - if !ok { - return OpTime{}, fmt.Errorf("raw oplog entry `h` field was not a BSON int64") - } - opTime.Hash = &hash - } - - return opTime, nil -} - -// GetOplogTailTime constructs an OplogTailTime -func GetOplogTailTime(client *mongo.Client) (OplogTailTime, error) { - // Check oldest active first to be sure it is less-than-or-equal to the - // latest visible. - oldestActive, err := GetOldestActiveTransactionOpTime(client) - if err != nil { - return OplogTailTime{}, err - } - latestVisible, err := GetLatestVisibleOplogOpTime(client) - if err != nil { - return OplogTailTime{}, err - } - // No oldest active means the latest visible is the restart time as well. - if OpTimeIsEmpty(oldestActive) { - return OplogTailTime{Latest: latestVisible, Restart: latestVisible}, nil - } - return OplogTailTime{Latest: latestVisible, Restart: oldestActive}, nil -} - -// GetOldestActiveTransactionOpTime returns the oldest active transaction -// optime from the config.transactions table or else a zero-value db.OpTime{} -func GetOldestActiveTransactionOpTime(client *mongo.Client) (OpTime, error) { - coll := client.Database("config").Collection("transactions", mopts.Collection().SetReadConcern(readconcern.Local())) - filter := bson.D{{"state", bson.D{{"$in", bson.A{"prepared", "inProgress"}}}}} - opts := mopts.FindOne().SetSort(bson.D{{"startOpTime", 1}}) - - result, err := coll.FindOne(context.Background(), filter, opts).DecodeBytes() - if err != nil { - if err == mongo.ErrNoDocuments { - return OpTime{}, nil - } - return OpTime{}, fmt.Errorf("config.transactions.findOne error: %v", err) - } - - startOpTimeRaw := result.Lookup("startOpTime") - opTime, err := GetOpTimeFromRawOplogEntry(startOpTimeRaw.Value) - if err != nil { - return OpTime{}, errors.Wrap(err, "config.transactions error") - } - return opTime, nil -} - -// GetLatestVisibleOplogOpTime returns the optime of the most recent -// "visible" oplog record. By "visible", we mean that all prior oplog entries -// have been storage-committed. See SERVER-30724 for a more detailed description. -func GetLatestVisibleOplogOpTime(client *mongo.Client) (OpTime, error) { - latestOpTime, err := GetLatestOplogOpTime(client, bson.D{}) - if err != nil { - return OpTime{}, err - } - // Do a forward scan starting at the last op fetched to ensure that - // all operations with earlier oplog times have been storage-committed. - opts := mopts.FindOne().SetOplogReplay(true) - coll := client.Database("local").Collection("oplog.rs") - result, err := coll.FindOne(context.Background(), bson.M{"ts": bson.M{"$gte": latestOpTime.Timestamp}}, opts).DecodeBytes() - if err != nil { - if err == mongo.ErrNoDocuments { - return OpTime{}, fmt.Errorf("last op was not confirmed. last optime: %+v. confirmation time was not found", - latestOpTime) - } - return OpTime{}, err - } - - opTime, err := GetOpTimeFromRawOplogEntry(result) - if err != nil { - return OpTime{}, errors.Wrap(err, "local.oplog.rs error") - } - - if !OpTimeEquals(opTime, latestOpTime) { - return OpTime{}, fmt.Errorf("last op was not confirmed. last optime: %+v. confirmation time: %+v", - latestOpTime, opTime) - } - return latestOpTime, nil -} - -// GetLatestOplogOpTime returns the optime of the most recent oplog -// record satisfying the given `query` or a zero-value db.OpTime{} if -// no oplog record matches. This method does not ensure that all prior oplog -// entries are visible (i.e. have been storage-committed). 
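GetOplogTailTime above distinguishes the newest visible oplog entry from a safe restart point that does not split an in-progress transaction. A small usage sketch, assuming client is connected to a replica-set member; printTailTime is a hypothetical helper:

package example

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common/db"
	"go.mongodb.org/mongo-driver/mongo"
)

// printTailTime is an illustrative sketch; the helpers above read
// local.oplog.rs and config.transactions, so a replica-set connection is assumed.
func printTailTime(client *mongo.Client) error {
	tail, err := db.GetOplogTailTime(client)
	if err != nil {
		return err
	}
	// Latest is the newest storage-committed entry; Restart is the (possibly
	// older) point from which tailing can resume safely.
	fmt.Println("latest:", tail.Latest, "restart:", tail.Restart)
	return nil
}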
-func GetLatestOplogOpTime(client *mongo.Client, query interface{}) (OpTime, error) { - var record Oplog - opts := mopts.FindOne().SetProjection(bson.M{"ts": 1, "t": 1, "h": 1}).SetSort(bson.D{{"$natural", -1}}) - coll := client.Database("local").Collection("oplog.rs") - res := coll.FindOne(context.Background(), query, opts) - if err := res.Err(); err != nil { - return OpTime{}, err - } - - if err := res.Decode(&record); err != nil { - return OpTime{}, err - } - return GetOpTimeFromOplogEntry(&record), nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/optime.go b/vendor/github.com/mongodb/mongo-tools/common/db/optime.go deleted file mode 100644 index 668ded2ae..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/optime.go +++ /dev/null @@ -1,95 +0,0 @@ -package db - -import ( - "fmt" - - "github.com/mongodb/mongo-tools/common/util" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// OpTime represents the values to uniquely identify an oplog entry. -// An OpTime must always have a timestamp, but may or may not have a term. -// The hash is set uniquely up until (and including) version 4.0, but is set -// to zero in version 4.2+ with plans to remove it soon (see SERVER-36334). -type OpTime struct { - Timestamp primitive.Timestamp `json:"timestamp"` - Term *int64 `json:"term"` - Hash *int64 `json:"hash"` -} - -// GetOpTimeFromOplogEntry returns an OpTime struct from the relevant fields in an Oplog struct. -func GetOpTimeFromOplogEntry(oplogEntry *Oplog) OpTime { - return OpTime{ - Timestamp: oplogEntry.Timestamp, - Term: oplogEntry.Term, - Hash: oplogEntry.Hash, - } -} - -// OpTimeIsEmpty returns true if opTime is uninitialized, false otherwise. -func OpTimeIsEmpty(opTime OpTime) bool { - return opTime == OpTime{} -} - -// OpTimeEquals returns true if lhs equals rhs, false otherwise. -// We first check for nil / not nil mismatches between the terms and -// between the hashes. Then we check for equality between the terms and -// between the hashes (if they exist) before checking the timestamps. -func OpTimeEquals(lhs OpTime, rhs OpTime) bool { - if (lhs.Term == nil && rhs.Term != nil) || (lhs.Term != nil && rhs.Term == nil) || - (lhs.Hash == nil && rhs.Hash != nil) || (lhs.Hash != nil && rhs.Hash == nil) { - return false - } - - termsBothNilOrEqual := true - if lhs.Term != nil && rhs.Term != nil { - termsBothNilOrEqual = *lhs.Term == *rhs.Term - } - - hashesBothNilOrEqual := true - if lhs.Hash != nil && rhs.Hash != nil { - hashesBothNilOrEqual = *lhs.Hash == *rhs.Hash - } - - return lhs.Timestamp.Equal(rhs.Timestamp) && termsBothNilOrEqual && hashesBothNilOrEqual -} - -// OpTimeLessThan returns true if lhs comes before rhs, false otherwise. -// We first check if both the terms exist. If they don't or they're equal, -// we compare just the timestamps. -func OpTimeLessThan(lhs OpTime, rhs OpTime) bool { - if lhs.Term != nil && rhs.Term != nil { - if *lhs.Term == *rhs.Term { - return util.TimestampLessThan(lhs.Timestamp, rhs.Timestamp) - } - return *lhs.Term < *rhs.Term - } - - return util.TimestampLessThan(lhs.Timestamp, rhs.Timestamp) -} - -// OpTimeGreaterThan returns true if lhs comes after rhs, false otherwise. -// We first check if both the terms exist. If they don't or they're equal, -// we compare just the timestamps. 
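The OpTime comparison rules above prefer the term when both sides have one and fall back to the timestamp otherwise. A short illustrative sketch (compareOpTimes is hypothetical):

package example

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common/db"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

// compareOpTimes demonstrates the term-first ordering implemented above.
func compareOpTimes() {
	t1, t2 := int64(1), int64(2)
	a := db.OpTime{Timestamp: primitive.Timestamp{T: 100, I: 5}, Term: &t1}
	b := db.OpTime{Timestamp: primitive.Timestamp{T: 90, I: 1}, Term: &t2}

	// b has an older timestamp but a newer term, so it still sorts after a.
	fmt.Println(db.OpTimeLessThan(a, b))    // true
	fmt.Println(db.OpTimeGreaterThan(b, a)) // true
	fmt.Println(db.OpTimeEquals(a, a))      // true
}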
-func OpTimeGreaterThan(lhs OpTime, rhs OpTime) bool { - if lhs.Term != nil && rhs.Term != nil { - if *lhs.Term == *rhs.Term { - return util.TimestampGreaterThan(lhs.Timestamp, rhs.Timestamp) - } - return *lhs.Term > *rhs.Term - } - - return util.TimestampGreaterThan(lhs.Timestamp, rhs.Timestamp) -} - -func (ot OpTime) String() string { - if ot.Term != nil && ot.Hash != nil { - return fmt.Sprintf("{Timestamp: %v, Term: %v, Hash: %v}", ot.Timestamp, *ot.Term, *ot.Hash) - } else if ot.Term == nil && ot.Hash != nil { - return fmt.Sprintf("{Timestamp: %v, Term: %v, Hash: %v}", ot.Timestamp, nil, *ot.Hash) - } else if ot.Term != nil && ot.Hash == nil { - return fmt.Sprintf("{Timestamp: %v, Term: %v, Hash: %v}", ot.Timestamp, *ot.Term, nil) - } - - return fmt.Sprintf("{Timestamp: %v, Term: %v, Hash: %v}", ot.Timestamp, nil, nil) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/query.go b/vendor/github.com/mongodb/mongo-tools/common/db/query.go deleted file mode 100644 index dbd45a961..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/query.go +++ /dev/null @@ -1,56 +0,0 @@ -package db - -import ( - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo" - mopt "go.mongodb.org/mongo-driver/mongo/options" -) - -// DeferredQuery represents a deferred query -type DeferredQuery struct { - Coll *mongo.Collection - Filter interface{} - Hint interface{} - LogReplay bool -} - -// Count issues a EstimatedDocumentCount command when there is no Filter in the query and a CountDocuments command otherwise. -func (q *DeferredQuery) Count(isView bool) (int, error) { - emptyFilter := false - - filter := q.Filter - if q.Filter == nil { - emptyFilter = true - filter = bson.D{} - } else if val, ok := q.Filter.(bson.D); ok && (val == nil || len(val.Map()) == 0) { - emptyFilter = true - } else if val, ok := q.Filter.(bson.M); ok && (val == nil || len(val) == 0) { - emptyFilter = true - } - - if emptyFilter && !isView { - opt := mopt.EstimatedDocumentCount() - c, err := q.Coll.EstimatedDocumentCount(nil, opt) - return int(c), err - } - - opt := mopt.Count() - c, err := q.Coll.CountDocuments(nil, filter, opt) - return int(c), err -} - -// Iter executes a find query and returns a cursor. -func (q *DeferredQuery) Iter() (*mongo.Cursor, error) { - opts := mopt.Find() - if q.Hint != nil { - opts.SetHint(q.Hint) - } - if q.LogReplay { - opts.SetOplogReplay(true) - } - filter := q.Filter - if filter == nil { - filter = bson.D{} - } - return q.Coll.Find(nil, filter, opts) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/read_preferences.go b/vendor/github.com/mongodb/mongo-tools/common/db/read_preferences.go deleted file mode 100644 index 29dcc7af5..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/read_preferences.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package db - -import ( - "fmt" - "time" - - "github.com/mongodb/mongo-tools/common/json" - "go.mongodb.org/mongo-driver/mongo/readpref" - "go.mongodb.org/mongo-driver/tag" - "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" -) - -// readPrefDoc is a struct corresponding to the json object passed in for the --readPreference command line arg. 
-type readPrefDoc struct { - Mode *string - TagSets []map[string]string - MaxStalenessSeconds *int -} - -const ( - WarningNonPrimaryMongosConnection = "Warning: using a non-primary readPreference with a " + - "connection to mongos may produce inconsistent duplicates or miss some documents." -) - -// NewReadPreference takes a string (command line read preference argument) and a ConnString (from the command line -// URI argument) and returns a ReadPref. If both are provided, preference is given to the command line argument. If -// both are empty, a default read preference of primary will be returned. -func NewReadPreference(rp string, cs *connstring.ConnString) (*readpref.ReadPref, error) { - if rp == "" && (cs == nil || cs.ReadPreference == "") { - return readpref.Primary(), nil - } - - if rp == "" { - return readPrefFromConnString(cs) - } - - var mode string - var options []readpref.Option - if rp[0] != '{' { - mode = rp - } else { - var doc readPrefDoc - err := json.Unmarshal([]byte(rp), &doc) - if err != nil { - return nil, fmt.Errorf("invalid json object: %v", err) - } - - if doc.Mode == nil { - return nil, fmt.Errorf("no 'mode' specified") - } - mode = *doc.Mode - - if doc.TagSets != nil { - options = append(options, readpref.WithTagSets(tag.NewTagSetsFromMaps(doc.TagSets)...)) - } - - if doc.MaxStalenessSeconds != nil { - options = append(options, readpref.WithMaxStaleness(time.Duration(*doc.MaxStalenessSeconds)*time.Second)) - } - } - - rpMode, err := readpref.ModeFromString(mode) - if err != nil { - return nil, err - } - - return readpref.New(rpMode, options...) -} - -func readPrefFromConnString(cs *connstring.ConnString) (*readpref.ReadPref, error) { - var opts []readpref.Option - - tagSets := tag.NewTagSetsFromMaps(cs.ReadPreferenceTagSets) - if len(tagSets) > 0 { - opts = append(opts, readpref.WithTagSets(tagSets...)) - } - - if cs.MaxStalenessSet { - opts = append(opts, readpref.WithMaxStaleness(cs.MaxStaleness)) - } - - mode, err := readpref.ModeFromString(cs.ReadPreference) - if err != nil { - return nil, err - } - - return readpref.New(mode, opts...) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/version.go b/vendor/github.com/mongodb/mongo-tools/common/db/version.go deleted file mode 100644 index 85d81f325..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/version.go +++ /dev/null @@ -1,31 +0,0 @@ -package db - -type Version [3]int - -func (v1 Version) Cmp(v2 Version) int { - for i := range v1 { - if v1[i] < v2[i] { - return -1 - } - if v1[i] > v2[i] { - return 1 - } - } - return 0 -} - -func (v1 Version) LT(v2 Version) bool { - return v1.Cmp(v2) == -1 -} - -func (v1 Version) LTE(v2 Version) bool { - return v1.Cmp(v2) != 1 -} - -func (v1 Version) GT(v2 Version) bool { - return v1.Cmp(v2) == 1 -} - -func (v1 Version) GTE(v2 Version) bool { - return v1.Cmp(v2) != -1 -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/write_concern.go b/vendor/github.com/mongodb/mongo-tools/common/db/write_concern.go deleted file mode 100644 index f489695b3..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/db/write_concern.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package db - -import ( - "github.com/mongodb/mongo-tools/common/json" - "github.com/mongodb/mongo-tools/common/log" - "github.com/mongodb/mongo-tools/common/util" - "go.mongodb.org/mongo-driver/mongo/writeconcern" - "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" - - "fmt" - "strconv" - "time" -) - -// write concern fields -const ( - j = "j" - w = "w" - wTimeout = "wtimeout" - majString = "majority" -) - -// NewMongoWriteConcern takes a string (from the command line writeConcern option) and a ConnString object -// (from the command line uri option) and returns a WriteConcern. If both are provided, preference is given to -// the command line writeConcern option. If neither is provided, the default 'majority' write concern is constructed. -func NewMongoWriteConcern(writeConcern string, cs *connstring.ConnString) (wc *writeconcern.WriteConcern, err error) { - - // Log whatever write concern was generated - defer func() { - if wc != nil { - log.Logvf(log.Info, "using write concern: %v", wc) - } - }() - - // URI Connection String provided but no String provided case; constructWCFromConnString handles - // default for ConnString without write concern - if writeConcern == "" && cs != nil { - return constructWCFromConnString(cs) - } - - // String case; constructWCFromString handles default for empty string - return constructWCFromString(writeConcern) -} - -// constructWCFromConnString takes in a parsed connection string and -// extracts values from it. If the ConnString has no write concern value, it defaults -// to 'majority'. -func constructWCFromConnString(cs *connstring.ConnString) (*writeconcern.WriteConcern, error) { - var opts []writeconcern.Option - - switch { - case cs.WNumberSet: - if cs.WNumber < 0 { - return nil, fmt.Errorf("invalid 'w' argument: %v", cs.WNumber) - } - - opts = append(opts, writeconcern.W(cs.WNumber)) - case cs.WString != "": - opts = append(opts, writeconcern.WTagSet(cs.WString)) - default: - opts = append(opts, writeconcern.WMajority()) - } - - opts = append(opts, writeconcern.J(cs.J)) - opts = append(opts, writeconcern.WTimeout(cs.WTimeout)) - - return writeconcern.New(opts...), nil -} - -// constructWCFromString takes in a write concern and attempts to -// extract values from it. It returns an error if it is unable to parse the -// string or if a parsed write concern field value is invalid. -func constructWCFromString(writeConcern string) (*writeconcern.WriteConcern, error) { - - // Default case - if writeConcern == "" { - return writeconcern.New(writeconcern.WMajority()), nil - } - - // Try to unmarshal as JSON document - jsonWriteConcern := map[string]interface{}{} - err := json.Unmarshal([]byte(writeConcern), &jsonWriteConcern) - if err == nil { - return parseJSONWriteConcern(jsonWriteConcern) - } - - // If JSON parsing fails, try to parse it as a plain string instead. This - // allows a default to the old behavior wherein the entire argument passed - // in is assigned to the 'w' field - thus allowing users pass a write - // concern that looks like: "majority", 0, "4", etc. 
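As the comment just above notes, both a JSON document and a bare mode string are accepted; a hedged example of each form (nil stands in for the parsed connection string, so the command-line value is used):

package main

import (
	"log"

	"github.com/mongodb/mongo-tools/common/db"
)

func main() {
	// JSON form: 'w', 'j' and 'wtimeout' (milliseconds) are the recognized fields.
	wcJSON, err := db.NewMongoWriteConcern(`{"w": "majority", "j": true, "wtimeout": 5000}`, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Plain-string form: the whole argument becomes the 'w' value
	// ("majority", "0", "4", or a tag-set name).
	wcPlain, err := db.NewMongoWriteConcern("majority", nil)
	if err != nil {
		log.Fatal(err)
	}

	_, _ = wcJSON, wcPlain
}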
- wOpt, err := parseModeString(writeConcern) - if err != nil { - return nil, err - } - - return writeconcern.New(wOpt), err -} - -// parseJSONWriteConcern converts a JSON map representing a write concern object into a WriteConcern -func parseJSONWriteConcern(jsonWriteConcern map[string]interface{}) (*writeconcern.WriteConcern, error) { - var opts []writeconcern.Option - - // Construct new options from 'w', if it exists; otherwise default to 'majority' - if wVal, ok := jsonWriteConcern[w]; ok { - opt, err := parseWField(wVal) - if err != nil { - return nil, err - } - - opts = append(opts, opt) - } else { - opts = append(opts, writeconcern.WMajority()) - } - - // Journal option - if jVal, ok := jsonWriteConcern[j]; ok && util.IsTruthy(jVal) { - opts = append(opts, writeconcern.J(true)) - } - - // Wtimeout option - if wtimeout, ok := jsonWriteConcern[wTimeout]; ok { - timeoutVal, err := util.ToInt(wtimeout) - if err != nil { - return nil, fmt.Errorf("invalid '%v' argument: %v", wTimeout, wtimeout) - } - // Previous implementation assumed passed in string was milliseconds - opts = append(opts, writeconcern.WTimeout(time.Duration(timeoutVal)*time.Millisecond)) - } - - return writeconcern.New(opts...), nil -} - -func parseWField(wValue interface{}) (writeconcern.Option, error) { - // Try parsing as int - if wNumber, err := util.ToInt(wValue); err == nil { - return parseModeNumber(wNumber) - } - - // Try parsing as string - if wStrVal, ok := wValue.(string); ok { - return parseModeString(wStrVal) - } - - return nil, fmt.Errorf("invalid 'w' argument type: %v has type %T", wValue, wValue) -} - -// Given an integer, returns a write concern object or error -func parseModeNumber(wNumber int) (writeconcern.Option, error) { - if wNumber < 0 { - return nil, fmt.Errorf("invalid 'w' argument: %v", wNumber) - } - - return writeconcern.W(wNumber), nil -} - -// Given a string, returns a write concern object or error -func parseModeString(wString string) (writeconcern.Option, error) { - // Default case - if wString == "" { - return writeconcern.WMajority(), nil - } - - // Try parsing as number before treating as just a string - if wNumber, err := strconv.Atoi(wString); err == nil { - return parseModeNumber(wNumber) - } - - return writeconcern.WTagSet(wString), nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/dumprestore/config.go b/vendor/github.com/mongodb/mongo-tools/common/dumprestore/config.go deleted file mode 100644 index 5553aaf5b..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/dumprestore/config.go +++ /dev/null @@ -1,20 +0,0 @@ -package dumprestore - -// ConfigCollectionsToKeep defines a list of collections in the `config` -// database that we include by default in backups and restores. -// -// These are the only config collections that are dumped when dumping -// and entire cluster. If you set mognodump --db=config then everything -// in the config collection is included. -// -// These are the only collections that mongorestore will apply oplog events for -// if --replayOplog is set. -var ConfigCollectionsToKeep = []string{ - "chunks", - "collections", - "databases", - "settings", - "shards", - "tags", - "version", -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoint.go b/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoint.go deleted file mode 100644 index e34d1707a..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoint.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -//go:build failpoints -// +build failpoints - -// Package failpoint implements triggers for custom debugging behavoir -package failpoint - -import ( - "strings" -) - -var values map[string]string - -func init() { - values = make(map[string]string) -} - -func Reset() { - values = make(map[string]string) -} - -// ParseFailpoints registers a comma-separated list of failpoint=value pairs -func ParseFailpoints(arg string) { - args := strings.Split(arg, ",") - for _, fp := range args { - if sep := strings.Index(fp, "="); sep != -1 { - key := fp[:sep] - val := fp[sep+1:] - values[key] = val - continue - } - values[fp] = "" - } -} - -// Get returns the value of the given failpoint and true, if it exists, and -// false otherwise -func Get(fp string) (string, bool) { - val, ok := values[fp] - return val, ok -} - -// Enabled returns true iff the given failpoint has been turned on -func Enabled(fp string) bool { - _, ok := Get(fp) - return ok -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoint_disabled.go b/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoint_disabled.go deleted file mode 100644 index 9a6f6bb51..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoint_disabled.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -//go:build !failpoints -// +build !failpoints - -package failpoint - -func ParseFailpoints(_ string) { -} - -func Reset() { - return -} - -func Get(fp string) (string, bool) { - return "", false -} - -func Enabled(fp string) bool { - return false -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoints.go b/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoints.go deleted file mode 100644 index 0258ca1d2..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/failpoint/failpoints.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package failpoint - -// Supported failpoint names -const ( - PauseBeforeDumping = "PauseBeforeDumping" - SlowBSONDump = "SlowBSONDump" -) diff --git a/vendor/github.com/mongodb/mongo-tools/common/idx/idx.go b/vendor/github.com/mongodb/mongo-tools/common/idx/idx.go deleted file mode 100644 index df04b8ede..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/idx/idx.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package idx - -import ( - "fmt" - - "go.mongodb.org/mongo-driver/bson" -) - -// IndexDocument holds information about a collection's index. 
-type IndexDocument struct { - Options bson.M `bson:",inline"` - Key bson.D `bson:"key"` - PartialFilterExpression bson.D `bson:"partialFilterExpression,omitempty"` -} - -// NewIndexDocumentFromD converts a bson.D index spec into an IndexDocument -func NewIndexDocumentFromD(doc bson.D) (*IndexDocument, error) { - indexDoc := IndexDocument{} - - for _, elem := range doc { - switch elem.Key { - case "key": - if val, ok := elem.Value.(bson.D); ok { - indexDoc.Key = val - continue - } else { - return nil, fmt.Errorf("index key could not type assert to bson.D") - } - case "partialFilterExpression": - if val, ok := elem.Value.(bson.D); ok { - indexDoc.PartialFilterExpression = val - continue - } else { - return nil, fmt.Errorf("index partialFilterExpression could not type assert to bson.D") - } - default: - indexDoc.Options[elem.Key] = elem.Value - } - } - - return &indexDoc, nil -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/idx/index_catalog.go b/vendor/github.com/mongodb/mongo-tools/common/idx/index_catalog.go deleted file mode 100644 index cb7e9b21f..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/idx/index_catalog.go +++ /dev/null @@ -1,404 +0,0 @@ -package idx - -import ( - "fmt" - "strings" - "sync" - - "github.com/mongodb/mongo-tools/common/bsonutil" - "github.com/mongodb/mongo-tools/common/log" - "github.com/mongodb/mongo-tools/common/options" - "github.com/pkg/errors" - "go.mongodb.org/mongo-driver/bson" -) - -// CollectionIndexCatalog stores the current view of all indexes of a single collection. -type CollectionIndexCatalog struct { - // Maps index name to the raw index spec. - indexes map[string]*IndexDocument - simpleCollation bool -} - -// IndexCatalog stores the current view of all indexes in all databases. -type IndexCatalog struct { - sync.Mutex - // Maps database name to collection name to CollectionIndexCatalog. 
- indexes map[string]map[string]*CollectionIndexCatalog -} - -// NewIndexCatalog inits an IndexCatalog -func NewIndexCatalog() *IndexCatalog { - return &IndexCatalog{indexes: make(map[string]map[string]*CollectionIndexCatalog)} -} - -// Namespaces returns all the namespaces in the IndexCatalog -func (i *IndexCatalog) Namespaces() (namespaces []options.Namespace) { - for database, dbIndexMap := range i.indexes { - for collection := range dbIndexMap { - namespaces = append(namespaces, options.Namespace{database, collection}) - } - } - return namespaces -} - -func (i *IndexCatalog) getCollectionIndexes(database, collection string) map[string]*IndexDocument { - dbIndexes, found := i.indexes[database] - if !found { - dbIndexes = make(map[string]*CollectionIndexCatalog) - i.indexes[database] = dbIndexes - } - collIndexCatalog, found := dbIndexes[collection] - if !found { - collIndexCatalog = &CollectionIndexCatalog{ - indexes: make(map[string]*IndexDocument), - } - dbIndexes[collection] = collIndexCatalog - } - return collIndexCatalog.indexes -} - -func (i *IndexCatalog) getCollectionIndexCatalog(database, collection string) *CollectionIndexCatalog { - dbIndexes, found := i.indexes[database] - if !found { - dbIndexes = make(map[string]*CollectionIndexCatalog) - i.indexes[database] = dbIndexes - } - collIndexCatalog, found := dbIndexes[collection] - if !found { - collIndexCatalog = &CollectionIndexCatalog{ - indexes: make(map[string]*IndexDocument), - } - dbIndexes[collection] = collIndexCatalog - } - return collIndexCatalog -} - -func (i *IndexCatalog) addIndex(database, collection, indexName string, index *IndexDocument) { - i.Lock() - collIndexes := i.getCollectionIndexes(database, collection) - collIndexes[indexName] = index - i.Unlock() -} - -// AddIndex stores the given index into the index catalog. An example index: -// -// { -// "v": 2, -// "key": { -// "lastModifiedDate": 1 -// }, -// "name": "lastModifiedDate_1", -// "ns": "test.eventlog" -// } -func (i *IndexCatalog) AddIndex(database, collection string, index *IndexDocument) { - indexName, ok := index.Options["name"].(string) - if !ok { - return - } - i.addIndex(database, collection, indexName, index) -} - -// SetCollation sets if a collection has a simple collation -func (i *IndexCatalog) SetCollation(database, collection string, simpleCollation bool) { - i.Lock() - defer i.Unlock() - collIndexCatalog := i.getCollectionIndexCatalog(database, collection) - collIndexCatalog.simpleCollation = simpleCollation -} - -// AddIndexes stores the given indexes into the index catalog. 
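A short sketch of populating the catalog with the spec shape shown in the AddIndex comment (database, collection and key names are illustrative):

package main

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common/idx"
	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	catalog := idx.NewIndexCatalog()

	index := &idx.IndexDocument{
		// Everything other than "key" is carried in Options via the ",inline" tag;
		// AddIndex keys the catalog entry on Options["name"].
		Options: bson.M{"v": 2, "name": "lastModifiedDate_1"},
		Key:     bson.D{{Key: "lastModifiedDate", Value: 1}},
	}
	catalog.AddIndex("test", "eventlog", index)

	fmt.Println(catalog.GetIndex("test", "eventlog", "lastModifiedDate_1") != nil) // true
}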
-func (i *IndexCatalog) AddIndexes(database, collection string, indexes []*IndexDocument) { - for _, index := range indexes { - i.AddIndex(database, collection, index) - } -} - -// GetIndex returns an IndexDocument for a given index name -func (i *IndexCatalog) GetIndex(database, collection, indexName string) *IndexDocument { - dbIndexes, found := i.indexes[database] - if !found { - return nil - } - collIndexCatalog, found := dbIndexes[collection] - if !found { - return nil - } - indexSpec, found := collIndexCatalog.indexes[indexName] - if !found { - return nil - } - return indexSpec -} - -// String formats the IndexCatalog for debugging purposes -func (i *IndexCatalog) String() string { - var b strings.Builder - b.WriteString("IndexCatalog:\n") - for dbName, coll := range i.indexes { - for collName, collIndexCatalog := range coll { - b.WriteString(fmt.Sprintf("\t%s.%s: \n", dbName, collName)) - for indexName, indexSpec := range collIndexCatalog.indexes { - b.WriteString(fmt.Sprintf("\t\t%s: %+#v\n", indexName, indexSpec)) - } - b.WriteByte('\n') - } - } - return b.String() -} - -func hasCollationOnIndex(index *IndexDocument) bool { - if _, ok := index.Options["collation"]; ok { - return true - } - return false -} - -// GetIndexes returns all the indexes for the given collection. -// When the collection has a non-simple collation an explicit simple collation -// must be added to the indexes with no "collation field. Otherwise, the index -// will wrongfully inherit the collections's collation. -// This is necessary because indexes with the simple collation do not have a -// "collation" field in the getIndexes output. -func (i *IndexCatalog) GetIndexes(database, collection string) []*IndexDocument { - dbIndexes, found := i.indexes[database] - if !found { - return nil - } - collIndexCatalog, found := dbIndexes[collection] - if !found { - return nil - } - var syncedIndexes []*IndexDocument - for _, index := range collIndexCatalog.indexes { - if !collIndexCatalog.simpleCollation && !hasCollationOnIndex(index) { - index.Options["collation"] = bson.D{{"locale", "simple"}} - } - syncedIndexes = append(syncedIndexes, index) - } - return syncedIndexes -} - -// DropDatabase removes a database from the index catalog. -func (i *IndexCatalog) DropDatabase(database string) { - delete(i.indexes, database) -} - -// DropCollection removes a collection from the index catalog. -func (i *IndexCatalog) DropCollection(database, collection string) { - delete(i.indexes[database], collection) -} - -// DeleteIndexes removes indexes from the index catalog. dropCmd may be, -// {"deleteIndexes": "eventlog", "index": "*"} -// or, -// {"deleteIndexes": "eventlog", "index": "name_1"} -func (i *IndexCatalog) DeleteIndexes(database, collection string, dropCmd bson.D) error { - collIndexes := i.getCollectionIndexes(database, collection) - if len(collIndexes) == 0 { - // We have no indexes to drop. - return nil - } - indexValue, keyError := bsonutil.FindValueByKey("index", &dropCmd) - if keyError != nil { - return nil - } - switch indexToDrop := indexValue.(type) { - case string: - if indexToDrop == "*" { - catalog := i.getCollectionIndexCatalog(database, collection) - - var idIndexName string - var idIndex *IndexDocument - for name, doc := range catalog.indexes { - keyMap := doc.Key.Map() - if len(keyMap) == 1 { - if _, isId := keyMap["_id"]; isId { - idIndexName = name - idIndex = doc - break - } - } - } - - // Drop all non-id indexes for the collection. 
- i.DropCollection(database, collection) - if idIndex != nil { - i.addIndex(database, collection, idIndexName, idIndex) - } - return nil - } - - // Drop an index by name. - delete(collIndexes, indexToDrop) - return nil - case bson.D: - var toDelete []string - // Drop an index by key pattern. - for key, value := range collIndexes { - isEq, err := bsonutil.IsEqual(indexToDrop, value.Key) - if err != nil { - return fmt.Errorf("could not drop index on %s.%s, could not handle %v: "+ - "was unable to find matching index in indexCatalog. Error with equality test: %v", - database, collection, dropCmd[0].Key, err) - } - - if isEq { - toDelete = append(toDelete, key) - } - } - if len(toDelete) > 1 { - return fmt.Errorf("could not drop index on %s.%s: "+ - "the key %v somehow matched more than one index in the collection", database, collection, dropCmd[0].Key) - } - // Could we have 0 items in toDelete? I'm not sure, so it's best to - // avoid accessing toDelete[0]. - for _, td := range toDelete { - delete(collIndexes, td) - } - - log.Logvf(log.DebugHigh, "Must drop index on %s.%s by key pattern: %v", database, collection, indexToDrop) - return nil - default: - return fmt.Errorf("could not drop index on %s.%s, could not handle %v: "+ - "expected string or object for 'index', found: %T, %v", - database, collection, dropCmd[0].Key, indexToDrop, indexToDrop) - } -} - -func updateExpireAfterSeconds(index *IndexDocument, expire int64) error { - if _, ok := index.Options["expireAfterSeconds"]; !ok { - return errors.Errorf("missing \"expireAfterSeconds\" in matching index: %v", index) - } - index.Options["expireAfterSeconds"] = expire - return nil -} - -func updateHidden(index *IndexDocument, hidden bool) { - index.Options["hidden"] = hidden -} - -// GetIndexByIndexMod returns an index that matches the name or key pattern specified in -// a collMod command. -func (i *IndexCatalog) GetIndexByIndexMod(database, collection string, indexMod bson.D) (*IndexDocument, error) { - // Look for "name" or "keyPattern". - name, nameErr := bsonutil.FindStringValueByKey("name", &indexMod) - keyPattern, keyPatternErr := bsonutil.FindSubdocumentByKey("keyPattern", &indexMod) - switch { - case nameErr == nil && keyPatternErr == nil: - return nil, errors.Errorf("cannot specify both index name and keyPattern: %v", indexMod) - case nameErr != nil && keyPatternErr != nil: - return nil, errors.Errorf("must specify either index name (as a string) or keyPattern (as a document): %v", indexMod) - case nameErr == nil: - matchingIndex := i.GetIndex(database, collection, name) - if matchingIndex == nil { - return nil, errors.Errorf("cannot find index in indexCatalog for collMod: %v", indexMod) - } - return matchingIndex, nil - case keyPatternErr == nil: - collIndexes := i.getCollectionIndexes(database, collection) - for _, indexSpec := range collIndexes { - isEq, err := bsonutil.IsEqual(keyPattern, indexSpec.Key) - if err != nil { - return nil, fmt.Errorf("was unable to find matching index in indexCatalog. 
Error with equality test: %v", err) - } - if isEq { - return indexSpec, nil - } - } - return nil, errors.Errorf("cannot find index in indexCatalog for collMod: %v", indexMod) - default: - return nil, errors.Errorf("cannot find index in indexCatalog for collMod: %v", indexMod) - } -} - -func (i *IndexCatalog) collMod(database, collection string, indexModValue interface{}) error { - indexMod, ok := indexModValue.(bson.D) - if !ok { - return errors.Errorf("unknown collMod \"index\" modifier: %v", indexModValue) - } - - matchingIndex, err := i.GetIndexByIndexMod(database, collection, indexMod) - if err != nil { - return err - } - if matchingIndex == nil { - // Did not find an index to modify. - return errors.Errorf("cannot find index in indexCatalog for collMod: %v", indexMod) - } - - expireValue, expireKeyError := bsonutil.FindValueByKey("expireAfterSeconds", &indexMod) - if expireKeyError == nil { - newExpire, ok := expireValue.(int64) - if !ok { - return errors.Errorf("expireAfterSeconds must be a number (found %v of type %T): %v", expireValue, expireValue, indexMod) - } - err = updateExpireAfterSeconds(matchingIndex, newExpire) - if err != nil { - return err - } - } - - expireValue, hiddenKeyError := bsonutil.FindValueByKey("hidden", &indexMod) - if hiddenKeyError == nil { - newHidden, ok := expireValue.(bool) - if !ok { - return errors.Errorf("hidden must be a boolean (found %v of type %T): %v", expireValue, expireValue, indexMod) - } - updateHidden(matchingIndex, newHidden) - } - - if expireKeyError != nil && hiddenKeyError != nil { - return errors.Errorf("must specify expireAfterSeconds or hidden: %v", indexMod) - } - - // Update the index. - i.AddIndex(database, collection, matchingIndex) - return nil -} - -// CollMod, updates the corresponding TTL index if the given collModCmd -// updates the "expireAfterSeconds" or "hiddne" fields. For example, -// -// { -// "collMod": "sessions", -// "index": {"keyPattern": {"lastAccess": 1}, "expireAfterSeconds": 3600}} -// } -// -// or, -// -// { -// "collMod": "sessions", -// "index": {"name": "lastAccess_1", "expireAfterSeconds": 3600}} -// } -func (i *IndexCatalog) CollMod(database, collection string, indexModValue interface{}) error { - err := i.collMod(database, collection, indexModValue) - if err != nil { - return fmt.Errorf("could not handle collMod on %s.%s: %v", database, collection, err) - } - return nil -} - -// NamespaceQueue is a goroutine-safe queue of namespaces -type NamespaceQueue struct { - m sync.Mutex - namespaces []options.Namespace -} - -// Queue returns a namespace queue of the current namespaces in the index catalog. -func (i *IndexCatalog) Queue() *NamespaceQueue { - var namespaceQueue NamespaceQueue - namespaceQueue.namespaces = i.Namespaces() - return &namespaceQueue -} - -// Pop removes the next element from the queue and returns it. It is goroutine-safe. -func (q *NamespaceQueue) Pop() *options.Namespace { - q.m.Lock() - defer q.m.Unlock() - if len(q.namespaces) == 0 { - return nil - } - namespace := q.namespaces[0] - q.namespaces = q.namespaces[1:] - return &namespace -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/intents/intent.go b/vendor/github.com/mongodb/mongo-tools/common/intents/intent.go deleted file mode 100644 index 9a98b3637..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/intents/intent.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -// Package intents provides utilities for performing dump/restore operations. -package intents - -import ( - "fmt" - "io" - - "github.com/mongodb/mongo-tools/common/bsonutil" - "github.com/mongodb/mongo-tools/common/log" - "github.com/mongodb/mongo-tools/common/util" - "go.mongodb.org/mongo-driver/bson" -) - -type file interface { - io.ReadWriteCloser - Open() error - Pos() int64 -} - -// DestinationConflictError occurs when multiple namespaces map to the same -// destination. -type DestinationConflictError struct { - Src, Dst string -} - -func (e DestinationConflictError) Error() string { - return fmt.Sprintf("destination conflict: %s (src) => %s (dst)", e.Src, e.Dst) -} - -// FileNeedsIOBuffer is an interface that denotes that a struct needs -// an IO buffer that is managed by an outside control. This interface -// is used to both hand off a buffer to a struct and signal that it should -// release its buffer. Added to reduce memory usage as outlined in TOOLS-1088. -type FileNeedsIOBuffer interface { - TakeIOBuffer([]byte) - ReleaseIOBuffer() -} - -// mongorestore first scans the directory to generate a list -// of all files to restore and what they map to. TODO comments -type Intent struct { - // Destination namespace info - DB string - C string - - // File locations as absolute paths - BSONFile file - BSONSize int64 - MetadataFile file - - // Indicates where the intent will be read from or written to - Location string - MetadataLocation string - - // Collection options - Options bson.D - - // UUID (for MongoDB 3.6+) as a big-endian hex string - UUID string - - // File/collection size, for some prioritizer implementations. - // Units don't matter as long as they are consistent for a given use case. - Size int64 - - // Either view or timeseries. Empty string "" is a regular collection. - Type string -} - -func (it *Intent) DataNamespace() string { - return it.DB + "." + it.DataCollection() -} - -func (it *Intent) DataCollection() string { - if it.IsTimeseries() { - return "system.buckets." + it.C - } - return it.C -} - -func (it *Intent) Namespace() string { - return it.DB + "." 
+ it.C -} - -func (it *Intent) IsTimeseries() bool { - return it.Type == "timeseries" -} - -func (it *Intent) IsOplog() bool { - if it.DB == "" && it.C == "oplog" { - return true - } - return it.DB == "local" && (it.C == "oplog.rs" || it.C == "oplog.$main") -} - -func (it *Intent) IsUsers() bool { - if it.C == "$admin.system.users" { - return true - } - if it.DB == "admin" && it.C == "system.users" { - return true - } - return false -} - -func (it *Intent) IsRoles() bool { - if it.C == "$admin.system.roles" { - return true - } - if it.DB == "admin" && it.C == "system.roles" { - return true - } - return false -} - -func (it *Intent) IsAuthVersion() bool { - if it.C == "$admin.system.version" { - return true - } - if it.DB == "admin" && it.C == "system.version" { - return true - } - return false -} - -func (it *Intent) IsSystemIndexes() bool { - return it.C == "system.indexes" -} - -func (it *Intent) IsSystemProfile() bool { - return it.C == "system.profile" -} - -func (it *Intent) IsSpecialCollection() bool { - // can't see oplog as special collection because when restore from archive it need to be a RegularCollectionReceiver - return it.IsSystemIndexes() || it.IsUsers() || it.IsRoles() || it.IsAuthVersion() || it.IsSystemProfile() -} - -func (it *Intent) IsView() bool { - return it.Type == "view" -} - -func (it *Intent) MergeIntent(newIt *Intent) { - // merge new intent into old intent - if it.BSONFile == nil { - it.BSONFile = newIt.BSONFile - } - if it.Size == 0 { - it.Size = newIt.Size - } - if it.Location == "" { - it.Location = newIt.Location - } - if it.MetadataFile == nil { - it.MetadataFile = newIt.MetadataFile - } - if it.MetadataLocation == "" { - it.MetadataLocation = newIt.MetadataLocation - } -} - -// HasSimpleCollation returns true if the collection does not have a collation -// specified or if the collation locale is "simple" -func (it *Intent) HasSimpleCollation() bool { - if it == nil || it.Options == nil { - return true - } - collation, err := bsonutil.FindSubdocumentByKey("collation", &it.Options) - if err != nil { - return true - } - localeValue, _ := bsonutil.FindValueByKey("locale", &collation) - return localeValue == "simple" -} - -type Manager struct { - // intents are for all of the regular user created collections - intents map[string]*Intent - // special intents are for all of the collections that are created by mongod - // and require special handling - specialIntents map[string]*Intent - - // legacy mongorestore works in the order that paths are discovered, - // so we need an ordered data structure to preserve this behavior. - intentsByDiscoveryOrder []*Intent - - // we need different scheduling order depending on the target - // mongod/mongos and whether or not we are multi threading; - // the IntentPrioritizer interface encapsulates this. - prioritizer IntentPrioritizer - - // special cases that should be saved but not be part of the queue. - // used to deal with oplog and user/roles restoration, which are - // handled outside of the basic logic of the tool - oplogIntent *Intent - usersIntent *Intent - rolesIntent *Intent - versionIntent *Intent - indexIntents map[string]*Intent - - // Tells the manager if it should choose a single oplog when multiple are provided. - smartPickOplog bool - - // Indicates if an the manager has seen two conflicting oplogs. 
- oplogConflict bool - - // prevent conflicting destinations by checking which sources map to the - // same namespace - destinations map[string][]string -} - -func NewIntentManager() *Manager { - return &Manager{ - intents: map[string]*Intent{}, - specialIntents: map[string]*Intent{}, - intentsByDiscoveryOrder: []*Intent{}, - indexIntents: map[string]*Intent{}, - smartPickOplog: false, - oplogConflict: false, - destinations: map[string][]string{}, - } -} - -func (mgr *Manager) SetSmartPickOplog(smartPick bool) { - mgr.smartPickOplog = smartPick -} - -// HasConfigDBIntent returns a bool indicating if any of the intents refer to the "config" database. -// This can be used to check for possible unwanted conflicts before restoring to a sharded system. -func (mgr *Manager) HasConfigDBIntent() bool { - for _, intent := range mgr.intentsByDiscoveryOrder { - if intent.DB == "config" { - return true - } - } - return false -} - -// PutOplogIntent takes an intent for an oplog and stores it in the intent manager with the -// provided key. If the manager has smartPickOplog enabled, then it uses a priority system -// to determine which oplog intent to maintain as the actual oplog. -func (mgr *Manager) PutOplogIntent(intent *Intent, managerKey string) { - if mgr.smartPickOplog { - if existing := mgr.specialIntents[managerKey]; existing != nil { - existing.MergeIntent(intent) - return - } - if mgr.oplogIntent == nil { - // If there is no oplog intent, make this one the oplog. - mgr.oplogIntent = intent - mgr.specialIntents[managerKey] = intent - } else if intent.DB == "" { - // We already have an oplog and this is a top priority oplog. - if mgr.oplogIntent.DB == "" { - // If the manager's current oplog is also top priority, we have a - // conflict and ignore this oplog. - mgr.oplogConflict = true - } else { - // If the manager's current oplog is lower priority, replace it and - // move that one to be a normal intent. - mgr.putNormalIntent(mgr.oplogIntent) - delete(mgr.specialIntents, mgr.oplogIntent.Namespace()) - mgr.oplogIntent = intent - mgr.specialIntents[managerKey] = intent - } - } else { - // We already have an oplog and this is a low priority oplog. - if mgr.oplogIntent.DB != "" { - // If the manager's current oplog is also low priority, set a conflict. - mgr.oplogConflict = true - } - // No matter what, set this lower priority oplog to be a normal intent. - mgr.putNormalIntent(intent) - } - } else { - if intent.DB == "" && intent.C == "oplog" { - // If this is a normal oplog, then add it as an oplog intent. - if existing := mgr.specialIntents[managerKey]; existing != nil { - existing.MergeIntent(intent) - return - } - mgr.oplogIntent = intent - mgr.specialIntents[managerKey] = intent - } else { - mgr.putNormalIntent(intent) - } - } -} - -func (mgr *Manager) putNormalIntent(intent *Intent) { - mgr.putNormalIntentWithNamespace(intent.Namespace(), intent) -} - -func (mgr *Manager) putNormalIntentWithNamespace(ns string, intent *Intent) { - // BSON and metadata files for the same collection are merged - // into the same intent. This is done to allow for simple - // pairing of BSON + metadata without keeping track of the - // state of the filepath walker - if existing := mgr.intents[ns]; existing != nil { - if existing.Namespace() != intent.Namespace() { - // remove old destination, add new one - dst := existing.Namespace() - dsts := mgr.destinations[dst] - i := util.StringSliceIndex(dsts, ns) - mgr.destinations[dst] = append(dsts[:i], dsts[i+1:]...) 
- - dsts = mgr.destinations[intent.Namespace()] - mgr.destinations[intent.Namespace()] = append(dsts, ns) - } - existing.MergeIntent(intent) - return - } - - // if key doesn't already exist, add it to the manager - mgr.intents[ns] = intent - mgr.intentsByDiscoveryOrder = append(mgr.intentsByDiscoveryOrder, intent) - - mgr.destinations[intent.Namespace()] = append(mgr.destinations[intent.Namespace()], ns) -} - -// Put inserts an intent into the manager with the same source namespace as -// its destinations. -func (mgr *Manager) Put(intent *Intent) { - log.Logvf(log.DebugLow, "enqueued collection '%v'", intent.Namespace()) - mgr.PutWithNamespace(intent.Namespace(), intent) -} - -// PutWithNamespace inserts an intent into the manager with the source set -// to the provided namespace. Intents for the same collection are merged -// together, so that BSON and metadata files for the same collection are -// returned in the same intent. -func (mgr *Manager) PutWithNamespace(ns string, intent *Intent) { - if intent == nil { - panic("cannot insert nil *Intent into IntentManager") - } - db, _ := util.SplitNamespace(ns) - - // bucket special-case collections - if intent.IsOplog() { - mgr.PutOplogIntent(intent, intent.Namespace()) - return - } - if intent.IsSystemIndexes() { - if intent.BSONFile != nil { - mgr.indexIntents[db] = intent - mgr.specialIntents[ns] = intent - } - return - } - if intent.IsUsers() { - if intent.BSONFile != nil { - mgr.usersIntent = intent - mgr.specialIntents[ns] = intent - } - return - } - if intent.IsRoles() { - if intent.BSONFile != nil { - mgr.rolesIntent = intent - mgr.specialIntents[ns] = intent - } - return - } - if intent.IsAuthVersion() { - if intent.BSONFile != nil { - mgr.versionIntent = intent - mgr.specialIntents[ns] = intent - } - return - } - - mgr.putNormalIntentWithNamespace(ns, intent) -} - -func (mgr *Manager) GetOplogConflict() bool { - return mgr.oplogConflict -} - -func (mgr *Manager) GetDestinationConflicts() (errs []DestinationConflictError) { - for dst, srcs := range mgr.destinations { - if len(srcs) <= 1 { - continue - } - for _, src := range srcs { - errs = append(errs, DestinationConflictError{Dst: dst, Src: src}) - } - } - return -} - -// Intents returns a slice containing all of the intents in the manager. -// Intents is not thread safe -func (mgr *Manager) Intents() []*Intent { - allIntents := []*Intent{} - for _, intent := range mgr.intents { - allIntents = append(allIntents, intent) - } - for _, intent := range mgr.indexIntents { - allIntents = append(allIntents, intent) - } - if mgr.oplogIntent != nil { - allIntents = append(allIntents, mgr.oplogIntent) - } - if mgr.usersIntent != nil { - allIntents = append(allIntents, mgr.usersIntent) - } - if mgr.rolesIntent != nil { - allIntents = append(allIntents, mgr.rolesIntent) - } - if mgr.versionIntent != nil { - allIntents = append(allIntents, mgr.versionIntent) - } - return allIntents -} - -// NormalIntents returns a slice containing all of the normal intents in the manager. -// NormalIntents is not thread safe. -func (mgr *Manager) NormalIntents() []*Intent { - allIntents := []*Intent{} - for _, intent := range mgr.intents { - allIntents = append(allIntents, intent) - } - return allIntents -} - -func (mgr *Manager) IntentForNamespace(ns string) *Intent { - intent := mgr.intents[ns] - if intent != nil { - return intent - } - intent = mgr.specialIntents[ns] - return intent -} - -// Pop returns the next available intent from the manager. If the manager is -// empty, it returns nil. 
Pop is thread safe. -func (mgr *Manager) Pop() *Intent { - return mgr.prioritizer.Get() -} - -// Peek returns a copy of a stored intent from the manager without removing -// the intent. This method is useful for edge cases that need to look ahead -// at what collections are in the manager before they are scheduled. -// -// NOTE: There are no guarantees that peek will return a usable -// intent after Finalize() is called. -func (mgr *Manager) Peek() *Intent { - if len(mgr.intentsByDiscoveryOrder) == 0 { - return nil - } - intentCopy := *mgr.intentsByDiscoveryOrder[0] - return &intentCopy -} - -// Finish tells the prioritizer that mongorestore is done restoring -// the given collection intent. -func (mgr *Manager) Finish(intent *Intent) { - mgr.prioritizer.Finish(intent) -} - -// Oplog returns the intent representing the oplog, which isn't -// stored with the other intents, because it is dumped and restored in -// a very different way from other collections. -func (mgr *Manager) Oplog() *Intent { - return mgr.oplogIntent -} - -// SystemIndexes returns the system.indexes bson for a database -func (mgr *Manager) SystemIndexes(dbName string) *Intent { - return mgr.indexIntents[dbName] -} - -// SystemIndexes returns the databases for which there are system.indexes -func (mgr *Manager) SystemIndexDBs() []string { - databases := []string{} - for dbname := range mgr.indexIntents { - databases = append(databases, dbname) - } - return databases -} - -// Users returns the intent of the users collection to restore, a special case -func (mgr *Manager) Users() *Intent { - return mgr.usersIntent -} - -// Roles returns the intent of the user roles collection to restore, a special case -func (mgr *Manager) Roles() *Intent { - return mgr.rolesIntent -} - -// AuthVersion returns the intent of the version collection to restore, a special case -func (mgr *Manager) AuthVersion() *Intent { - return mgr.versionIntent -} - -// Finalize processes the intents for prioritization. Currently only two -// kinds of prioritizers are supported. No more "Put" operations may be done -// after finalize is called. -func (mgr *Manager) Finalize(pType PriorityType) { - switch pType { - case Legacy: - log.Logv(log.DebugHigh, "finalizing intent manager with legacy prioritizer") - mgr.prioritizer = newLegacyPrioritizer(mgr.intentsByDiscoveryOrder) - case LongestTaskFirst: - log.Logv(log.DebugHigh, "finalizing intent manager with longest task first prioritizer") - mgr.prioritizer = newLongestTaskFirstPrioritizer(mgr.intentsByDiscoveryOrder) - case MultiDatabaseLTF: - log.Logv(log.DebugHigh, "finalizing intent manager with multi-database longest task first prioritizer") - mgr.prioritizer = newMultiDatabaseLTFPrioritizer(mgr.intentsByDiscoveryOrder) - default: - panic("cannot initialize IntentPrioritizer with unknown type") - } - // release these for the garbage collector and to ensure code correctness - mgr.intents = nil - mgr.intentsByDiscoveryOrder = nil -} - -func (mgr *Manager) UsePrioritizer(prioritizer IntentPrioritizer) { - mgr.prioritizer = prioritizer -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/intents/intent_prioritizer.go b/vendor/github.com/mongodb/mongo-tools/common/intents/intent_prioritizer.go deleted file mode 100644 index 74f169692..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/intents/intent_prioritizer.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package intents - -import ( - "container/heap" - "sort" - "sync" -) - -type PriorityType int - -const ( - Legacy PriorityType = iota - LongestTaskFirst - MultiDatabaseLTF -) - -// IntentPrioritizer encapsulates the logic of scheduling intents -// for restoration. It can know about which intents are in the -// process of being restored through the "Finish" hook. -// -// Oplog entries and auth entries are not handled by the prioritizer, -// as these are special cases handled by the regular mongorestore code. -type IntentPrioritizer interface { - Get() *Intent - Finish(*Intent) -} - -//===== Legacy ===== - -// legacyPrioritizer processes the intents in the order they were read off the -// file system, keeping with legacy mongorestore behavior. -type legacyPrioritizer struct { - sync.Mutex - queue []*Intent -} - -func newLegacyPrioritizer(intentList []*Intent) *legacyPrioritizer { - return &legacyPrioritizer{queue: intentList} -} - -func (legacy *legacyPrioritizer) Get() *Intent { - legacy.Lock() - defer legacy.Unlock() - - if len(legacy.queue) == 0 { - return nil - } - - var intent *Intent - intent, legacy.queue = legacy.queue[0], legacy.queue[1:] - return intent -} - -func (legacy *legacyPrioritizer) Finish(*Intent) { - // no-op - return -} - -//===== Longest Task First ===== - -// longestTaskFirstPrioritizer returns intents in the order of largest -> smallest, -// with views at the front of the list, which is better at minimizing total -// runtime in parallel environments than other simple orderings. -type longestTaskFirstPrioritizer struct { - sync.Mutex - queue []*Intent -} - -// newLongestTaskFirstPrioritizer returns an initialized LTP prioritizer -func newLongestTaskFirstPrioritizer(intents []*Intent) *longestTaskFirstPrioritizer { - sort.Sort(BySizeAndView(intents)) - return &longestTaskFirstPrioritizer{ - queue: intents, - } -} - -func (ltf *longestTaskFirstPrioritizer) Get() *Intent { - ltf.Lock() - defer ltf.Unlock() - - if len(ltf.queue) == 0 { - return nil - } - - var intent *Intent - intent, ltf.queue = ltf.queue[0], ltf.queue[1:] - return intent -} - -func (ltf *longestTaskFirstPrioritizer) Finish(*Intent) { - // no-op - return -} - -// BySizeAndView attaches the methods for sort.Interface for sorting intents -// from largest to smallest size, taking into account if it's a view or not. -type BySizeAndView []*Intent - -func (s BySizeAndView) Len() int { return len(s) } -func (s BySizeAndView) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s BySizeAndView) Less(i, j int) bool { - if s[i].IsView() && !s[j].IsView() { - return true - } - if !s[i].IsView() && s[j].IsView() { - return false - } - return s[i].Size > s[j].Size -} - -// For sorting intents from largest to smallest size -type BySize []*Intent - -func (s BySize) Len() int { return len(s) } -func (s BySize) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s BySize) Less(i, j int) bool { return s[i].Size > s[j].Size } - -//===== Multi Database Longest Task First ===== - -// multiDatabaseLTF is designed to properly schedule intents with two constraints: -// 1. it is optimized to run in a multi-processor environment -// 2. 
it is optimized for parallelism against 2.6's db-level write lock -// -// These goals result in a design that attempts to have as many different -// database's intents being restored as possible and attempts to restore the -// largest collections first. -// -// If we can have a minimum number of collections in flight for a given db, -// we avoid lock contention in an optimal way on 2.6 systems. That is, -// it is better to have two restore jobs where -// -// job1 = "test.mycollection" -// job2 = "mydb2.othercollection" -// -// so that these collections do not compete for the db-level write lock. -// -// We also schedule the largest jobs first, in a greedy fashion, in order -// to minimize total restoration time. Each database's intents are sorted -// by decreasing file size at initialization, so that the largest jobs are -// run first. Admittedly, .bson file size is not a direct predictor of restore -// time, but there is certainly a strong correlation. Note that this attribute -// is secondary to the multi-db scheduling laid out above, since multi-db will -// get us bigger wins in terms of parallelism. -type multiDatabaseLTFPrioritizer struct { - sync.Mutex - dbHeap heap.Interface - counterMap map[string]*dbCounter -} - -// newMultiDatabaseLTFPrioritizer takes in a list of intents and returns an -// initialized prioritizer. -func newMultiDatabaseLTFPrioritizer(intents []*Intent) *multiDatabaseLTFPrioritizer { - prioritizer := &multiDatabaseLTFPrioritizer{ - counterMap: map[string]*dbCounter{}, - dbHeap: &DBHeap{}, - } - heap.Init(prioritizer.dbHeap) - // first, create all database counters - for _, intent := range intents { - counter, exists := prioritizer.counterMap[intent.DB] - if !exists { - // initialize a new counter if one doesn't exist for DB - counter = &dbCounter{} - prioritizer.counterMap[intent.DB] = counter - } - counter.collections = append(counter.collections, intent) - } - // then ensure that all the dbCounters have sorted intents - for _, counter := range prioritizer.counterMap { - counter.SortCollectionsBySize() - heap.Push(prioritizer.dbHeap, counter) - } - return prioritizer -} - -// Get returns the next prioritized intent and updates the count of active -// restores for the returned intent's DB. Get is not thread safe, and depends -// on the implementation of the intent manager to lock around it. -func (mdb *multiDatabaseLTFPrioritizer) Get() *Intent { - mdb.Lock() - defer mdb.Unlock() - - if mdb.dbHeap.Len() == 0 { - // we're out of things to return - return nil - } - optimalDB := heap.Pop(mdb.dbHeap).(*dbCounter) - optimalDB.active++ - nextIntent := optimalDB.PopIntent() - // only release the db counter if it's out of collections - if len(optimalDB.collections) > 0 { - heap.Push(mdb.dbHeap, optimalDB) - } - return nextIntent -} - -// Finish decreases the number of active restore jobs for the given intent's -// database, and reshuffles the heap accordingly. -func (mdb *multiDatabaseLTFPrioritizer) Finish(intent *Intent) { - mdb.Lock() - defer mdb.Unlock() - - counter := mdb.counterMap[intent.DB] - counter.active-- - // only fix up the heap if the counter is still in the heap - if len(counter.collections) > 0 { - // This is an O(n) operation on the heap. We could make all heap - // operations O(log(n)) if we set up dbCounters to track their own - // position in the heap, but in practice this overhead is likely negligible. 
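Taken together with the intent manager, the prioritizers are driven by a simple pop/finish loop; a minimal sketch with made-up namespaces and sizes:

package main

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common/intents"
)

func main() {
	mgr := intents.NewIntentManager()
	mgr.Put(&intents.Intent{DB: "shop", C: "orders", Size: 4096})
	mgr.Put(&intents.Intent{DB: "shop", C: "users", Size: 1024})

	// Pick a scheduling strategy; Put must not be called after Finalize.
	mgr.Finalize(intents.LongestTaskFirst)

	for intent := mgr.Pop(); intent != nil; intent = mgr.Pop() {
		fmt.Println("restoring", intent.Namespace())
		mgr.Finish(intent)
	}
}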
- heap.Init(mdb.dbHeap) - } -} - -type dbCounter struct { - active int - collections []*Intent -} - -func (dbc *dbCounter) SortCollectionsBySize() { - sort.Sort(BySize(dbc.collections)) -} - -// PopIntent returns the largest intent remaining for the database -func (dbc *dbCounter) PopIntent() *Intent { - var intent *Intent - if len(dbc.collections) > 0 { - intent, dbc.collections = dbc.collections[0], dbc.collections[1:] - } - return intent -} - -// Returns the largest collection of the databases with the least active restores. -// Implements the container/heap interface. None of its methods are meant to be -// called directly. -type DBHeap []*dbCounter - -func (dbh DBHeap) Len() int { return len(dbh) } -func (dbh DBHeap) Swap(i, j int) { dbh[i], dbh[j] = dbh[j], dbh[i] } -func (dbh DBHeap) Less(i, j int) bool { - if dbh[i].active == dbh[j].active { - // prioritize the largest bson file if dbs have the same number - // of restorations in progress - return dbh[i].collections[0].Size > dbh[j].collections[0].Size - } - return dbh[i].active < dbh[j].active -} - -func (dbh *DBHeap) Push(x interface{}) { - *dbh = append(*dbh, x.(*dbCounter)) -} - -func (dbh *DBHeap) Pop() interface{} { - // for container/heap package: removes the top entry and resizes the heap array - old := *dbh - n := len(old) - toPop := old[n-1] - *dbh = old[0 : n-1] - return toPop -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/bindata.go b/vendor/github.com/mongodb/mongo-tools/common/json/bindata.go deleted file mode 100644 index c508ba904..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/bindata.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package json - -import ( - "fmt" - "reflect" -) - -// Transition functions for recognizing BinData. -// Adapted from encoding/json/scanner.go. - -// stateBi is the state after reading `Bi`. -func stateBi(s *scanner, c int) int { - if c == 'n' { - s.step = generateState("BinData", []byte("Data"), stateConstructor) - return scanContinue - } - return s.error(c, "in literal BinData (expecting 'n')") -} - -// Decodes a BinData literal stored in the underlying byte data into v. -func (d *decodeState) storeBinData(v reflect.Value) { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - - args, err := d.ctor("BinData", []reflect.Type{byteType, stringType}) - if err != nil { - d.error(err) - } - switch kind := v.Kind(); kind { - case reflect.Interface: - arg0 := byte(args[0].Uint()) - arg1 := args[1].String() - v.Set(reflect.ValueOf(BinData{arg0, arg1})) - default: - d.error(fmt.Errorf("cannot store %v value into %v type", binDataType, kind)) - } -} - -// Returns a BinData literal from the underlying byte data. -func (d *decodeState) getBinData() interface{} { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - - // Prevent d.convertNumber() from parsing the argument as a float64. 
- useNumber := d.useNumber - d.useNumber = true - - args := d.ctorInterface() - if err := ctorNumArgsMismatch("BinData", 2, len(args)); err != nil { - d.error(err) - } - arg0, err := args[0].(Number).Uint8() - if err != nil { - d.error(fmt.Errorf("expected byte for first argument of BinData constructor")) - } - arg1, ok := args[1].(string) - if !ok { - d.error(fmt.Errorf("expected string for second argument of BinData constructor")) - } - - d.useNumber = useNumber - return BinData{arg0, arg1} -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/boolean.go b/vendor/github.com/mongodb/mongo-tools/common/json/boolean.go deleted file mode 100644 index 2cae19de1..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/boolean.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package json - -import ( - "fmt" - "reflect" -) - -// Transition functions for recognizing Boolean. -// Adapted from encoding/json/scanner.go. - -// stateBo is the state after reading `Bo`. -func stateBo(s *scanner, c int) int { - if c == 'o' { - s.step = generateState("Boolean", []byte("lean"), stateConstructor) - return scanContinue - } - return s.error(c, "in literal Boolean (expecting 'o')") -} - -// Decodes a Boolean literal stored in the underlying byte data into v. -func (d *decodeState) storeBoolean(v reflect.Value) { - res := d.getBoolean() - switch kind := v.Kind(); kind { - case reflect.Interface, reflect.Bool: - v.Set(reflect.ValueOf(res)) - default: - d.error(fmt.Errorf("cannot store bool value into %v type", kind)) - } -} - -// Returns a Boolean literal from the underlying byte data. -func (d *decodeState) getBoolean() interface{} { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - - // Prevent d.convertNumber() from parsing the argument as a float64. - useNumber := d.useNumber - d.useNumber = true - - args := d.ctorInterface() - if len(args) == 0 { - return false - } - - // Ignore all but the first argument. - switch v := args[0].(type) { - case bool: - return v - case Number: - d.useNumber = useNumber - - // First try Int64 so hex numbers work, then if that fails try Float64. - num, err := v.Int64() - if err == nil { - return (num != 0) - } - - numF, err := v.Float64() - if err != nil { - d.error(fmt.Errorf("expected float64 for numeric argument of Boolean constructor, got err: %v", err)) - } - return (numF != 0) - case string: - return (v != "") - case Undefined, nil: - return false - // Parameter values of any other types should yield true. - default: - return true - } -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/constructor.go b/vendor/github.com/mongodb/mongo-tools/common/json/constructor.go deleted file mode 100644 index 5383662dc..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/constructor.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package json - -import ( - "fmt" - "reflect" -) - -const CtorNumArgsErrorf = "expected %v argument%v to %v constructor, but %v received" - -// Transition functions for recognizing object constructors. -// Adapted from encoding/json/scanner.go. - -// stateConstructor is the state after reading a constructor name. -func stateConstructor(s *scanner, c int) int { - if c <= ' ' && isSpace(rune(c)) { - return scanSkipSpace - } - if c == '(' { - s.step = stateBeginCtorOrEmpty - s.pushParseState(parseCtorArg) - return scanBeginCtor - } - return s.error(c, "expected '('") -} - -// stateBeginCtorOrEmpty is the state after reading `(`. -func stateBeginCtorOrEmpty(s *scanner, c int) int { - if c <= ' ' && isSpace(rune(c)) { - return scanSkipSpace - } - if c == ')' { - return stateEndValue(s, c) - } - return stateBeginValue(s, c) -} - -// ctor consumes a constructor from d.data[d.off-1:], given a type specification t. -// the first byte of the constructor ('(') has been read already. -func (d *decodeState) ctor(name string, t []reflect.Type) ([]reflect.Value, error) { - result := make([]reflect.Value, 0, len(t)) - - i := 0 - for { - // Look ahead for ) - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndCtor { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - if i < len(t) { - v := reflect.New(t[i]).Elem() - - // Get argument of constructor - d.value(v) - - result = append(result, v) - i++ - } - - // Next token must be , or ). - op = d.scanWhile(scanSkipSpace) - if op == scanEndCtor { - break - } - if op != scanCtorArg { - d.error(errPhase) - } - } - - return result, ctorNumArgsMismatch(name, len(t), i) -} - -// ctorInterface is like ctor but returns []interface{}. -func (d *decodeState) ctorInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ) - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndCtor { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface(false)) - - // Next token must be , or ). - op = d.scanWhile(scanSkipSpace) - if op == scanEndCtor { - break - } - if op != scanCtorArg { - d.error(errPhase) - } - } - return v -} - -// Returns a descriptive error message if the number of arguments given -// to the constructor do not match what is expected. -func ctorNumArgsMismatch(name string, expected, actual int) error { - if expected == actual { - return nil - } - - quantifier := "" - if expected > 1 { - quantifier = "s" - } - return fmt.Errorf(CtorNumArgsErrorf, expected, quantifier, name, actual) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/consts.go b/vendor/github.com/mongodb/mongo-tools/common/json/consts.go deleted file mode 100644 index 1992a9117..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/consts.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package json - -const ( - ArrayStart = '[' - ArraySep = ',' - ArrayEnd = ']' -) diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/csv_format.go b/vendor/github.com/mongodb/mongo-tools/common/json/csv_format.go deleted file mode 100644 index 150f7f128..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/csv_format.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package json - -import ( - "encoding/base64" - "fmt" - "time" -) - -func (b BinData) String() string { - data, err := base64.StdEncoding.DecodeString(b.Base64) - if err != nil { - return "" // XXX: panic? - } - if b.Type == 0x02 { - data = data[4:] // skip the first 4 bytes - } - return fmt.Sprintf("%X", data) // use uppercase hexadecimal -} - -func (js JavaScript) String() string { - return js.Code -} - -func (d Date) String() string { - if d.isFormatable() { - n := int64(d) - t := time.Unix(n/1e3, n%1e3*1e6) - return t.UTC().Format(JSONDateFormat) - } - // date.MarshalJSON always returns a nil err. - data, _ := d.MarshalJSON() - return string(data) -} - -func (d DBRef) String() string { - return fmt.Sprintf(`{ "$ref": "%v", "$id": %v, "$db": "%v" }`, - d.Collection, d.Id, d.Database) -} - -func (d DBPointer) String() string { - return fmt.Sprintf(`{ "$ref": "%v", "$id": %v }`, - d.Namespace, d.Id) -} - -func (f Float) String() string { - return fmt.Sprintf("%v", float64(f)) -} - -func (MinKey) String() string { - return "$MinKey" -} - -func (MaxKey) String() string { - return "$MaxKey" -} - -func (n NumberInt) String() string { - return fmt.Sprintf("%v", int32(n)) -} - -func (n NumberLong) String() string { - return fmt.Sprintf("%v", int64(n)) -} - -// Assumes that o represents a valid ObjectId -// (composed of 24 hexadecimal characters). -func (o ObjectId) String() string { - return fmt.Sprintf("ObjectId(%v)", string(o)) -} - -func (r RegExp) String() string { - return fmt.Sprintf("/%v/%v", r.Pattern, r.Options) -} - -func (t Timestamp) String() string { - return fmt.Sprintf(`{ "$timestamp": { "t": %v, "i": %v } }`, - t.Seconds, t.Increment) -} - -func (Undefined) String() string { - return `{ "$undefined": true }` -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/date.go b/vendor/github.com/mongodb/mongo-tools/common/json/date.go deleted file mode 100644 index 0acb8e1d6..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/date.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package json - -import ( - "fmt" - "reflect" - - "github.com/mongodb/mongo-tools/common/util" -) - -// Transition functions for recognizing Date. -// Adapted from encoding/json/scanner.go. - -// stateDa is the state after reading `Da`. -func stateDa(s *scanner, c int) int { - if c == 't' { - s.step = stateDat - return scanContinue - } - return s.error(c, "in literal Date (expecting 't')") -} - -// stateDat is the state after reading `Dat`. 
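A small usage sketch of the String renderings defined in csv_format.go above, assuming the exported ObjectId, RegExp, and Timestamp types behave as shown there (illustration only, not part of the change):

package main

import (
    "fmt"

    "github.com/mongodb/mongo-tools/common/json"
)

func main() {
    // Renders as ObjectId(5f1d3c2ab7a9c4d8e6f01234)
    fmt.Println(json.ObjectId("5f1d3c2ab7a9c4d8e6f01234").String())

    // Renders as /^mongo/i
    fmt.Println(json.RegExp{Pattern: "^mongo", Options: "i"}.String())

    // Renders as { "$timestamp": { "t": 10, "i": 1 } }
    fmt.Println(json.Timestamp{Seconds: 10, Increment: 1}.String())
}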
-func stateDat(s *scanner, c int) int { - if c == 'e' { - s.step = stateConstructor - return scanContinue - } - return s.error(c, "in literal Date (expecting 'e')") -} - -// Decodes a Date literal stored in the underlying byte data into v. -func (d *decodeState) storeDate(v reflect.Value) { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - args, err := d.ctor("Date", []reflect.Type{dateType}) - if err != nil { - d.error(err) - } - switch kind := v.Kind(); kind { - case reflect.Interface: - v.Set(args[0]) - default: - d.error(fmt.Errorf("cannot store %v value into %v type", dateType, kind)) - } -} - -// Returns a Date literal from the underlying byte data. -func (d *decodeState) getDate() interface{} { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - - // Prevent d.convertNumber() from parsing the argument as a float64. - useNumber := d.useNumber - d.useNumber = true - - args := d.ctorInterface() - if err := ctorNumArgsMismatch("Date", 1, len(args)); err != nil { - d.error(err) - } - arg0num, isNumber := args[0].(Number) - if !isNumber { - // validate the date format of the string - _, err := util.FormatDate(args[0].(string)) - if err != nil { - d.error(fmt.Errorf("unexpected ISODate format")) - } - d.useNumber = useNumber - return ISODate(args[0].(string)) - } - arg0, err := arg0num.Int64() - if err != nil { - d.error(fmt.Errorf("expected int64 for first argument of Date constructor")) - } - - d.useNumber = useNumber - return Date(arg0) -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/dbpointer.go b/vendor/github.com/mongodb/mongo-tools/common/json/dbpointer.go deleted file mode 100644 index bbf19704c..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/dbpointer.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package json - -import ( - "go.mongodb.org/mongo-driver/bson/primitive" - - "fmt" - "reflect" -) - -// Transition functions for recognizing DBPointer. -// Adapted from encoding/json/scanner.go. - -// stateDB is the state after reading `DB`. -func stateDBP(s *scanner, c int) int { - if c == 'o' { - s.step = generateState("DBPointer", []byte("inter"), stateConstructor) - return scanContinue - } - return s.error(c, "in literal DBPointer (expecting 'o')") -} - -// Decodes a DBRef literal stored in the underlying byte data into v. 
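Combined with the Boolean decoder shown earlier, these constructor parsers let shell-style literals be unmarshaled directly. A hedged sketch of what that looks like from the caller's side; the document contents are made up, and the behavior is inferred from the getDate and getBoolean implementations above:

package main

import (
    "fmt"

    "github.com/mongodb/mongo-tools/common/json"
)

func main() {
    var doc map[string]interface{}
    data := []byte(`{"when": Date(1424696567000), "flag": Boolean("")}`)
    if err := json.Unmarshal(data, &doc); err != nil {
        panic(err)
    }
    // "when" decodes to a json.Date (milliseconds since the Unix epoch);
    // "flag" decodes to false, since an empty string is falsy for Boolean.
    fmt.Printf("%T %v\n", doc["when"], doc["flag"])
}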
-func (d *decodeState) storeDBPointer(v reflect.Value) { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - - args := d.ctorInterface() - if len(args) != 2 { - d.error(fmt.Errorf("expected 2 arguments to DBPointer constructor, but %v received", len(args))) - } - switch kind := v.Kind(); kind { - case reflect.Interface: - arg0, ok := args[0].(string) - if !ok { - d.error(fmt.Errorf("expected first argument to DBPointer to be of type string")) - } - arg1, ok := args[1].(ObjectId) - if !ok { - d.error(fmt.Errorf("expected second argument to DBPointer to be of type ObjectId, but ended up being %t", args[1])) - } - oid, err := primitive.ObjectIDFromHex(string(arg1)) - if err != nil { - d.error(fmt.Errorf("cannot parse ObjectID from string %v: %v", arg1, err)) - } - v.Set(reflect.ValueOf(DBPointer{arg0, oid})) - default: - d.error(fmt.Errorf("cannot store %v value into %v type", dbPointerType, kind)) - } -} - -// Returns a DBRef literal from the underlying byte data. -func (d *decodeState) getDBPointer() interface{} { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - - args := d.ctorInterface() - if err := ctorNumArgsMismatch("DBPointer", 2, len(args)); err != nil { - d.error(err) - } - arg0, ok := args[0].(string) - if !ok { - d.error(fmt.Errorf("expected string for first argument of DBPointer constructor")) - } - arg1, ok := args[1].(ObjectId) - if !ok { - d.error(fmt.Errorf("expected ObjectId for second argument of DBPointer constructor")) - } - oid, err := primitive.ObjectIDFromHex(string(arg1)) - if err != nil { - d.error(fmt.Errorf("cannot parse ObjectID from string %v: %v", arg1, err)) - } - - return DBPointer{arg0, oid} -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/dbref.go b/vendor/github.com/mongodb/mongo-tools/common/json/dbref.go deleted file mode 100644 index 15fba1fbc..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/dbref.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package json - -import ( - "fmt" - "reflect" -) - -// Transition functions for recognizing DBRef and Dbref. -// Adapted from encoding/json/scanner.go. - -// stateDB is the state after reading `DB`. -func stateDBR(s *scanner, c int) int { - if c == 'e' { - s.step = generateState("DBRef", []byte("f"), stateConstructor) - return scanContinue - } - return s.error(c, "in literal DBRef (expecting 'e')") -} - -// stateDb is the state after reading `Db`. -func stateDb(s *scanner, c int) int { - if c == 'r' { - s.step = generateState("Dbref", []byte("ef"), stateConstructor) - return scanContinue - } - return s.error(c, "in literal Dbref (expecting 'r')") -} - -// Decodes a DBRef literal stored in the underlying byte data into v. 
-func (d *decodeState) storeDBRef(v reflect.Value) { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - - args := d.ctorInterface() - if len(args) != 2 { - d.error(fmt.Errorf("expected 2 arguments to DBRef constructor, but %v received", len(args))) - } - switch kind := v.Kind(); kind { - case reflect.Interface: - arg0, ok := args[0].(string) - if !ok { - d.error(fmt.Errorf("expected first argument to DBRef to be of type string")) - } - arg1 := args[1] - v.Set(reflect.ValueOf(DBRef{arg0, arg1, ""})) - default: - d.error(fmt.Errorf("cannot store %v value into %v type", dbRefType, kind)) - } -} - -// Returns a DBRef literal from the underlying byte data. -func (d *decodeState) getDBRef() interface{} { - op := d.scanWhile(scanSkipSpace) - if op != scanBeginCtor { - d.error(fmt.Errorf("expected beginning of constructor")) - } - - args := d.ctorInterface() - if err := ctorNumArgsMismatch("DBRef", 2, len(args)); err != nil { - d.error(err) - } - arg0, ok := args[0].(string) - if !ok { - d.error(fmt.Errorf("expected string for first argument of DBRef constructor")) - } - return DBRef{arg0, args[1], ""} -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/decode.go b/vendor/github.com/mongodb/mongo-tools/common/json/decode.go deleted file mode 100644 index c87efe836..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/decode.go +++ /dev/null @@ -1,1278 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/golang/go by The Go Authors -// See cyclonedx.sbom.json for original license terms. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "go.mongodb.org/mongo-driver/bson" - - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshalling as best it can. 
-// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// “not present,” unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -func UnmarshalMap(data []byte) (map[string]interface{}, error) { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return nil, err - } - - d.init(data) - return d.unmarshalMap() -} - -func UnmarshalBsonD(data []byte) (bson.D, error) { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return nil, err - } - - d.init(data) - return d.unmarshalBsonD() -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.)
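UnmarshalBsonD above is the entry point the tools use when key order matters. A minimal usage sketch with illustrative values only:

package main

import (
    "fmt"

    "github.com/mongodb/mongo-tools/common/json"
)

func main() {
    // UnmarshalBsonD keeps key order, which matters for things like index
    // specs; nested objects in the result are decoded as bson.D as well.
    d, err := json.UnmarshalBsonD([]byte(`{"b": 1, "a": {"c": 2}}`))
    if err != nil {
        panic(err)
    }
    for _, e := range d {
        fmt.Println(e.Key, e.Value)
    }
}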
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshalMap() (out map[string]interface{}, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - out = d.document() - return out, d.savedError -} - -func (d *decodeState) unmarshalBsonD() (out bson.D, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - out = d.bsonDocument() - return out, d.savedError -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int32 returns the number as an int32. -func (n Number) Int32() (int32, error) { - x, err := n.Int64() - return int32(x), err -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - base := 10 - if isHexPrefix(string(n)) { - base = 0 // strconv.ParseInt will infer base 16 - } - return strconv.ParseInt(string(n), base, 64) -} - -// Uint8 returns the number as an uint8. -func (n Number) Uint8() (uint8, error) { - x, err := n.Uint64() - return uint8(x), err -} - -// Uint32 returns the number as an uint32. -func (n Number) Uint32() (uint32, error) { - x, err := n.Uint64() - return uint32(x), err -} - -// Uint64 returns the number as an uint64. -func (n Number) Uint64() (uint64, error) { - base := 10 - if isHexPrefix(string(n)) { - base = 0 // strconv.ParseUint will infer base 16 - } - return strconv.ParseUint(string(n), base, 64) -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - tempstr string // scratch space to avoid some allocations - useNumber bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. 
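The Number accessors above accept hexadecimal literals by passing base 0 to strconv when the literal carries a hex prefix. A quick sketch of that behavior (illustrative only):

package main

import (
    "fmt"

    "github.com/mongodb/mongo-tools/common/json"
)

func main() {
    // Number keeps the literal text; Int64 parses "0x1f" as hex (base 0).
    i, err := json.Number("0x1f").Int64()
    fmt.Println(i, err) // 31 <nil>

    f, err := json.Number("2.5").Float64()
    fmt.Println(f, err) // 2.5 <nil>
}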
-var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := int(d.data[d.off]) - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -func (d *decodeState) document() map[string]interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - return nil - case scanBeginObject: - return d.objectInterface() - } -} - -func (d *decodeState) bsonDocument() bson.D { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - return nil - case scanBeginObject: - return d.bsonDInterface() - } -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type()}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface(false))) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type()}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte of the object ('{') has been read already. -func (d *decodeState) object(v reflect.Value) { - - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type()}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type()}) - break - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - - case reflect.Struct: - // do nothing - - case reflect.Slice: - // this is only a valid case if the output type is a bson.D - t := v.Type() - if t == orderedBSONType { - v.Set(reflect.ValueOf(d.bsonDInterface())) - return - } - fallthrough // can't unmarshal into a regular slice, goto "default" error case - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type()}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := maybeUnquoteBytes(item) - if !ok { - d.error(errPhase) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - // Read value. - if destring { - d.value(reflect.ValueOf(&d.tempstr)) - d.literalStore([]byte(d.tempstr), subv, true) - d.tempstr = "" // Zero scratch space for successive values. - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). -func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. 
- start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to an int32, int64, a float64, -// or a Number depending on the setting of d.useNumber and whether the -// string is specified in hexadecimal. It does this by parsing the string to see if it -// can an integer, if not it is treated as a float. If the integer is within the bounds of an int32 it -// is returned as an int32. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - parsedInteger, err := strconv.ParseInt(s, 0, 64) - if err != nil { - parsedFloat, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0)} - } - return parsedFloat, nil - } - - if parsedInteger <= math.MaxInt32 && parsedInteger >= math.MinInt32 { - return int32(parsedInteger), nil - } - return int64(parsedInteger), nil - -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - // Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := isNull(item) // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type()}) - } - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; { - case isNull(item): // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case c == 't', c == 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type()}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type()}) - } - } - - case c == '"', c == '\'': // string - - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type()}) - case reflect.Slice: - if v.Type() != byteSliceType { - d.saveError(&UnmarshalTypeError{"string", v.Type()}) - break - } - b := 
make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.Set(reflect.ValueOf(b[0:n])) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type()}) - } - } - - case isNumber(item): - s := string(item) - switch v.Kind() { - default: - - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type()}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type()}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - base := 10 - if isHexPrefix(s) { - base = 0 // strconv.ParseInt will infer base 16 - } - n, err := strconv.ParseInt(s, base, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - base := 10 - if isHexPrefix(s) { - base = 0 // strconv.ParseUint will infer base 16 - } - n, err := strconv.ParseUint(s, base, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) - break - } - v.SetFloat(n) - } - - default: - if ok := d.storeExtendedLiteral(item, v, fromQuoted); !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - } - -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{}. It takes a boolean -// parameter denoting whether or not the value is being unmarshalled within -// a bson.D, so that bson.Ds can be the default object type when -// they are inside other bson.D documents. -func (d *decodeState) valueInterface(insideBSOND bool) interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface(insideBSOND) - case scanBeginObject: - if insideBSOND { - return d.bsonDInterface() - } - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. It takes a boolean -// parameter denoting whether or not the value is being unmarshalled within -// a bson.D, so that bson.Ds can be the default object type when -// they are inside other bson.D documents. -func (d *decodeState) arrayInterface(insideBSOND bool) []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. 
- op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface(insideBSOND)) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// bsonDInterface is like object but returns bson.D{}. -func (d *decodeState) bsonDInterface() bson.D { - m := bson.D{} - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := maybeUnquote(item) - if !ok { - d.error(errPhase) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m = append(m, bson.E{Key: key, Value: d.valueInterface(true)}) - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := maybeUnquote(item) - if !ok { - d.error(errPhase) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface(false) - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; { - case isNull(item): // null - return nil - - case c == 't', c == 'f': // true, false - return c == 't' - - case c == '"', c == '\'': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - case isNumber(item): // number - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - - default: - if value, ok := d.getExtendedLiteral(item); ok { - return value - } - d.error(errPhase) - panic("unreachable") - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. 
-// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func maybeUnquote(s []byte) (t string, ok bool) { - s, ok = maybeUnquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' && s[0] != '\'' || s[len(s)-1] != '"' && s[len(s)-1] != '\'' { - return - } - singleQuoted := s[0] == '\'' - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c == '\'' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Double quote, control characters are invalid. - case !singleQuoted && c == '"', c < ' ': - return - - // Single quote characters are invalid. - case singleQuoted && c == '\'': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/encode.go b/vendor/github.com/mongodb/mongo-tools/common/json/encode.go deleted file mode 100644 index 4229269fb..000000000 --- a/vendor/github.com/mongodb/mongo-tools/common/json/encode.go +++ /dev/null @@ -1,1194 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2014-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// -// Based on github.com/golang/go by The Go Authors -// See cyclonedx.sbom.json for original license terms. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. 
-// -// See "JSON and Go" for an introduction to this package: -// http://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings. InvalidUTF8Error will be returned -// if an invalid UTF-8 sequence is encountered. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// or integer types. This extra level of encoding is sometimes used when -// communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. 
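The struct-tag rules documented above mirror encoding/json. A short sketch of the common options, using a hypothetical Event type; the expected output is inferred from the documented rules rather than run against this exact package:

package main

import (
    "fmt"

    "github.com/mongodb/mongo-tools/common/json"
)

type Event struct {
    Name  string `json:"name"`
    Count int    `json:"count,omitempty"` // dropped when zero
    Note  string `json:"-"`               // never marshaled
    ID    int64  `json:"id,string"`       // encoded as a JSON string
}

func main() {
    out, err := json.Marshal(Event{Name: "restore", ID: 42})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // expected: {"name":"restore","id":"42"}
}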
If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the object keys are used directly -// as map keys. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. -// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML